the-stack_106_27259 | # Sharon Gibbons, 12-03-2018
# Project Euler Problem 5: Smallest multiple
# https://projecteuler.net/problem=5
# https://www.programiz.com/python-programming/examples/lcm
# Python program to find the least common multiple for a range of numbers
# defines findgcd function
# https://codility.com/media/train/10-Gcd.pdf
def findgcd(x, y):
"""This function returns the greatest common factor/divisor."""
    if x % y == 0:  # base case: y divides x evenly, so y is the gcd
return y
else:
return findgcd(y, x % y)
# defines findlcm function
# https://codility.com/media/train/10-Gcd.pdf
def findlcm(x, y):
"""This function returns the least common multiple."""
    # the least common multiple is the product of the inputs divided by their gcd
    lcm = (x * y) // findgcd(x, y)
return lcm
# sets up the range of numbers and seeds the running lcm with its first two values
l = range(1,21)
x = l[0]
y = l[1]
lcm = findlcm(x, y)
# folds findlcm over the remaining numbers in the range
for i in range(1,20):
lcm = findlcm(lcm, l[i])
# output
print(lcm)
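# Hedged cross-check (added; not part of the original submission): the same
# result can be obtained by folding the lcm step over the whole range with
# functools.reduce, reusing the findgcd defined above. The assert is purely
# illustrative.
from functools import reduce
assert reduce(lambda a, b: (a * b) // findgcd(a, b), range(1, 21)) == lcm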
|
the-stack_106_27261 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
import numpy as np
import math
import cv2
from ..utils.keypoint_utils import transform_preds
from ..utils.workspace import register, create
__all__ = ['TopDownHRNet']
class BaseArch(nn.Layer):
def __init__(self, data_format='NCHW'):
super(BaseArch, self).__init__()
self.data_format = data_format
self.inputs = {}
self.fuse_norm = False
def load_meanstd(self, cfg_transform):
self.scale = 1.
self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape(
(1, 3, 1, 1))
self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))
for item in cfg_transform:
if 'NormalizeImage' in item:
self.mean = paddle.to_tensor(item['NormalizeImage'][
'mean']).reshape((1, 3, 1, 1))
self.std = paddle.to_tensor(item['NormalizeImage'][
'std']).reshape((1, 3, 1, 1))
if item['NormalizeImage'].get('is_scale', True):
self.scale = 1. / 255.
break
if self.data_format == 'NHWC':
self.mean = self.mean.reshape(1, 1, 1, 3)
self.std = self.std.reshape(1, 1, 1, 3)
def forward(self, inputs):
if self.data_format == 'NHWC':
image = inputs['image']
inputs['image'] = paddle.transpose(image, [0, 2, 3, 1])
if self.fuse_norm:
image = inputs['image']
self.inputs['image'] = (image * self.scale - self.mean) / self.std
self.inputs['im_shape'] = inputs['im_shape']
self.inputs['scale_factor'] = inputs['scale_factor']
else:
self.inputs = inputs
self.model_arch()
if self.training:
out = self.get_loss()
else:
out = self.get_pred()
return out
def build_inputs(self, data, input_def):
inputs = {}
for i, k in enumerate(input_def):
inputs[k] = data[i]
return inputs
def model_arch(self, ):
pass
def get_loss(self, ):
raise NotImplementedError("Should implement get_loss method!")
def get_pred(self, ):
raise NotImplementedError("Should implement get_pred method!")
def Conv2d(in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
weight_init=Normal(std=0.001),
bias_init=Constant(0.)):
weight_attr = paddle.framework.ParamAttr(initializer=weight_init)
if bias:
bias_attr = paddle.framework.ParamAttr(initializer=bias_init)
else:
bias_attr = False
conv = nn.Conv2D(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
weight_attr=weight_attr,
bias_attr=bias_attr)
return conv
@register
class TopDownHRNet(BaseArch):
__category__ = 'architecture'
__inject__ = ['loss']
def __init__(self,
width,
num_joints,
backbone='HRNet',
loss='KeyPointMSELoss',
post_process='HRNetPostProcess',
flip_perm=None,
flip=True,
shift_heatmap=True,
use_dark=True):
"""
HRNet network, see https://arxiv.org/abs/1902.09212
Args:
backbone (nn.Layer): backbone instance
post_process (object): `HRNetPostProcess` instance
flip_perm (list): The left-right joints exchange order list
use_dark(bool): Whether to use DARK in post processing
"""
super(TopDownHRNet, self).__init__()
self.backbone = backbone
self.post_process = HRNetPostProcess(use_dark)
self.loss = loss
self.flip_perm = flip_perm
self.flip = flip
self.final_conv = Conv2d(width, num_joints, 1, 1, 0, bias=True)
self.shift_heatmap = shift_heatmap
self.deploy = False
@classmethod
def from_config(cls, cfg, *args, **kwargs):
# backbone
backbone = create(cfg['backbone'])
return {'backbone': backbone, }
def _forward(self):
feats = self.backbone(self.inputs)
hrnet_outputs = self.final_conv(feats[0])
if self.training:
return self.loss(hrnet_outputs, self.inputs)
elif self.deploy:
outshape = hrnet_outputs.shape
max_idx = paddle.argmax(
hrnet_outputs.reshape(
(outshape[0], outshape[1], outshape[2] * outshape[3])),
axis=-1)
return hrnet_outputs, max_idx
else:
if self.flip:
self.inputs['image'] = self.inputs['image'].flip([3])
feats = self.backbone(self.inputs)
output_flipped = self.final_conv(feats[0])
output_flipped = self.flip_back(output_flipped.numpy(),
self.flip_perm)
output_flipped = paddle.to_tensor(output_flipped.copy())
if self.shift_heatmap:
output_flipped[:, :, :, 1:] = output_flipped.clone(
)[:, :, :, 0:-1]
hrnet_outputs = (hrnet_outputs + output_flipped) * 0.5
imshape = (self.inputs['im_shape'].numpy()
)[:, ::-1] if 'im_shape' in self.inputs else None
center = self.inputs['center'].numpy(
) if 'center' in self.inputs else np.round(imshape / 2.)
scale = self.inputs['scale'].numpy(
) if 'scale' in self.inputs else imshape / 200.
outputs = self.post_process(hrnet_outputs, center, scale)
return outputs
def get_loss(self):
return self._forward()
def get_pred(self):
res_lst = self._forward()
outputs = {'keypoint': res_lst}
return outputs
def flip_back(self, output_flipped, matched_parts):
assert output_flipped.ndim == 4,\
'output_flipped should be [batch_size, num_joints, height, width]'
output_flipped = output_flipped[:, :, :, ::-1]
for pair in matched_parts:
tmp = output_flipped[:, pair[0], :, :].copy()
output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]
output_flipped[:, pair[1], :, :] = tmp
return output_flipped
class HRNetPostProcess(object):
def __init__(self, use_dark=True):
self.use_dark = use_dark
def get_max_preds(self, heatmaps):
'''get predictions from score maps
Args:
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints
'''
assert isinstance(heatmaps,
np.ndarray), 'heatmaps should be numpy.ndarray'
assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
width = heatmaps.shape[3]
heatmaps_reshaped = heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
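    # Worked example (added for illustration; values are hypothetical): for a
    # heatmap of shape [1, 1, 4, 4] whose only non-zero entry is hm[0, 0, 2, 3] = 1.0,
    # the flattened argmax index is 2 * 4 + 3 = 11, so get_max_preds returns
    # preds = [[[3., 2.]]] (x = 11 % width, y = 11 // width) and maxvals = [[[1.]]].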
def gaussian_blur(self, heatmap, kernel):
border = (kernel - 1) // 2
batch_size = heatmap.shape[0]
num_joints = heatmap.shape[1]
height = heatmap.shape[2]
width = heatmap.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmap[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border))
dr[border:-border, border:-border] = heatmap[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmap[i, j] = dr[border:-border, border:-border].copy()
heatmap[i, j] *= origin_max / np.max(heatmap[i, j])
return heatmap
def dark_parse(self, hm, coord):
heatmap_height = hm.shape[0]
heatmap_width = hm.shape[1]
px = int(coord[0])
py = int(coord[1])
if 1 < px < heatmap_width - 2 and 1 < py < heatmap_height - 2:
dx = 0.5 * (hm[py][px + 1] - hm[py][px - 1])
dy = 0.5 * (hm[py + 1][px] - hm[py - 1][px])
dxx = 0.25 * (hm[py][px + 2] - 2 * hm[py][px] + hm[py][px - 2])
dxy = 0.25 * (hm[py+1][px+1] - hm[py-1][px+1] - hm[py+1][px-1] \
+ hm[py-1][px-1])
dyy = 0.25 * (
hm[py + 2 * 1][px] - 2 * hm[py][px] + hm[py - 2 * 1][px])
derivative = np.matrix([[dx], [dy]])
hessian = np.matrix([[dxx, dxy], [dxy, dyy]])
if dxx * dyy - dxy**2 != 0:
hessianinv = hessian.I
offset = -hessianinv * derivative
offset = np.squeeze(np.array(offset.T), axis=0)
coord += offset
return coord
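    # Note (added): the block above is one Newton step around the integer peak:
    # with gradient g = [dx, dy] and Hessian H = [[dxx, dxy], [dxy, dyy]] estimated
    # by central finite differences, the sub-pixel refinement is offset = -H^{-1} g,
    # i.e. the maximizer of the local second-order Taylor expansion. dark_postprocess
    # below applies this to the log of the Gaussian-blurred heatmap, following the
    # DARK paper (Zhang et al., CVPR 2020).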
def dark_postprocess(self, hm, coords, kernelsize):
        '''DARK post-processing, Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
'''
hm = self.gaussian_blur(hm, kernelsize)
hm = np.maximum(hm, 1e-10)
hm = np.log(hm)
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
coords[n, p] = self.dark_parse(hm[n][p], coords[n][p])
return coords
def get_final_preds(self, heatmaps, center, scale, kernelsize=3):
"""the highest heatvalue location with a quarter offset in the
direction from the highest response to the second highest response.
Args:
heatmaps (numpy.ndarray): The predicted heatmaps
center (numpy.ndarray): The boxes center
scale (numpy.ndarray): The scale factor
Returns:
preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords
maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints
"""
coords, maxvals = self.get_max_preds(heatmaps)
heatmap_height = heatmaps.shape[2]
heatmap_width = heatmaps.shape[3]
if self.use_dark:
coords = self.dark_postprocess(heatmaps, coords, kernelsize)
else:
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = heatmaps[n][p]
px = int(math.floor(coords[n][p][0] + 0.5))
py = int(math.floor(coords[n][p][1] + 0.5))
if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
diff = np.array([
hm[py][px + 1] - hm[py][px - 1],
hm[py + 1][px] - hm[py - 1][px]
])
coords[n][p] += np.sign(diff) * .25
preds = coords.copy()
# Transform back
for i in range(coords.shape[0]):
preds[i] = transform_preds(coords[i], center[i], scale[i],
[heatmap_width, heatmap_height])
return preds, maxvals
def __call__(self, output, center, scale):
preds, maxvals = self.get_final_preds(output.numpy(), center, scale)
outputs = [[
np.concatenate(
(preds, maxvals), axis=-1), np.mean(
maxvals, axis=1)
]]
return outputs
|
the-stack_106_27262 | """
A two-step (registration followed by activation) workflow, implemented
by emailing an HMAC-verified timestamped activation token to the user
on signup.
"""
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core import signing
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django_registration import signals
from django_registration.exceptions import ActivationError
from django_registration.views import ActivationView as BaseActivationView
from django_registration.views import RegistrationView as BaseRegistrationView
REGISTRATION_SALT = getattr(settings, "REGISTRATION_SALT", "registration")
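# Minimal sketch (added for illustration; "alice" and the 7-day window are assumed
# values, not part of this module) of the signing round trip the workflow relies on:
#
#     key = signing.dumps(obj="alice", salt=REGISTRATION_SALT)
#     signing.loads(key, salt=REGISTRATION_SALT, max_age=7 * 86400)  # -> "alice"
#
# RegistrationView.get_activation_key() performs the dumps() call and
# ActivationView.validate_key() performs the loads() call below.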
class RegistrationView(BaseRegistrationView):
"""
Register a new (inactive) user account, generate an activation key
and email it to the user.
This is different from the model-based activation workflow in that
the activation key is the username, signed using Django's
TimestampSigner, with HMAC verification on activation.
"""
email_body_template = "django_registration/activation_email_body.txt"
email_subject_template = "django_registration/activation_email_subject.txt"
success_url = reverse_lazy("django_registration_complete")
def register(self, form):
new_user = self.create_inactive_user(form)
signals.user_registered.send(
sender=self.__class__, user=new_user, request=self.request
)
return new_user
def create_inactive_user(self, form):
"""
Create the inactive user account and send an email containing
activation instructions.
"""
new_user = form.save(commit=False)
new_user.is_active = False
new_user.save()
self.send_activation_email(new_user)
return new_user
def get_activation_key(self, user):
"""
Generate the activation key which will be emailed to the user.
"""
return signing.dumps(obj=user.get_username(), salt=REGISTRATION_SALT)
def get_email_context(self, activation_key):
"""
Build the template context used for the activation email.
"""
scheme = "https" if self.request.is_secure() else "http"
return {
"scheme": scheme,
"activation_key": activation_key,
"expiration_days": settings.ACCOUNT_ACTIVATION_DAYS,
"site": get_current_site(self.request),
}
def send_activation_email(self, user):
"""
Send the activation email. The activation key is the username,
signed using TimestampSigner.
"""
activation_key = self.get_activation_key(user)
context = self.get_email_context(activation_key)
context["user"] = user
subject = render_to_string(
template_name=self.email_subject_template,
context=context,
request=self.request,
)
# Force subject to a single line to avoid header-injection
# issues.
subject = "".join(subject.splitlines())
message = render_to_string(
template_name=self.email_body_template,
context=context,
request=self.request,
)
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
class ActivationView(BaseActivationView):
"""
Given a valid activation key, activate the user's
account. Otherwise, show an error message stating the account
couldn't be activated.
"""
ALREADY_ACTIVATED_MESSAGE = _(
"The account you tried to activate has already been activated."
)
BAD_USERNAME_MESSAGE = _("The account you attempted to activate is invalid.")
EXPIRED_MESSAGE = _("This account has expired.")
INVALID_KEY_MESSAGE = _("The activation key you provided is invalid.")
success_url = reverse_lazy("django_registration_activation_complete")
def activate(self, *args, **kwargs):
username = self.validate_key(kwargs.get("activation_key"))
user = self.get_user(username)
user.is_active = True
user.save()
return user
def validate_key(self, activation_key):
"""
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or raising ``ActivationError`` if not.
"""
try:
username = signing.loads(
activation_key,
salt=REGISTRATION_SALT,
max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400,
)
return username
except signing.SignatureExpired:
raise ActivationError(self.EXPIRED_MESSAGE, code="expired")
except signing.BadSignature:
raise ActivationError(
self.INVALID_KEY_MESSAGE,
code="invalid_key",
params={"activation_key": activation_key},
)
def get_user(self, username):
"""
Given the verified username, look up and return the
        corresponding user account if it exists, or raise
``ActivationError`` if it doesn't.
"""
User = get_user_model()
try:
user = User.objects.get(**{User.USERNAME_FIELD: username})
if user.is_active:
raise ActivationError(
self.ALREADY_ACTIVATED_MESSAGE, code="already_activated"
)
return user
except User.DoesNotExist:
raise ActivationError(self.BAD_USERNAME_MESSAGE, code="bad_username")
|
the-stack_106_27264 | import logging
from galaxy.web.form_builder import SelectField
from tool_shed.util import hg_util, metadata_util
log = logging.getLogger( __name__ )
def build_approved_select_field( trans, name, selected_value=None, for_component=True ):
options = [ ( 'No', trans.model.ComponentReview.approved_states.NO ),
( 'Yes', trans.model.ComponentReview.approved_states.YES ) ]
if for_component:
options.append( ( 'Not applicable', trans.model.ComponentReview.approved_states.NA ) )
if selected_value is None:
selected_value = trans.model.ComponentReview.approved_states.NA
select_field = SelectField( name=name )
for option_tup in options:
selected = selected_value and option_tup[ 1 ] == selected_value
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
return select_field
def build_changeset_revision_select_field( trans, repository, selected_value=None, add_id_to_name=True,
downloadable=False, reviewed=False, not_reviewed=False ):
"""
Build a SelectField whose options are the changeset_rev strings of certain revisions of the
received repository.
"""
options = []
changeset_tups = []
refresh_on_change_values = []
if downloadable:
# Restrict the options to downloadable revisions.
repository_metadata_revisions = repository.downloadable_revisions
elif reviewed:
# Restrict the options to revisions that have been reviewed.
repository_metadata_revisions = []
metadata_changeset_revision_hashes = []
for metadata_revision in repository.metadata_revisions:
metadata_changeset_revision_hashes.append( metadata_revision.changeset_revision )
for review in repository.reviews:
if review.changeset_revision in metadata_changeset_revision_hashes:
repository_metadata_revisions.append( review.repository_metadata )
elif not_reviewed:
# Restrict the options to revisions that have not yet been reviewed.
repository_metadata_revisions = []
reviewed_metadata_changeset_revision_hashes = []
for review in repository.reviews:
reviewed_metadata_changeset_revision_hashes.append( review.changeset_revision )
for metadata_revision in repository.metadata_revisions:
if metadata_revision.changeset_revision not in reviewed_metadata_changeset_revision_hashes:
repository_metadata_revisions.append( metadata_revision )
else:
# Restrict the options to all revisions that have associated metadata.
repository_metadata_revisions = repository.metadata_revisions
for repository_metadata in repository_metadata_revisions:
rev, label, changeset_revision = \
hg_util.get_rev_label_changeset_revision_from_repository_metadata( trans.app,
repository_metadata,
repository=repository,
include_date=True,
include_hash=False )
changeset_tups.append( ( rev, label, changeset_revision ) )
refresh_on_change_values.append( changeset_revision )
# Sort options by the revision label. Even though the downloadable_revisions query sorts by update_time,
# the changeset revisions may not be sorted correctly because setting metadata over time will reset update_time.
for changeset_tup in sorted( changeset_tups ):
# Display the latest revision first.
options.insert( 0, ( changeset_tup[ 1 ], changeset_tup[ 2 ] ) )
if add_id_to_name:
name = 'changeset_revision_%d' % repository.id
else:
name = 'changeset_revision'
select_field = SelectField( name=name,
refresh_on_change=True,
refresh_on_change_values=refresh_on_change_values )
for option_tup in options:
selected = selected_value and option_tup[ 1 ] == selected_value
select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
return select_field
def filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components( trans, repository ):
"""
Inspect the latest downloadable changeset revision for the received repository to see if it
includes tools that are either missing functional tests or functional test data. If the
    changeset revision includes tools but is missing tool test components, return the changeset
revision hash. This will filter out repositories of type repository_suite_definition and
tool_dependency_definition.
"""
repository_metadata = get_latest_downloadable_repository_metadata_if_it_includes_tools( trans, repository )
if repository_metadata is not None \
and repository_metadata.missing_test_components:
return repository_metadata.changeset_revision
return None
def filter_by_latest_metadata_changeset_revision_that_has_invalid_tools( trans, repository ):
"""
Inspect the latest changeset revision with associated metadata for the received repository
to see if it has invalid tools. This will filter out repositories of type repository_suite_definition
and tool_dependency_definition.
"""
repository_metadata = get_latest_repository_metadata_if_it_includes_invalid_tools( trans, repository )
if repository_metadata is not None:
return repository_metadata.changeset_revision
return None
def get_latest_downloadable_repository_metadata( trans, repository ):
"""
Return the latest downloadable repository_metadata record for the received repository. This will
return repositories of type unrestricted as well as types repository_suite_definition and
tool_dependency_definition.
"""
encoded_repository_id = trans.security.encode_id( repository.id )
repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
tip_ctx = str( repo.changectx( repo.changelog.tip() ) )
repository_metadata = None
try:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision( trans.app, encoded_repository_id, tip_ctx )
if repository_metadata is not None and repository_metadata.downloadable:
return repository_metadata
return None
except:
latest_downloadable_revision = metadata_util.get_previous_metadata_changeset_revision( repository,
repo,
tip_ctx,
downloadable=True )
if latest_downloadable_revision == hg_util.INITIAL_CHANGELOG_HASH:
return None
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision( trans.app,
encoded_repository_id,
latest_downloadable_revision )
if repository_metadata is not None and repository_metadata.downloadable:
return repository_metadata
return None
def get_latest_downloadable_repository_metadata_if_it_includes_tools( trans, repository ):
"""
Return the latest downloadable repository_metadata record for the received repository if its
includes_tools attribute is True. This will filter out repositories of type repository_suite_definition
and tool_dependency_definition.
"""
repository_metadata = get_latest_downloadable_repository_metadata( trans, repository )
if repository_metadata is not None and repository_metadata.includes_tools:
return repository_metadata
return None
def get_latest_repository_metadata( trans, repository ):
"""
Return the latest repository_metadata record for the received repository if it exists. This will
return repositories of type unrestricted as well as types repository_suite_definition and
tool_dependency_definition.
"""
encoded_repository_id = trans.security.encode_id( repository.id )
repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )
tip_ctx = str( repo.changectx( repo.changelog.tip() ) )
try:
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision( trans.app, encoded_repository_id, tip_ctx )
return repository_metadata
except:
latest_downloadable_revision = metadata_util.get_previous_metadata_changeset_revision( repository,
repo,
tip_ctx,
downloadable=False )
if latest_downloadable_revision == hg_util.INITIAL_CHANGELOG_HASH:
return None
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision( trans.app,
encoded_repository_id,
latest_downloadable_revision )
return repository_metadata
def get_latest_repository_metadata_if_it_includes_invalid_tools( trans, repository ):
"""
Return the latest repository_metadata record for the received repository that contains invalid
tools if one exists. This will filter out repositories of type repository_suite_definition and
tool_dependency_definition.
"""
repository_metadata = get_latest_repository_metadata( trans, repository )
if repository_metadata is not None:
metadata = repository_metadata.metadata
if metadata is not None and 'invalid_tools' in metadata:
return repository_metadata
return None
|
the-stack_106_27265 | # -*- coding: utf-8 -*-
# @Time : 19-8-21 上午11:29
# @Author : Redtree
# @File : r13.py
# @Desc : Werewolf (狼人)
from data import skills
DATA = {
'code':12,
'name':'狼人',
'attack':3,
'defend':0,
'hp':8,
'skill':skills.DATA[13],
'desc':'一个人的夜我的心,应该放在哪里'
} |
the-stack_106_27266 | # -*- coding: utf-8 -*-
import sys
from pyqtgraph.Qt import QtCore, QtGui
from visualizer import NMF4DVisualizer
from nmf import *
from generator import *
def main():
# params
# =========================
N = 10
M = 50
K = 3
T = 10
lr_pgd1 = 0.02
lr_pgd2 = 0.15
nmf_iter = 100
nmf_clk = 50
noise_coeff = 0
seed = 5
sampler = runif_matrix_normalized # X ~ U(0, 1)
# sampler = prnorm_matrix_normalized # X' ~ N(0, a^2), X = max(X', eps)
torch.manual_seed(seed)
# models
# =========================
initializer = MUNMF(K=K, T=0)
MU = MUNMF(K=K, T=1)
PGD1 = PGDNMF(K=K, T=1, eta=lr_pgd1, order=1)
PGD2 = PGDNMF(K=K, T=1, eta=lr_pgd2, order=2)
initializer.eval()
MU.eval()
PGD1.eval()
PGD2.eval()
v = NMF4DVisualizer()
# draw the ground truth matrix
# =========================
X, D_gt, C_gt = sampler(N, M, K, noise_coeff=noise_coeff)
v.connect_vertices(D_gt, width=5, color=(1, 0.2, 1, 1))
# draw X as points
# each point corresponds to each column vector of X
v.draw_X(X)
# initialize factor matrices
Dinit, Cinit = initializer(X)
# draw D0
v.connect_vertices(Dinit, color=(.8, .8, .8, 1))
# draw updating process of each model
# =========================
def make_prediction_data(update_D, update_C, iter=50):
Ds = []
Dpred, Cpred = Dinit, Cinit
for i in range(iter):
Dpred = update_D(X=X, D=Dpred, C=Cpred)
Dpred = normalize_col(Dpred)
Cpred = update_C(X=X, D=Dpred, C=Cpred)
Cpred = normalize_col(Cpred)
Ds.append(Dpred.detach().numpy())
return np.array(Ds)
# MU
# ----------
D_MU = make_prediction_data(MU.update_D, MU.update_C, nmf_iter)
v.draw_model_history(D_MU, rgb=(.8, .4, .2), clock=nmf_clk)
# 1st-order PGD
# ----------
D_PGD1 = make_prediction_data(PGD1.update_D, PGD1.update_C, nmf_iter)
v.draw_model_history(D_PGD1, rgb=(.5, .9, .8), clock=nmf_clk)
# 2nd-order PGD
# ----------
D_PGD2 = make_prediction_data(PGD2.update_D, PGD2.update_C, nmf_iter)
v.draw_model_history(D_PGD2, rgb=(.5, .8, .3), clock=nmf_clk)
# random
# ----------
# Ds = []
# for i in range(30):
# D = np.random.random((4, K))
# D = D / np.sum(D, axis=0)
# Ds.append(D)
# Ds = np.array(Ds)
# v.draw_model_history(Ds, rgb=(.4, .8, .2), clock=50)
# Start Qt event loop unless running in interactive mode.
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
main()
|
the-stack_106_27267 | # SPDX-FileCopyrightText: 2021 easyCore contributors <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
from easyCore.Symmetry.SymOp import SymmOp
class Twin:
def __init__(self, origin=(0, 0, 0), theta: float = 0.0, phi: float = 0.0):
self.theta = theta
self.phi = phi
self.origin = list(origin)
self.axis1 = [1, 0, 0]
self.axis2 = [0, 0, 1]
@property
def operation(self):
return SymmOp.from_origin_axis_angle(self.origin, self.axis1, self.phi) * \
SymmOp.from_origin_axis_angle(self.origin, self.axis2, self.theta)
|
the-stack_106_27268 | import numpy as np
from sampling import madowSampling
from scipy.special import comb
from decimal import *
from sacred import Experiment
from config import initialise
from easydict import EasyDict as edict
getcontext().prec = 100
ex = Experiment()
ex = initialise(ex)
class sageHedge:
def __init__(self, args):
self.args = args
self.eta = args.eta
self.N = args.N
self.files = np.arange(args.N)
self.k = args.k
self.weights = args.weights
self.R = args.R
if self.args.method == "iterative":
self.R = args.R
self.a = args.a
self.W = args.W
self.mul_hedge = Decimal(np.exp(self.eta))
self.div_hedge = Decimal(np.exp(-self.eta))
self.mulvec = np.power(self.div_hedge, np.flip(range(self.N - self.k + 1), axis=0))
def initialize(self):
self.weights = np.ones(self.N)
self.R = np.zeros(self.N)
if self.args.method == "iterative":
self.W = np.ones((self.N, self.N - self.k + 1), dtype=Decimal)
one = Decimal(1)
self.inv_weights = [one for j in range(self.N)]
self.a = np.zeros(self.N - self.k + 1, dtype=Decimal)
for j in range(self.N - self.k + 1):
self.a[j] = (-1) ** (self.N - j) * comb(self.N, j, exact=True)
def get_normalized_weights(self):
probs = self.weights / np.sum(self.weights)
return probs
def get_kset_direct(self, y):
onehot_y = np.zeros(self.N)
onehot_y[y] = 1
delta = np.exp(self.eta * onehot_y)
self.weights = self.weights * delta
K = self.elementary_symmetric_polynomial(self.weights, self.k)
p = np.zeros(self.N)
for i in range(self.N):
W_i = np.delete(self.weights, i)
p[i] = (self.weights[i] * self.elementary_symmetric_polynomial(W_i, self.k - 1)) / K
return p, madowSampling(self.N, p, self.k)
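    # Note (added): p[i] above is the marginal probability that element i belongs
    # to a k-subset drawn with probability proportional to the product of its
    # weights: p_i = w_i * e_{k-1}(w_{-i}) / e_k(w), with e_j the j-th elementary
    # symmetric polynomial; madowSampling then draws a k-set matching these marginals.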
def elementary_symmetric_polynomial(self, X, k):
X_ = np.zeros(self._next_power_of_2(len(X)), dtype=np.float64)
X_[:len(X)] = X
W = np.ones_like(X_, dtype=np.float64)
X_ = np.vstack((W, X_)).T
K = X_.shape[0]
while K > 1:
X_temp = []
for i in range(0, K, 2):
x, y = list(X_[i]), list(X_[i + 1])
X_temp.append(np.polymul(x, y)[:k + 1])
X_ = np.asarray(X_temp)
K = K // 2
return X_.flatten()[k]
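    # Worked example (added): the pairwise np.polymul loop expands prod_i (x + X[i]);
    # entry k of the highest-degree-first coefficient list is e_k(X). For
    # X = [1, 2, 3] and k = 2 this returns 1*2 + 1*3 + 2*3 = 11; zero-padding to a
    # power of two only multiplies by extra powers of x and leaves that entry unchanged.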
def get_kset_iterative(self, y):
p = np.matmul(self.W, self.a) / self.a[self.N - self.k]
if np.abs(p.sum() - self.k) > 1e-4:
print(p.sum())
S = madowSampling(self.N, p, self.k)
a_new = np.zeros(self.N - self.k + 1, dtype=Decimal)
a_new[0] = self.mul_hedge * self.a[0]
for i in range(1, self.N - self.k + 1):
a_new[i] = self.mul_hedge * self.a[i] + self.inv_weights[y] * (a_new[i - 1] - self.a[i - 1])
self.inv_weights[y] = self.inv_weights[y] * self.div_hedge
self.W[y, :] = np.multiply(self.W[y, :], self.mulvec)
self.a = a_new
return p, S
def get_kset_large(self, y):
# To overcome the numerical precision issues while dealing with large datasets, the following hack
# is utilised. We use the fact that FTPL with Gumbel Noise is equivalent to Hedge in expectation.
# Cite: http://proceedings.mlr.press/v35/abernethy14.pdf
# Although, the equivalence is not exact in our case (as instead of a single expert, we deal with
# a set of experts), the hack works exceptionally well for practical purposes.
        perturbed_R = self.R + np.random.gumbel(scale=1. / self.eta, size=self.N)  # i.i.d. noise per expert
kset = np.argsort(perturbed_R)[::-1][:self.k]
self.R[y] += 1
return None, kset
def get_kset(self, y):
p, kset = None, None
if self.args.method == "large":
p, kset = self.get_kset_large(y)
if self.args.method == "iterative":
p, kset = self.get_kset_iterative(y)
if self.args.method == "direct":
p, kset = self.get_kset_direct(y)
return p, kset
def _next_power_of_2(self, n):
count = 0
if n and not (n & (n - 1)):
return n
while n != 0:
n >>= 1
count += 1
return 1 << count
@ex.automain
def main(_run):
args = edict(_run.config)
hedge = sageHedge(args)
hedge.initialize()
for t in range(args.T):
y = np.random.randint(args.N)
p, kset = hedge.get_kset(y)
if p is None:
print(t, y, kset)
else:
print(t, y, p.sum(), kset)
|
the-stack_106_27270 | # Python program to find the smallest and second smallest elements
# of a list in O(n); the largest / second-largest logic is included below
# but commented out
def Range(list1):
#largest = list1[0]
lowest = list1[0]
#largest2 = None
lowest2 = None
for item in list1[1:]:
#if item > largest:
#largest2 = largest
#largest = item
#elif largest2 == None or largest2 < item:
#largest2 = item
if item < lowest:
lowest2 = lowest
lowest = item
elif lowest2 == None or lowest2 > item:
lowest2 = item
#print("Largest element is:", largest)
#print("Smallest element is:", lowest)
#print("Second Largest element is:", largest2)
print("Second Smallest element is:", lowest2)
# Driver Code
list1 = [12, 45, 2, 41, 31, 10, 8, 6, 4]
Range(list1)
|
the-stack_106_27271 | #!/usr/bin/env python
"""Tests for grr.parsers.sqlite_file."""
import os
import StringIO
from grr.lib import flags
from grr.parsers import sqlite_file
from grr.test_lib import test_lib
class SQLiteFileTest(test_lib.GRRBaseTest):
"""Test parsing of sqlite database files."""
query = "SELECT * FROM moz_places;"
def testErrors(self):
"""Test empty files don't raise errors."""
database_file = sqlite_file.SQLiteFile(StringIO.StringIO())
entries = [x for x in database_file.Query(self.query)]
self.assertEqual(len(entries), 0)
# The places.sqlite contains 92 rows in table moz_places
def testTmpFiles(self):
"""This should force a write to a tmp file."""
filename = os.path.join(self.base_path, "places.sqlite")
file_stream = StringIO.StringIO(open(filename, "rb").read())
database_file = sqlite_file.SQLiteFile(file_stream)
entries = [x for x in database_file.Query(self.query)]
self.assertEqual(len(entries), 92)
# Test the tempfile is deleted
self.assertEqual(database_file._delete_file, True)
filename = database_file.name
self.assertTrue(os.path.exists(filename))
del database_file
self.assertFalse(os.path.exists(filename))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_106_27275 | #!/usr/bin/env python
# 1 dragon with 1 point light
# Copyright (c) 2011-2020 Hiroshi Tsubokawa
import fujiyama
si = fujiyama.SceneInterface()
#plugins
si.OpenPlugin('constant_shader', 'ConstantShader')
si.OpenPlugin('plastic_shader', 'PlasticShader')
si.OpenPlugin('stanfordply_procedure', 'StanfordPlyProcedure')
#Camera
si.NewCamera('cam1', 'PerspectiveCamera')
si.SetSampleProperty3('cam1', 'translate', 0, 2, 6, 0)
si.SetSampleProperty3('cam1', 'translate', 0, 2, 5.3, 1)
si.SetSampleProperty3('cam1', 'rotate', -10, 0, 0, 0)
#Light
si.NewLight('light1', 'PointLight')
si.SetProperty3('light1', 'translate', -10, 12, 10)
#Texture
si.NewTexture('tex1', '../../hdr/ennis.hdr')
#Shader
si.NewShader('dragon_shader', 'plastic_shader')
si.SetProperty3('dragon_shader', 'diffuse', .1, .4, .0)
si.NewShader('floor_shader', 'plastic_shader')
si.SetProperty3('floor_shader', 'diffuse', .2, .25, .3)
si.NewShader('dome_shader', 'constant_shader')
si.SetProperty3('dome_shader', 'diffuse', .8, .8, .8)
si.AssignTexture('dome_shader', 'texture', 'tex1')
#Mesh
si.NewMesh('dragon_mesh')
si.NewMesh('floor_mesh')
si.NewMesh('dome_mesh')
#Procedure
si.NewProcedure('dragon_proc', 'stanfordply_procedure')
si.AssignMesh('dragon_proc', 'mesh', 'dragon_mesh')
si.SetStringProperty('dragon_proc', 'filepath', '../../ply/dragon.ply')
si.SetStringProperty('dragon_proc', 'io_mode', 'r')
si.RunProcedure('dragon_proc')
si.NewProcedure('floor_proc', 'stanfordply_procedure')
si.AssignMesh('floor_proc', 'mesh', 'floor_mesh')
si.SetStringProperty('floor_proc', 'filepath', '../../ply/floor.ply')
si.SetStringProperty('floor_proc', 'io_mode', 'r')
si.RunProcedure('floor_proc')
si.NewProcedure('dome_proc', 'stanfordply_procedure')
si.AssignMesh('dome_proc', 'mesh', 'dome_mesh')
si.SetStringProperty('dome_proc', 'filepath', '../../ply/dome.ply')
si.SetStringProperty('dome_proc', 'io_mode', 'r')
si.RunProcedure('dome_proc')
#ObjectInstance
si.NewObjectInstance('dragon1', 'dragon_mesh')
si.AssignShader('dragon1', 'DEFAULT_SHADING_GROUP', 'dragon_shader')
si.SetProperty3('dragon1', 'rotate', 0, -90, 0)
si.SetProperty3('dragon1', 'scale', .5, .5, .5)
si.NewObjectInstance('floor1', 'floor_mesh')
si.AssignShader('floor1', 'DEFAULT_SHADING_GROUP', 'floor_shader')
si.NewObjectInstance('dome1', 'dome_mesh')
si.AssignShader('dome1', 'DEFAULT_SHADING_GROUP', 'dome_shader')
#ObjectGroup
si.NewObjectGroup('group1')
si.AddObjectToGroup('group1', 'dragon1')
si.AssignObjectGroup('dragon1', 'shadow_target', 'group1')
si.AssignObjectGroup('floor1', 'shadow_target', 'group1')
#FrameBuffer
si.NewFrameBuffer('fb1', 'rgba')
#Renderer
si.NewRenderer('ren1')
si.AssignCamera('ren1', 'cam1')
si.AssignFrameBuffer('ren1', 'fb1')
si.SetProperty2('ren1', 'resolution', 640, 480)
#si.SetProperty2('ren1', 'resolution', 160, 120)
si.SetProperty2('ren1', 'pixelsamples', 9, 9)
#Rendering
si.RenderScene('ren1')
#Output
si.SaveFrameBuffer('fb1', '../camera_motion_blur.fb')
#Run commands
si.Run()
#si.Print()
|
the-stack_106_27278 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.pipeline.policies import ContentDecodePolicy
from azure.core.pipeline.policies import SansIOHTTPPolicy
from ._models import TextDocumentBatchStatistics
from ._lro import _FINISHED
class TextAnalyticsResponseHookPolicy(SansIOHTTPPolicy):
def __init__(self, **kwargs):
self._response_callback = kwargs.get("raw_response_hook")
self._is_lro = None
super().__init__()
def on_request(self, request):
self._response_callback = request.context.options.pop(
"raw_response_hook", self._response_callback
)
def on_response(self, request, response):
if self._is_lro is None:
# determine LRO based off of initial response. If 202, we say it's an LRO
self._is_lro = response.http_response.status_code == 202
if self._response_callback:
data = ContentDecodePolicy.deserialize_from_http_generics(
response.http_response
)
if self._is_lro and (not data or data.get("status", "").lower() not in _FINISHED):
return
if response.http_response.status_code == 429:
return
if data:
inner = data.get("results", data) # language API compat
statistics = inner.get("statistics", None)
model_version = inner.get("modelVersion", None)
batch_statistics = TextDocumentBatchStatistics._from_generated( # pylint: disable=protected-access
statistics
)
response.statistics = batch_statistics
response.model_version = model_version
response.raw_response = data
self._response_callback(response)
|
the-stack_106_27279 | from tkinter import *
ventana= Tk()
ventana.geometry("700x400")
ventana.title("Formularios en Tkinter | Pilar Goonzález")
# Header text
encabezado = Label(ventana, text="Formularios con Tkinter - Pilar G")
encabezado.config(
fg="white",
bg="darkgray",
font=("Open Sans",18),
padx=10,
pady=10
)
# 12-column grid (set via the initial columnspan)
encabezado.grid(row=0,column=0,columnspan=12,sticky=W)
# Label for the name field
label = Label(ventana, text="Nombre")
label.grid(row=1,column=0,sticky=W,padx=5, pady=5)
# Text entry (name)
campo_texto = Entry(ventana)
# sticky pins the text entry to the left
campo_texto.grid(row=1,column=1,sticky=W,padx=5,pady=5)
campo_texto.config(justify="right",state="normal")
# Label for the last-name field
label = Label(ventana, text="Apellidos")
label.grid(row=2,column=0,sticky=W,padx=5, pady=5)
# Text entry (last name)
campo_texto = Entry(ventana)
# sticky pins the text entry to the left
campo_texto.grid(row=2,column=1,sticky=W,padx=5,pady=5)
# Right-justified: entered text appears from the right; state can also be set to "disabled"
campo_texto.config(justify="right",state="normal")
# Label for the description field
label = Label(ventana, text="Descripción")
label.grid(row=3,column=0,sticky=W,padx=5, pady=5)
# LARGE text widget (description)
campo_grande = Text(ventana)
campo_grande.grid(row=3, column=1,sticky=N,padx=5, pady=5)
campo_grande.config(
width=30,
height=5,
font=("Arial",12),
padx=15,
pady=15
)
# Button
Label(ventana).grid(row=4,column=1)
boton = Button(ventana,text="Enviar")
boton.grid(row=5,column=1,sticky=W)
boton.config(padx=15,pady=10,bg="green",fg="white")
ventana.mainloop() |
the-stack_106_27282 | """ Represents a bundle. In the words of the Apple docs, it's a convenient way to deliver
software. Really it's a particular kind of directory structure, with one main executable,
well-known places for various data files and libraries,
and tracking hashes of all those files for signing purposes.
For isign, we have two main kinds of bundles: the App, and the Framework (a reusable
library packaged along with its data files.) An App may contain many Frameworks, but
a Framework has to be re-signed independently.
See the Apple Developer Documentation "About Bundles" """
import glob
import logging
import os
import shutil
import copy
from os.path import basename, exists, join, splitext
import biplist
from . import code_resources, signable
from .exceptions import NotMatched
from .signer import openssl_command
from .utils import PY3
log = logging.getLogger(__name__)
def is_info_plist_native(plist):
""" If an bundle is for native iOS, it has these properties in the Info.plist """
key = 'CFBundleSupportedPlatforms'
value = 'iPhoneOS'
if PY3:
key = key.encode()
value = value.encode()
return key in plist and value in plist[key]
class Bundle(object):
""" A bundle is a standard directory structure, a signable, installable set of files.
Apps are Bundles, but so are some kinds of Frameworks (libraries) """
helpers = []
signable_class = None
def __init__(self, path):
self.path = path
self.info_path = join(self.path, 'Info.plist')
if not exists(self.info_path):
raise NotMatched("no Info.plist found; probably not a bundle")
self.info = biplist.readPlist(self.info_path)
self.orig_info = None
if not is_info_plist_native(self.info):
raise NotMatched("not a native iOS bundle")
# will be added later
self.seal_path = None
def get_executable_path(self):
""" Path to the main executable. For an app, this is app itself. For
a Framework, this is the main framework """
executable_name = None
if 'CFBundleExecutable' in self.info:
executable_name = self.info['CFBundleExecutable']
else:
executable_name, _ = splitext(basename(self.path))
executable = join(self.path, executable_name)
if not exists(executable):
raise Exception(
'could not find executable for {0}'.format(self.path))
return executable
def update_info_props(self, new_props):
if self.orig_info is None:
self.orig_info = copy.deepcopy(self.info)
changed = False
if ('CFBundleIdentifier' in new_props and
'CFBundleURLTypes' in self.info and
'CFBundleURLTypes' not in new_props):
# The bundle identifier changed. Check CFBundleURLTypes for
# CFBundleURLName values matching the old bundle
# id if it's not being set explicitly
old_bundle_id = self.info['CFBundleIdentifier']
new_bundle_id = new_props['CFBundleIdentifier']
for url_type in self.info['CFBundleURLTypes']:
if 'CFBundleURLName' not in url_type:
continue
if url_type['CFBundleURLName'] == old_bundle_id:
url_type['CFBundleURLName'] = new_bundle_id
changed = True
for key, val in new_props.items():
is_new_key = key not in self.info
if is_new_key or self.info[key] != val:
if is_new_key:
log.warn("Adding new Info.plist key: {}".format(key))
self.info[key] = val
changed = True
if changed:
biplist.writePlist(self.info, self.info_path, binary=True)
else:
self.orig_info = None
def info_props_changed(self):
return self.orig_info is not None
def info_prop_changed(self, key):
if not self.orig_info:
# No props have been changed
return False
if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:
return False
return True
def get_info_prop(self, key):
return self.info[key]
def sign_dylibs(self, signer, path):
""" Sign all the dylibs in this directory """
for dylib_path in glob.glob(join(path, '*.dylib')):
dylib = signable.Dylib(self, dylib_path)
dylib.sign(self, signer)
def sign(self, signer):
""" Sign everything in this bundle, recursively with sub-bundles """
# log.debug("SIGNING: %s" % self.path)
frameworks_path = join(self.path, 'Frameworks')
if exists(frameworks_path):
# log.debug("SIGNING FRAMEWORKS: %s" % frameworks_path)
# sign all the frameworks
for framework_name in os.listdir(frameworks_path):
framework_path = join(frameworks_path, framework_name)
# log.debug("checking for framework: %s" % framework_path)
try:
framework = Framework(framework_path)
# log.debug("resigning: %s" % framework_path)
framework.resign(signer)
except NotMatched:
# log.debug("not a framework: %s" % framework_path)
continue
# sign all the dylibs under Frameworks
self.sign_dylibs(signer, frameworks_path)
# sign any dylibs in the main directory (rare, but it happens)
self.sign_dylibs(signer, self.path)
plugins_path = join(self.path, 'PlugIns')
if exists(plugins_path):
# sign the appex executables
appex_paths = glob.glob(join(plugins_path, '*.appex'))
for appex_path in appex_paths:
plist_path = join(appex_path, 'Info.plist')
if not exists(plist_path):
continue
plist = biplist.readPlist(plist_path)
appex_exec_path = join(appex_path, plist['CFBundleExecutable'])
appex = signable.Appex(self, appex_exec_path)
appex.sign(self, signer)
# then create the seal
# TODO maybe the app should know what its seal path should be...
self.seal_path = code_resources.make_seal(self.get_executable_path(),
self.path)
# then sign the app
executable = self.signable_class(self, self.get_executable_path())
executable.sign(self, signer)
def resign(self, signer):
""" signs bundle, modifies in place """
self.sign(signer)
log.debug("Resigned bundle at <%s>", self.path)
class Framework(Bundle):
""" A bundle that comprises reusable code. Similar to an app in that it has
its own resources and metadata. Not like an app because the main executable
doesn't have Entitlements, or an Application hash, and it doesn't have its
own provisioning profile. """
# the executable in this bundle will be a Framework
signable_class = signable.Framework
def __init__(self, path):
super(Framework, self).__init__(path)
class App(Bundle):
""" The kind of bundle that is visible as an app to the user.
Contains the provisioning profile, entitlements, etc. """
# the executable in this bundle will be an Executable (i.e. the main
# executable of an app)
signable_class = signable.Executable
def __init__(self, path):
super(App, self).__init__(path)
self.entitlements_path = join(self.path,
'Entitlements.plist')
self.provision_path = join(self.path,
'embedded.mobileprovision')
def provision(self, provision_path):
shutil.copyfile(provision_path, self.provision_path)
@staticmethod
def extract_entitlements(provision_path):
""" Given a path to a provisioning profile, return the entitlements
encoded therein """
cmd = [
'smime',
'-inform', 'der',
'-verify', # verifies content, prints verification status to STDERR,
# outputs content to STDOUT. In our case, will be an XML plist
'-noverify', # accept self-signed certs. Not the opposite of -verify!
'-in', provision_path
]
# this command always prints 'Verification successful' to stderr.
(profile_text, err) = openssl_command(cmd, data=None, expect_err=True)
if err and err.strip() != 'Verification successful':
log.error('Received unexpected error from openssl: {}'.format(err))
plist_dict = biplist.readPlistFromString(profile_text)
if 'Entitlements' not in plist_dict:
log.debug('failed to get entitlements in provisioning profile')
raise Exception('could not find Entitlements in {}'.format(provision_path))
return plist_dict['Entitlements']
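    # For reference (added): the argument list built above corresponds to the shell command
    #
    #     openssl smime -inform der -verify -noverify -in <provision_path>
    #
    # which writes the embedded XML plist to stdout and the verification status
    # to stderr.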
def write_entitlements(self, signer, provisioning_path):
""" Given a path to a provisioning profile, write its entitlements to
self.entitlements_path """
entitlements = self.extract_entitlements(provisioning_path)
biplist.writePlist(entitlements, self.entitlements_path, binary=False)
log.debug("wrote Entitlements to {0}".format(self.entitlements_path))
def resign(self, signer, provisioning_profile):
""" signs app in place """
# copy the provisioning profile in
self.provision(provisioning_profile)
# Add entitlements from the pprof into the app
self.write_entitlements(signer, provisioning_profile)
# actually resign this bundle now
super(App, self).resign(signer)
|
the-stack_106_27284 | from django.urls import path
# views(url for home page)
from . import views
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search'),
]
|
the-stack_106_27285 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetNetworkConfigurationDnsSettings(Model):
"""Describes a virtual machines scale sets network configuration's DNS
settings.
:param dns_servers: List of DNS servers IP addresses
:type dns_servers: list[str]
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetNetworkConfigurationDnsSettings, self).__init__(**kwargs)
self.dns_servers = kwargs.get('dns_servers', None)
|
the-stack_106_27287 | ###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.vaapi.util import *
from ....lib.gstreamer.vaapi.decoder import DecoderTest
spec = load_test_spec("vp8", "decode")
@slash.requires(*platform.have_caps("decode", "vp8"))
@slash.requires(*have_gst_element("vaapivp8dec"))
class default(DecoderTest):
def before(self):
# default metric
self.metric = dict(type = "ssim", miny = 1.0, minu = 1.0, minv = 1.0)
self.caps = platform.get_caps("decode", "vp8")
super(default, self).before()
@slash.parametrize(("case"), sorted(spec.keys()))
def test(self, case):
vars(self).update(spec[case].copy())
dxmap = {".ivf" : "ivfparse", ".webm" : "matroskademux"}
ext = os.path.splitext(self.source)[1]
assert ext in dxmap.keys(), "Unrecognized source file extension {}".format(ext)
vars(self).update(
case = case,
gstdecoder = "{} ! vaapivp8dec".format(dxmap[ext]),
)
self.decode()
|
the-stack_106_27288 | """Django settings for Pontoon."""
from __future__ import absolute_import
import re
import os
import socket
from django.utils.functional import lazy
import dj_database_url
_dirname = os.path.dirname
ROOT = _dirname(_dirname(_dirname(os.path.abspath(__file__))))
def path(*args):
return os.path.join(ROOT, *args)
# Environment-dependent settings. These are loaded from environment
# variables.
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ["SECRET_KEY"]
# Is this a dev instance?
DEV = os.environ.get("DJANGO_DEV", "False") != "False"
DEBUG = os.environ.get("DJANGO_DEBUG", "False") != "False"
HEROKU_DEMO = os.environ.get("HEROKU_DEMO", "False") != "False"
# Automatically log in the user with username 'AUTO_LOGIN_USERNAME'
# and password 'AUTO_LOGIN_PASSWORD'
AUTO_LOGIN = os.environ.get("AUTO_LOGIN", "False") != "False"
AUTO_LOGIN_USERNAME = os.environ.get("AUTO_LOGIN_USERNAME", None)
AUTO_LOGIN_PASSWORD = os.environ.get("AUTO_LOGIN_PASSWORD", None)
LOGOUT_REDIRECT_URL = "/"
ADMINS = MANAGERS = (
(os.environ.get("ADMIN_NAME", ""), os.environ.get("ADMIN_EMAIL", "")),
)
# A list of project manager email addresses to send project requests to
PROJECT_MANAGERS = os.environ.get("PROJECT_MANAGERS", "").split(",")
DATABASES = {
"default": dj_database_url.config(default="mysql://root@localhost/pontoon")
}
# Ensure that psycopg2 uses a secure SSL connection.
if not DEV and not DEBUG:
if "OPTIONS" not in DATABASES["default"]:
DATABASES["default"]["OPTIONS"] = {}
DATABASES["default"]["OPTIONS"]["sslmode"] = "require"
FRONTEND_DIR = os.path.join(ROOT, "frontend")
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.environ.get("STATIC_ROOT", path("static"))
# Optional CDN hostname for static files, e.g. '//asdf.cloudfront.net'
STATIC_HOST = os.environ.get("STATIC_HOST", "")
SESSION_COOKIE_HTTPONLY = os.environ.get("SESSION_COOKIE_HTTPONLY", "True") != "False"
SESSION_COOKIE_SECURE = os.environ.get("SESSION_COOKIE_SECURE", "True") != "False"
APP_URL_KEY = "APP_URL"
SITE_URL = os.environ.get("SITE_URL", "http://localhost:8000")
# Custom LD_LIBRARY_PATH environment variable for SVN
SVN_LD_LIBRARY_PATH = os.environ.get("SVN_LD_LIBRARY_PATH", "")
# URL to the RabbitMQ server
BROKER_URL = os.environ.get("RABBITMQ_URL", None)
# Google Cloud Translation API key
GOOGLE_TRANSLATE_API_KEY = os.environ.get("GOOGLE_TRANSLATE_API_KEY", "")
# Microsoft Translator API Key
MICROSOFT_TRANSLATOR_API_KEY = os.environ.get("MICROSOFT_TRANSLATOR_API_KEY", "")
# Google Analytics Key
GOOGLE_ANALYTICS_KEY = os.environ.get("GOOGLE_ANALYTICS_KEY", "")
# Raygun.io configuration
RAYGUN4PY_CONFIG = {"api_key": os.environ.get("RAYGUN_APIKEY", "")}
# Email settings
EMAIL_HOST_USER = os.environ.get("SENDGRID_USERNAME", "")
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_PASSWORD = os.environ.get("SENDGRID_PASSWORD", "")
# Log emails to console if the SendGrid credentials are missing.
if EMAIL_HOST_USER and EMAIL_HOST_PASSWORD:
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
else:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Environment-independent settings. These shouldn't have to change
# between server environments.
ROOT_URLCONF = "pontoon.urls"
INSTALLED_APPS = (
"pontoon.actionlog",
"pontoon.administration",
"pontoon.base",
"pontoon.contributors",
"pontoon.checks",
"pontoon.in_context",
"pontoon.localizations",
"pontoon.machinery",
"pontoon.projects",
"pontoon.sync",
"pontoon.tags",
"pontoon.teams",
"pontoon.terminology",
"pontoon.tour",
"pontoon.translate",
"pontoon.translations",
"pontoon.homepage",
# Django contrib apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
# Django sites app is required by django-allauth
"django.contrib.sites",
# Third-party apps, patches, fixes
"django_jinja",
"django_nose",
"pipeline",
"session_csrf",
"guardian",
"corsheaders",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.fxa",
"allauth.socialaccount.providers.github",
"allauth.socialaccount.providers.google",
"allauth.socialaccount.providers.gitlab",
"notifications",
"graphene_django",
"webpack_loader",
"django_ace",
)
BLOCKED_IPS = os.environ.get("BLOCKED_IPS", "").split(",")
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.middleware.gzip.GZipMiddleware",
"pontoon.base.middleware.RaygunExceptionMiddleware",
"pontoon.base.middleware.BlockedIpMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"session_csrf.CsrfMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"csp.middleware.CSPMiddleware",
"pontoon.base.middleware.AutomaticLoginUserMiddleware",
)
CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.media",
"django.template.context_processors.request",
"session_csrf.context_processor",
"django.contrib.messages.context_processors.messages",
"pontoon.base.context_processors.globals",
)
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
"NAME": "jinja2",
"APP_DIRS": True,
"DIRS": [os.path.join(FRONTEND_DIR, "build")],
"OPTIONS": {
"match_extension": "",
"match_regex": re.compile(
r"""
^(?!(
admin|
registration|
account|
socialaccount|
graphene|
)/).*\.(
html|
jinja|
js|
)$
""",
re.VERBOSE,
),
"context_processors": CONTEXT_PROCESSORS,
"extensions": [
"jinja2.ext.do",
"jinja2.ext.loopcontrols",
"jinja2.ext.with_",
"jinja2.ext.i18n",
"jinja2.ext.autoescape",
"django_jinja.builtins.extensions.CsrfExtension",
"django_jinja.builtins.extensions.CacheExtension",
"django_jinja.builtins.extensions.TimezoneExtension",
"django_jinja.builtins.extensions.UrlsExtension",
"django_jinja.builtins.extensions.StaticFilesExtension",
"django_jinja.builtins.extensions.DjangoFiltersExtension",
"pipeline.jinja2.PipelineExtension",
"webpack_loader.contrib.jinja2ext.WebpackExtension",
],
},
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [path("pontoon/base/templates/django")],
"OPTIONS": {
"debug": DEBUG,
"context_processors": CONTEXT_PROCESSORS,
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
},
},
]
SESSION_COOKIE_SAMESITE = "lax"
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
"guardian.backends.ObjectPermissionBackend",
]
# This variable is required by django-guardian.
# App supports giving permissions for anonymous users.
ANONYMOUS_USER_ID = -1
GUARDIAN_RAISE_403 = True
PIPELINE_CSS = {
"base": {
"source_filenames": (
"css/fontawesome-all.css",
"css/nprogress.css",
"css/boilerplate.css",
"css/fonts.css",
"css/style.css",
),
"output_filename": "css/base.min.css",
},
"admin": {
"source_filenames": ("css/table.css", "css/admin.css",),
"output_filename": "css/admin.min.css",
},
"admin_project": {
"source_filenames": ("css/double_list_selector.css", "css/admin_project.css",),
"output_filename": "css/admin_project.min.css",
},
"project": {
"source_filenames": (
"css/table.css",
"css/request.css",
"css/contributors.css",
"css/heading_info.css",
"css/sidebar_menu.css",
"css/multiple_team_selector.css",
"css/manual_notifications.css",
),
"output_filename": "css/project.min.css",
},
"localization": {
"source_filenames": (
"css/table.css",
"css/contributors.css",
"css/heading_info.css",
"css/info.css",
),
"output_filename": "css/localization.min.css",
},
"projects": {
"source_filenames": ("css/heading_info.css", "css/table.css",),
"output_filename": "css/projects.min.css",
},
"team": {
"source_filenames": (
"css/table.css",
"css/double_list_selector.css",
"css/contributors.css",
"css/heading_info.css",
"css/team.css",
"css/info.css",
"css/request.css",
),
"output_filename": "css/team.min.css",
},
"teams": {
"source_filenames": (
"css/heading_info.css",
"css/table.css",
"css/request.css",
),
"output_filename": "css/teams.min.css",
},
"sync_logs": {
"source_filenames": ("css/sync_logs.css",),
"output_filename": "css/sync_logs.min.css",
},
"profile": {
"source_filenames": ("css/contributor.css", "css/profile.css",),
"output_filename": "css/profile.min.css",
},
"settings": {
"source_filenames": (
"css/multiple_team_selector.css",
"css/contributor.css",
"css/team_selector.css",
"css/settings.css",
),
"output_filename": "css/settings.min.css",
},
"notifications": {
"source_filenames": ("css/sidebar_menu.css", "css/notifications.css",),
"output_filename": "css/notifications.min.css",
},
"machinery": {
"source_filenames": ("css/team_selector.css", "css/machinery.css",),
"output_filename": "css/machinery.min.css",
},
"contributors": {
"source_filenames": ("css/heading_info.css", "css/contributors.css",),
"output_filename": "css/contributors.min.css",
},
"in_context": {
"source_filenames": ("css/bootstrap.min.css", "css/agency.css",),
"output_filename": "css/in_context.min.css",
},
"terms": {
"source_filenames": ("css/terms.css",),
"output_filename": "css/terms.min.css",
},
"homepage": {
"source_filenames": ("css/fullpage.css", "css/homepage.css",),
"output_filename": "css/homepage.min.css",
},
}
PIPELINE_JS = {
"base": {
"source_filenames": (
"js/lib/jquery-1.11.1.min.js",
"js/lib/jquery.timeago.js",
"js/lib/jquery.color-2.1.2.js",
"js/lib/nprogress.js",
"js/main.js",
),
"output_filename": "js/base.min.js",
},
"admin": {
"source_filenames": ("js/table.js",),
"output_filename": "js/admin.min.js",
},
"admin_project": {
"source_filenames": (
"js/lib/jquery-ui.js",
"js/double_list_selector.js",
"js/admin_project.js",
),
"output_filename": "js/admin_project.min.js",
},
"localization": {
"source_filenames": (
"js/table.js",
"js/progress-chart.js",
"js/tabs.js",
"js/info.js",
),
"output_filename": "js/localization.min.js",
},
"project": {
"source_filenames": (
"js/table.js",
"js/request.js",
"js/progress-chart.js",
"js/tabs.js",
"js/sidebar_menu.js",
"js/multiple_team_selector.js",
"js/manual_notifications.js",
),
"output_filename": "js/project.min.js",
},
"projects": {
"source_filenames": ("js/table.js", "js/progress-chart.js",),
"output_filename": "js/projects.min.js",
},
"team": {
"source_filenames": (
"js/table.js",
"js/progress-chart.js",
"js/double_list_selector.js",
"js/bugzilla.js",
"js/tabs.js",
"js/request.js",
"js/permissions.js",
"js/info.js",
),
"output_filename": "js/team.min.js",
},
"teams": {
"source_filenames": ("js/table.js", "js/progress-chart.js", "js/request.js",),
"output_filename": "js/teams.min.js",
},
"profile": {
"source_filenames": ("js/contributor.js",),
"output_filename": "js/profile.min.js",
},
"settings": {
"source_filenames": (
"js/lib/jquery-ui.js",
"js/multiple_team_selector.js",
"js/team_selector.js",
"js/settings.js",
),
"output_filename": "js/settings.min.js",
},
"notifications": {
"source_filenames": ("js/sidebar_menu.js", "js/notifications.js",),
"output_filename": "js/notifications.min.js",
},
"machinery": {
"source_filenames": (
"js/lib/diff.js",
"js/lib/clipboard.min.js",
"js/team_selector.js",
"js/machinery.js",
),
"output_filename": "js/machinery.min.js",
},
"homepage": {
"source_filenames": ("js/lib/fullpage.js", "js/homepage.js"),
"output_filename": "js/homepage.min.js",
},
}
PIPELINE = {
"STYLESHEETS": PIPELINE_CSS,
"JAVASCRIPT": PIPELINE_JS,
"YUGLIFY_BINARY": path(
os.environ.get("YUGLIFY_BINARY", "node_modules/.bin/yuglify")
),
"BABEL_BINARY": path("node_modules/.bin/babel"),
"BABEL_ARGUMENTS": "--modules ignore",
"DISABLE_WRAPPER": True,
}
# Cache config
# If the environment contains configuration data for Memcached, use
# BMemcached for the cache backend. Otherwise, default to an in-memory
# cache.
if os.environ.get("MEMCACHE_SERVERS") is not None:
CACHES = {
"default": {"BACKEND": "django_bmemcached.memcached.BMemcached", "OPTIONS": {}}
}
else:
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "pontoon",
}
}
# Site ID is used by Django's Sites framework.
SITE_ID = 1
# Media and templates.
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path("media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/media/"
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = STATIC_HOST + "/static/"
STATICFILES_STORAGE = "pontoon.base.storage.CompressedManifestPipelineStorage"
STATICFILES_FINDERS = (
"pipeline.finders.PipelineFinder",
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
STATICFILES_DIRS = [
path("assets"),
os.path.join(FRONTEND_DIR, "build", "static"),
]
# Set ALLOWED_HOSTS based on SITE_URL setting.
def _allowed_hosts():
from django.conf import settings
from six.moves.urllib.parse import urlparse
host = urlparse(settings.SITE_URL).netloc # Remove protocol and path
result = [host]
# In order to be able to use ALLOWED_HOSTS to validate URLs, we need to
# have a version of the host that contains the port. This only applies
# to local development (usually the host is localhost:8000).
if ":" in host:
host_no_port = host.rsplit(":", 1)[0]
result = [host, host_no_port]
# add values from environment variable. Needed in case of URL/domain redirections
env_vars_str = os.getenv("ALLOWED_HOSTS", "127.0.0.1:8000")
env_vars = [x.strip() for x in env_vars_str.split(",")]
result.extend(env_vars)
return result
ALLOWED_HOSTS = lazy(_allowed_hosts, list)()
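# Illustrative example (not part of the original settings): with
# SITE_URL=http://localhost:8000 and no ALLOWED_HOSTS environment variable,
# _allowed_hosts() evaluates to something like
# ["localhost:8000", "localhost", "127.0.0.1:8000"].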
# Auth
# The first hasher in this list will be used for new passwords.
# Any other hasher in the list can be used for existing passwords.
PASSWORD_HASHERS = (
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
"django.contrib.auth.hashers.BCryptPasswordHasher",
"django.contrib.auth.hashers.SHA1PasswordHasher",
"django.contrib.auth.hashers.MD5PasswordHasher",
"django.contrib.auth.hashers.UnsaltedMD5PasswordHasher",
)
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {"console": {"class": "logging.StreamHandler"}},
"formatters": {
"verbose": {"format": "[%(levelname)s:%(name)s] %(asctime)s %(message)s"},
},
"loggers": {
"django": {"handlers": ["console"]},
"pontoon": {
"handlers": ["console"],
"level": os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "INFO"),
},
},
}
if DEBUG:
LOGGING["handlers"]["console"]["formatter"] = "verbose"
if os.environ.get("DJANGO_SQL_LOG", False):
LOGGING["loggers"]["django.db.backends"] = {
"level": "DEBUG",
"handlers": ["console"],
}
# Tests
TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
NOSE_ARGS = [
"--logging-filter=-factory,-django.db,-raygun4py",
"--logging-clear-handlers",
]
# Disable nose-progressive on CI due to ugly output.
if not os.environ.get("CI", False):
NOSE_ARGS.append("--with-progressive")
# General auth settings
LOGIN_URL = "/"
LOGIN_REDIRECT_URL = "/"
LOGIN_REDIRECT_URL_FAILURE = "/"
# Should robots.txt deny everything or disallow a calculated list of
# URLs we don't want to be crawled? Default is false, disallow
# everything.
ENGAGE_ROBOTS = False
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
# Set X-Frame-Options to DENY by default on all responses.
X_FRAME_OPTIONS = "DENY"
# Use correct header for detecting HTTPS on Heroku.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Do not set SECURE_HSTS_SECONDS.
# HSTS is being taken care of in pontoon/wsgi.py.
# SECURE_HSTS_SECONDS = 63072000
# X-Content-Type-Options: nosniff
# Disables browser MIME type sniffing
SECURE_CONTENT_TYPE_NOSNIFF = True
# x-xss-protection: 1; mode=block
# Activates the browser's XSS filtering and helps prevent XSS attacks
SECURE_BROWSER_XSS_FILTER = True
# Redirect non-HTTPS requests to HTTPS
SECURE_SSL_REDIRECT = not (DEBUG or os.environ.get("CI", False))
# Content-Security-Policy headers
CSP_DEFAULT_SRC = ("'none'",)
CSP_CHILD_SRC = ("https:",)
CSP_FRAME_SRC = ("https:",) # Older browsers
CSP_CONNECT_SRC = (
"'self'",
"https://bugzilla.mozilla.org/rest/bug",
)
CSP_FONT_SRC = ("'self'",)
CSP_IMG_SRC = (
"'self'",
"https:",
# Needed for ACE editor images
"data:",
"https://*.wp.com/pontoon.mozilla.org/",
"https://www.google-analytics.com",
"https://www.gravatar.com/avatar/",
)
CSP_SCRIPT_SRC = (
"'self'",
"'unsafe-eval'",
"'sha256-fDsgbzHC0sNuBdM4W91nXVccgFLwIDkl197QEca/Cl4='",
# Rules related to Google Analytics
"'sha256-G5/M3dBlZdlvno5Cibw42fbeLr2PTEGd1M909Z7vPZE='",
"https://www.google-analytics.com/analytics.js",
)
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
)
# Needed if site not hosted on HTTPS domains (like local setup)
if not (HEROKU_DEMO or SITE_URL.startswith("https")):
CSP_IMG_SRC = CSP_IMG_SRC + ("http://www.gravatar.com/avatar/",)
CSP_CHILD_SRC = CSP_FRAME_SRC = CSP_FRAME_SRC + ("http:",)
# For absolute urls
try:
DOMAIN = socket.gethostname()
except socket.error:
DOMAIN = "localhost"
PROTOCOL = "http://"
PORT = 80
# Names for slave databases from the DATABASES setting.
SLAVE_DATABASES = []
# Internationalization.
# Enable timezone-aware datetimes.
USE_TZ = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = os.environ.get("TZ", "UTC")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Enable Bugs tab on the team pages, pulling data from bugzilla.mozilla.org.
# See bug 1567402 for details. A Mozilla-specific variable.
ENABLE_BUGS_TAB = os.environ.get("ENABLE_BUGS_TAB", "False") != "False"
# Bleach tags and attributes
ALLOWED_TAGS = [
"a",
"abbr",
"acronym",
"b",
"blockquote",
"br",
"code",
"em",
"i",
"li",
"ol",
"p",
"strong",
"ul",
]
ALLOWED_ATTRIBUTES = {
"a": ["href", "title", "target"],
"abbr": ["title"],
"acronym": ["title"],
}
SYNC_TASK_TIMEOUT = 60 * 60 * 1 # 1 hour
SYNC_LOG_RETENTION = 90 # days
MANUAL_SYNC = os.environ.get("MANUAL_SYNC", "False") != "False"
# Celery
# Execute celery tasks locally instead of in a worker unless the
# environment is configured.
CELERY_ALWAYS_EAGER = os.environ.get("CELERY_ALWAYS_EAGER", "True") != "False"
# Limit the number of tasks a celery worker can handle before being replaced.
try:
CELERYD_MAX_TASKS_PER_CHILD = int(os.environ.get("CELERYD_MAX_TASKS_PER_CHILD", ""))
except ValueError:
CELERYD_MAX_TASKS_PER_CHILD = 20
BROKER_POOL_LIMIT = 1 # Limit to one connection per worker
BROKER_CONNECTION_TIMEOUT = 30 # Give up connecting faster
CELERY_RESULT_BACKEND = None # We don't store results
CELERY_SEND_EVENTS = False # We aren't yet monitoring events
# The default serializer since Celery 4 is 'json'
CELERY_TASK_SERIALIZER = "pickle"
CELERY_RESULT_SERIALIZER = "pickle"
CELERY_ACCEPT_CONTENT = ["pickle"]
# Settings related to the CORS mechanisms.
# For the sake of integration with other sites,
# some of the JavaScript files (e.g. pontoon.js)
# require the Access-Control-Allow-Origin header to be set to '*'.
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r"^/(pontoon\.js|graphql/?)$"
SOCIALACCOUNT_ENABLED = True
SOCIALACCOUNT_ADAPTER = "pontoon.base.adapter.PontoonSocialAdapter"
# Supported values: 'django', 'fxa', 'github', 'gitlab', 'google'
AUTHENTICATION_METHOD = os.environ.get("AUTHENTICATION_METHOD", "django")
def account_username(user):
return user.name_or_email
# django-allauth settings
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_USER_DISPLAY = account_username
# Firefox Accounts
FXA_CLIENT_ID = os.environ.get("FXA_CLIENT_ID")
FXA_SECRET_KEY = os.environ.get("FXA_SECRET_KEY")
FXA_OAUTH_ENDPOINT = os.environ.get("FXA_OAUTH_ENDPOINT", "")
FXA_PROFILE_ENDPOINT = os.environ.get("FXA_PROFILE_ENDPOINT", "")
FXA_SCOPE = ["profile:uid", "profile:display_name", "profile:email"]
# Github
GITHUB_CLIENT_ID = os.environ.get("GITHUB_CLIENT_ID")
GITHUB_SECRET_KEY = os.environ.get("GITHUB_SECRET_KEY")
# GitLab
GITLAB_URL = os.environ.get("GITLAB_URL", "https://gitlab.com")
GITLAB_CLIENT_ID = os.environ.get("GITLAB_CLIENT_ID")
GITLAB_SECRET_KEY = os.environ.get("GITLAB_SECRET_KEY")
# Google Accounts
GOOGLE_CLIENT_ID = os.environ.get("GOOGLE_CLIENT_ID")
GOOGLE_SECRET_KEY = os.environ.get("GOOGLE_SECRET_KEY")
# All settings related to the AllAuth
SOCIALACCOUNT_PROVIDERS = {
"fxa": {
"SCOPE": FXA_SCOPE,
"OAUTH_ENDPOINT": FXA_OAUTH_ENDPOINT,
"PROFILE_ENDPOINT": FXA_PROFILE_ENDPOINT,
},
"gitlab": {"GITLAB_URL": GITLAB_URL, "SCOPE": ["read_user"]},
}
# Define all trusted origins that will be returned in the pontoon.js file.
if os.environ.get("JS_TRUSTED_ORIGINS"):
JS_TRUSTED_ORIGINS = os.environ.get("JS_TRUSTED_ORIGINS").split(",")
else:
JS_TRUSTED_ORIGINS = [
SITE_URL,
]
# Configuration of `django-notifications-hq` app
DJANGO_NOTIFICATIONS_CONFIG = {
# Attach extra arguments passed to notify.send(...) to the .data attribute
# of the Notification object.
"USE_JSONFIELD": True,
}
# Maximum number of read notifications to display in the notifications menu
NOTIFICATIONS_MAX_COUNT = 7
|
the-stack_106_27289 | import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray
from pandas.core.arrays.sparse import SparseDtype
class TestSparseDataFrameIndexing:
def test_getitem_sparse_column(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = pd.DataFrame({"A": data})
expected = pd.Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
@pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
@td.skip_if_no_scipy
def test_locindexer_from_spmatrix(self, spmatrix_t, dtype):
import scipy.sparse
spmatrix_t = getattr(scipy.sparse, spmatrix_t)
# The bug is triggered by a sparse matrix with purely sparse columns. So the
# recipe below generates a rectangular matrix of dimension (5, 7) where all the
# diagonal cells are ones, meaning the last two columns are purely sparse.
rows, cols = 5, 7
spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)
df = pd.DataFrame.sparse.from_spmatrix(spmatrix)
# regression test for #34526
itr_idx = range(2, rows)
result = df.loc[itr_idx].values
expected = spmatrix.toarray()[itr_idx]
tm.assert_numpy_array_equal(result, expected)
# regression test for #34540
result = df.loc[itr_idx].dtypes.values
expected = np.full(cols, SparseDtype(dtype, fill_value=0))
tm.assert_numpy_array_equal(result, expected)
def test_reindex(self):
# https://github.com/pandas-dev/pandas/issues/35286
df = pd.DataFrame(
{"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))}
)
result = df.reindex([0, 2])
expected = pd.DataFrame(
{
"A": [0.0, np.nan],
"B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)),
},
index=[0, 2],
)
tm.assert_frame_equal(result, expected)
def test_all_sparse(self):
df = pd.DataFrame({"A": pd.array([0, 0], dtype=pd.SparseDtype("int64"))})
result = df.loc[[0, 1]]
tm.assert_frame_equal(result, df)
|
the-stack_106_27290 | # -*- coding: utf-8 -*-
from textwrap import dedent
import logging
import sys
import pytest
from parso.utils import split_lines
from parso import cache
from parso import load_grammar
from parso.python.diff import DiffParser, _assert_valid_graph
from parso import parse
ANY = object()
def test_simple():
"""
The diff parser reuses modules. So check for that.
"""
grammar = load_grammar()
module_a = grammar.parse('a', diff_cache=True)
assert grammar.parse('b', diff_cache=True) == module_a
def _check_error_leaves_nodes(node):
if node.type in ('error_leaf', 'error_node'):
return node
try:
children = node.children
except AttributeError:
pass
else:
for child in children:
x_node = _check_error_leaves_nodes(child)
if x_node is not None:
return x_node
return None
class Differ(object):
grammar = load_grammar()
def initialize(self, code):
logging.debug('differ: initialize')
try:
del cache.parser_cache[self.grammar._hashed][None]
except KeyError:
pass
self.lines = split_lines(code, keepends=True)
self.module = parse(code, diff_cache=True, cache=True)
assert code == self.module.get_code()
_assert_valid_graph(self.module)
return self.module
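    # parse() below re-parses `code` with the diff parser and asserts how many
    # nodes were copied from the previous tree (`copies`) and how many chunks
    # had to be re-parsed (`parsers`); passing ANY skips either assertion.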
def parse(self, code, copies=0, parsers=0, expect_error_leaves=False):
logging.debug('differ: parse copies=%s parsers=%s', copies, parsers)
lines = split_lines(code, keepends=True)
diff_parser = DiffParser(
self.grammar._pgen_grammar,
self.grammar._tokenizer,
self.module,
)
new_module = diff_parser.update(self.lines, lines)
self.lines = lines
assert code == new_module.get_code()
_assert_valid_graph(new_module)
error_node = _check_error_leaves_nodes(new_module)
assert expect_error_leaves == (error_node is not None), error_node
if parsers is not ANY:
assert diff_parser._parser_count == parsers
if copies is not ANY:
assert diff_parser._copy_count == copies
return new_module
@pytest.fixture()
def differ():
return Differ()
def test_change_and_undo(differ):
func_before = 'def func():\n pass\n'
# Parse the function and a.
differ.initialize(func_before + 'a')
# Parse just b.
differ.parse(func_before + 'b', copies=1, parsers=1)
# b has changed to a again, so parse that.
differ.parse(func_before + 'a', copies=1, parsers=1)
# Same as before parsers should not be used. Just a simple copy.
differ.parse(func_before + 'a', copies=1)
# Now that we have a newline at the end, everything is easier in Python
# syntax, we can parse once and then get a copy.
differ.parse(func_before + 'a\n', copies=1, parsers=1)
differ.parse(func_before + 'a\n', copies=1)
# Getting rid of an old parser: Still no parsers used.
differ.parse('a\n', copies=1)
# Now the file has completely changed and we need to parse.
differ.parse('b\n', parsers=1)
# And again.
differ.parse('a\n', parsers=1)
def test_positions(differ):
func_before = 'class A:\n pass\n'
m = differ.initialize(func_before + 'a')
assert m.start_pos == (1, 0)
assert m.end_pos == (3, 1)
m = differ.parse('a', copies=1)
assert m.start_pos == (1, 0)
assert m.end_pos == (1, 1)
m = differ.parse('a\n\n', parsers=1)
assert m.end_pos == (3, 0)
m = differ.parse('a\n\n ', copies=1, parsers=2)
assert m.end_pos == (3, 1)
m = differ.parse('a ', parsers=1)
assert m.end_pos == (1, 2)
def test_if_simple(differ):
src = dedent('''\
if 1:
a = 3
''')
else_ = "else:\n a = ''\n"
differ.initialize(src + 'a')
differ.parse(src + else_ + "a", copies=0, parsers=1)
differ.parse(else_, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(src + else_, parsers=1)
def test_func_with_for_and_comment(differ):
# The first newline is important, leave it. It should not trigger another
# parser split.
src = dedent("""\
def func():
pass
for a in [1]:
# COMMENT
a""")
differ.initialize(src)
differ.parse('a\n' + src, copies=1, parsers=2)
def test_one_statement_func(differ):
src = dedent("""\
first
def func(): a
""")
differ.initialize(src + 'second')
differ.parse(src + 'def second():\n a', parsers=1, copies=1)
def test_for_on_one_line(differ):
src = dedent("""\
foo = 1
for x in foo: pass
def hi():
pass
""")
differ.initialize(src)
src = dedent("""\
def hi():
for x in foo: pass
pass
pass
""")
differ.parse(src, parsers=2)
src = dedent("""\
def hi():
for x in foo: pass
pass
def nested():
pass
""")
# The second parser is for parsing the `def nested()` which is an `equal`
# operation in the SequenceMatcher.
differ.parse(src, parsers=1, copies=1)
def test_open_parentheses(differ):
func = 'def func():\n a\n'
code = 'isinstance(\n\n' + func
new_code = 'isinstance(\n' + func
differ.initialize(code)
differ.parse(new_code, parsers=1, expect_error_leaves=True)
new_code = 'a = 1\n' + new_code
differ.parse(new_code, parsers=2, expect_error_leaves=True)
func += 'def other_func():\n pass\n'
differ.initialize('isinstance(\n' + func)
# Cannot copy all, because the prefix of the function is once a newline and
# once not.
differ.parse('isinstance()\n' + func, parsers=2, copies=1)
def test_open_parentheses_at_end(differ):
code = "a['"
differ.initialize(code)
differ.parse(code, parsers=1, expect_error_leaves=True)
def test_backslash(differ):
src = dedent(r"""
a = 1\
if 1 else 2
def x():
pass
""")
differ.initialize(src)
src = dedent(r"""
def x():
a = 1\
if 1 else 2
def y():
pass
""")
differ.parse(src, parsers=2)
src = dedent(r"""
def first():
if foo \
and bar \
or baz:
pass
def second():
pass
""")
differ.parse(src, parsers=1)
def test_full_copy(differ):
code = 'def foo(bar, baz):\n pass\n bar'
differ.initialize(code)
differ.parse(code, copies=1)
def test_wrong_whitespace(differ):
code = '''
hello
'''
differ.initialize(code)
differ.parse(code + 'bar\n ', parsers=3)
code += """abc(\npass\n """
differ.parse(code, parsers=2, copies=1, expect_error_leaves=True)
def test_issues_with_error_leaves(differ):
code = dedent('''
def ints():
str..
str
''')
code2 = dedent('''
def ints():
str.
str
''')
differ.initialize(code)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
def test_unfinished_nodes(differ):
code = dedent('''
class a():
def __init__(self, a):
self.a = a
def p(self):
a(1)
''')
code2 = dedent('''
class a():
def __init__(self, a):
self.a = a
def p(self):
self
a(1)
''')
differ.initialize(code)
differ.parse(code2, parsers=1, copies=2)
def test_nested_if_and_scopes(differ):
code = dedent('''
class a():
if 1:
def b():
2
''')
code2 = code + ' else:\n 3'
differ.initialize(code)
differ.parse(code2, parsers=1, copies=0)
def test_word_before_def(differ):
code1 = 'blub def x():\n'
code2 = code1 + ' s'
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True)
def test_classes_with_error_leaves(differ):
code1 = dedent('''
class X():
def x(self):
blablabla
assert 3
self.
class Y():
pass
''')
code2 = dedent('''
class X():
def x(self):
blablabla
assert 3
str(
class Y():
pass
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_totally_wrong_whitespace(differ):
code1 = '''
class X():
raise n
class Y():
pass
'''
code2 = '''
class X():
raise n
str(
class Y():
pass
'''
differ.initialize(code1)
differ.parse(code2, parsers=4, copies=0, expect_error_leaves=True)
def test_node_insertion(differ):
code1 = dedent('''
class X():
def y(self):
a = 1
b = 2
c = 3
d = 4
''')
code2 = dedent('''
class X():
def y(self):
a = 1
b = 2
str
c = 3
d = 4
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=2)
def test_whitespace_at_end(differ):
code = dedent('str\n\n')
differ.initialize(code)
differ.parse(code + '\n', parsers=1, copies=1)
def test_endless_while_loop(differ):
"""
This was a bug in Jedi #878.
"""
code = '#dead'
differ.initialize(code)
module = differ.parse(code, parsers=1)
assert module.end_pos == (1, 5)
code = '#dead\n'
differ.initialize(code)
module = differ.parse(code + '\n', parsers=1)
assert module.end_pos == (3, 0)
def test_in_class_movements(differ):
code1 = dedent("""\
class PlaybookExecutor:
p
b
def run(self):
1
try:
x
except:
pass
""")
code2 = dedent("""\
class PlaybookExecutor:
b
def run(self):
1
try:
x
except:
pass
""")
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1)
def test_in_parentheses_newlines(differ):
code1 = dedent("""
x = str(
True)
a = 1
def foo():
pass
b = 2""")
code2 = dedent("""
x = str(True)
a = 1
def foo():
pass
b = 2""")
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1)
def test_indentation_issue(differ):
code1 = dedent("""
import module
""")
code2 = dedent("""
class L1:
class L2:
class L3:
def f(): pass
def f(): pass
def f(): pass
def f(): pass
""")
differ.initialize(code1)
differ.parse(code2, parsers=1)
def test_endmarker_newline(differ):
code1 = dedent('''\
docu = None
# some comment
result = codet
incomplete_dctassign = {
"module"
if "a":
x = 3 # asdf
''')
code2 = code1.replace('codet', 'coded')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
def test_newlines_at_end(differ):
differ.initialize('a\n\n')
differ.parse('a\n', copies=1)
def test_end_newline_with_decorator(differ):
code = dedent('''\
@staticmethod
def spam():
import json
json.l''')
differ.initialize(code)
module = differ.parse(code + '\n', copies=1, parsers=1)
decorated, endmarker = module.children
assert decorated.type == 'decorated'
decorator, func = decorated.children
suite = func.children[-1]
assert suite.type == 'suite'
newline, first_stmt, second_stmt = suite.children
assert first_stmt.get_code() == ' import json\n'
assert second_stmt.get_code() == ' json.l\n'
def test_invalid_to_valid_nodes(differ):
code1 = dedent('''\
def a():
foo = 3
def b():
la = 3
else:
la
return
foo
base
''')
code2 = dedent('''\
def a():
foo = 3
def b():
la = 3
if foo:
latte = 3
else:
la
return
foo
base
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=3)
def test_if_removal_and_reappearence(differ):
code1 = dedent('''\
la = 3
if foo:
latte = 3
else:
la
pass
''')
code2 = dedent('''\
la = 3
latte = 3
else:
la
pass
''')
code3 = dedent('''\
la = 3
if foo:
latte = 3
else:
la
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=4, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
differ.parse(code3, parsers=1, copies=1)
def test_add_error_indentation(differ):
code = 'if x:\n 1\n'
differ.initialize(code)
differ.parse(code + ' 2\n', parsers=1, copies=0, expect_error_leaves=True)
def test_differing_docstrings(differ):
code1 = dedent('''\
def foobar(x, y):
1
return x
def bazbiz():
foobar()
lala
''')
code2 = dedent('''\
def foobar(x, y):
2
return x + y
def bazbiz():
z = foobar()
lala
''')
differ.initialize(code1)
differ.parse(code2, parsers=3, copies=1)
differ.parse(code1, parsers=3, copies=1)
def test_one_call_in_function_change(differ):
code1 = dedent('''\
def f(self):
mro = [self]
for a in something:
yield a
def g(self):
return C(
a=str,
b=self,
)
''')
code2 = dedent('''\
def f(self):
mro = [self]
def g(self):
return C(
a=str,
t
b=self,
)
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=2, copies=1)
def test_function_deletion(differ):
code1 = dedent('''\
class C(list):
def f(self):
def iterate():
for x in b:
break
return list(iterate())
''')
code2 = dedent('''\
class C():
def f(self):
for x in b:
break
return list(iterate())
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=0)
def test_docstring_removal(differ):
code1 = dedent('''\
class E(Exception):
"""
1
2
3
"""
class S(object):
@property
def f(self):
return cmd
def __repr__(self):
return cmd2
''')
code2 = dedent('''\
class E(Exception):
"""
1
3
"""
class S(object):
@property
def f(self):
return cmd
return cmd2
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=2)
differ.parse(code1, parsers=2, copies=1)
def test_paren_in_strange_position(differ):
code1 = dedent('''\
class C:
""" ha """
def __init__(self, message):
self.message = message
''')
code2 = dedent('''\
class C:
""" ha """
)
def __init__(self, message):
self.message = message
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=2, expect_error_leaves=True)
differ.parse(code1, parsers=0, copies=2)
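# insert_line_into_code inserts `line` before the existing line at position
# `index` (0-based), keeping the original line endings intact.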
def insert_line_into_code(code, index, line):
lines = split_lines(code, keepends=True)
lines.insert(index, line)
return ''.join(lines)
def test_paren_before_docstring(differ):
code1 = dedent('''\
# comment
"""
The
"""
from parso import tree
from parso import python
''')
code2 = insert_line_into_code(code1, 1, ' ' * 16 + 'raise InternalParseError(\n')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=2, copies=1)
def test_parentheses_before_method(differ):
code1 = dedent('''\
class A:
def a(self):
pass
class B:
def b(self):
if 1:
pass
''')
code2 = dedent('''\
class A:
def a(self):
pass
Exception.__init__(self, "x" %
def b(self):
if 1:
pass
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_indentation_issues(differ):
code1 = dedent('''\
class C:
def f():
1
if 2:
return 3
def g():
to_be_removed
pass
''')
code2 = dedent('''\
class C:
def f():
1
``something``, very ``weird``).
if 2:
return 3
def g():
to_be_removed
pass
''')
code3 = dedent('''\
class C:
def f():
1
if 2:
return 3
def g():
pass
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=2, expect_error_leaves=True)
differ.parse(code1, copies=2)
differ.parse(code3, parsers=2, copies=1)
differ.parse(code1, parsers=1, copies=2)
def test_error_dedent_issues(differ):
code1 = dedent('''\
while True:
try:
1
except KeyError:
if 2:
3
except IndexError:
4
5
''')
code2 = dedent('''\
while True:
try:
except KeyError:
1
except KeyError:
if 2:
3
except IndexError:
4
something_inserted
5
''')
differ.initialize(code1)
differ.parse(code2, parsers=6, copies=2, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=0)
def test_random_text_insertion(differ):
code1 = dedent('''\
class C:
def f():
return node
def g():
try:
1
except KeyError:
2
''')
code2 = dedent('''\
class C:
def f():
return node
Some'random text: yeah
for push in plan.dfa_pushes:
def g():
try:
1
except KeyError:
2
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_many_nested_ifs(differ):
code1 = dedent('''\
class C:
def f(self):
def iterate():
if 1:
yield t
else:
yield
return
def g():
3
''')
code2 = dedent('''\
def f(self):
def iterate():
if 1:
yield t
hahahaha
if 2:
else:
yield
return
def g():
3
''')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Async starts working in 3.5")
@pytest.mark.parametrize('prefix', ['', 'async '])
def test_with_and_funcdef_in_call(differ, prefix):
code1 = prefix + dedent('''\
with x:
la = C(
a=1,
b=2,
c=3,
)
''')
code2 = insert_line_into_code(code1, 3, 'def y(self, args):\n')
differ.initialize(code1)
differ.parse(code2, parsers=3, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_wrong_backslash(differ):
code1 = dedent('''\
def y():
1
for x in y:
continue
''')
code2 = insert_line_into_code(code1, 3, '\\.whl$\n')
differ.initialize(code1)
differ.parse(code2, parsers=2, copies=2, expect_error_leaves=True)
differ.parse(code1, parsers=1, copies=1)
def test_comment_change(differ):
differ.initialize('')
def test_random_unicode_characters(differ):
"""
Those issues were all found with the fuzzer.
"""
differ.initialize('')
differ.parse(u'\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1,
expect_error_leaves=True)
differ.parse(u'\r\r', parsers=1)
differ.parse(u"˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True)
differ.parse(u'a\ntaǁ\rGĒōns__\n\nb', parsers=1,
expect_error_leaves=sys.version_info[0] == 2)
s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):'
differ.parse(s, parsers=1, expect_error_leaves=True)
differ.parse('')
differ.parse(s + '\n', parsers=1, expect_error_leaves=True)
differ.parse(u' result = (\r\f\x17\t\x11res)', parsers=2, expect_error_leaves=True)
differ.parse('')
differ.parse(' a( # xx\ndef', parsers=2, expect_error_leaves=True)
@pytest.mark.skipif(sys.version_info < (2, 7), reason="No set literals in Python 2.6")
def test_dedent_end_positions(differ):
code1 = dedent('''\
if 1:
if b:
2
c = {
5}
''')
code2 = dedent('''\
if 1:
if ⌟ഒᜈྡྷṭb:
2
'l': ''}
c = {
5}
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=1)
def test_special_no_newline_ending(differ):
code1 = dedent('''\
1
''')
code2 = dedent('''\
1
is ''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=0)
def test_random_character_insertion(differ):
code1 = dedent('''\
def create(self):
1
if self.path is not None:
return
# 3
# 4
''')
code2 = dedent('''\
def create(self):
1
if 2:
x return
# 3
# 4
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=3, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1)
def test_import_opening_bracket(differ):
code1 = dedent('''\
1
2
from bubu import (X,
''')
code2 = dedent('''\
11
2
from bubu import (X,
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=2, expect_error_leaves=True)
def test_opening_bracket_at_end(differ):
code1 = dedent('''\
class C:
1
[
''')
code2 = dedent('''\
3
class C:
1
[
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
def test_all_sorts_of_indentation(differ):
code1 = dedent('''\
class C:
1
def f():
'same'
if foo:
a = b
end
''')
code2 = dedent('''\
class C:
1
def f(yield await %|(
'same'
\x02\x06\x0f\x1c\x11
if foo:
a = b
end
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=4, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=3)
code3 = dedent('''\
if 1:
a
b
c
d
\x00
''')
differ.parse(code3, parsers=2, expect_error_leaves=True)
differ.parse('')
def test_dont_copy_dedents_in_beginning(differ):
code1 = dedent('''\
a
4
''')
code2 = dedent('''\
1
2
3
4
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=2)
def test_dont_copy_error_leaves(differ):
code1 = dedent('''\
def f(n):
x
if 2:
3
''')
code2 = dedent('''\
def f(n):
def if 1:
indent
x
if 2:
3
''')
differ.initialize(code1)
differ.parse(code2, parsers=1, expect_error_leaves=True)
differ.parse(code1, parsers=2)
def test_error_dedent_in_between(differ):
code1 = dedent('''\
class C:
def f():
a
if something:
x
z
''')
code2 = dedent('''\
class C:
def f():
a
dedent
if other_thing:
b
if something:
x
z
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=2)
def test_some_other_indentation_issues(differ):
code1 = dedent('''\
class C:
x
def f():
""
copied
a
''')
code2 = dedent('''\
try:
de
a
b
c
d
def f():
""
copied
a
''')
differ.initialize(code1)
differ.parse(code2, copies=2, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=2, parsers=2)
def test_open_bracket_case1(differ):
code1 = dedent('''\
class C:
1
2 # ha
''')
code2 = insert_line_into_code(code1, 2, ' [str\n')
code3 = insert_line_into_code(code2, 4, ' str\n')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code3, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code1, copies=1, parsers=1)
def test_open_bracket_case2(differ):
code1 = dedent('''\
class C:
def f(self):
(
b
c
def g(self):
d
''')
code2 = dedent('''\
class C:
def f(self):
(
b
c
self.
def g(self):
d
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=2, parsers=0, expect_error_leaves=True)
def test_some_weird_removals(differ):
code1 = dedent('''\
class C:
1
''')
code2 = dedent('''\
class C:
1
@property
A
return
# x
omega
''')
code3 = dedent('''\
class C:
1
;
omega
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True)
differ.parse(code3, copies=1, parsers=2, expect_error_leaves=True)
differ.parse(code1, copies=1)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Async starts working in 3.5")
def test_async_copy(differ):
code1 = dedent('''\
async def main():
x = 3
print(
''')
code2 = dedent('''\
async def main():
x = 3
print()
''')
differ.initialize(code1)
differ.parse(code2, copies=1, parsers=1)
differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True)
|
the-stack_106_27291 | import re
import json
import requests
import termcolor
from .info import load_login_info, encrypt
from .utils import Page
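# Matches single quotes that are not preceded by a backslash, so the portal's
# quasi-JSON response (which uses single quotes) can be rewritten with double
# quotes and parsed by json.loads below.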
QUOTE = re.compile('(?<!\\\\)\'')
def auth():
user, pwd = load_login_info()
encrypted, key = encrypt(pwd)
url = 'http://1.1.1.3/ac_portal/login.php'
data = {
'opr': 'pwdLogin',
'userName': user,
'pwd': encrypted,
'rc4Key': key,
'rememberPwd': 1
}
print('Sending Request...')
response = requests.post(url, data=data)
text = QUOTE.sub('"', response.text)
if json.loads(text).get('success', ''):
print(termcolor.colored('Connected!', 'green'))
else:
print(termcolor.colored('Login Failed: ' + response.text, 'red')) |
the-stack_106_27292 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from os.path import abspath, realpath, split, dirname
import collections
from datetime import datetime, timedelta
import time
import jinja2
import json
import logging
import pytz
import shutil
import sys
import logging
from jinja2.exceptions import UndefinedError
from .core import args
from .extras import parseFeedURL, fetch_content, templateContent, process_entry
from .styles import cssTextDecoded
from .__main__ import logFile
#argsFilename = args.filename
standardPath = os.getcwd()
# Get a list of feed URLs
try:
with open('feeds.txt') as f:
SUBSCRIPTIONS = list(f)
print('Loading feeds.txt')
except FileNotFoundError: # If you don't have 'feeds.txt' in specified path, you can specify one (nfsyndication-src --filename=sample.txt)
try:
for documentList in args.filename:
parent_location = os.getcwd()
with open(documentList) as f:
SUBSCRIPTIONS = list(f)
print("Loading file: {} from '{}'".format(documentList, os.path.join(parent_location)))
except TypeError:
raise Exception('NFSyndication [ERROR]: feeds.txt not found. See `nfsyndication-src --help` for more.')
posts = []
outJSONFeed = []
try:
for url in SUBSCRIPTIONS:
try:
feed = parseFeedURL(url)
blog = feed['feed']['title']
except KeyError:
if args.verbose and feed.bozo:
logging.error("Feed data summary on URL {}".format(url))
logging.error("Failed command: {} ".format(sys.argv[0:]))
logging.error("Exception [{bozo_exception}]: {bozo_message}".format(bozo_exception=str(feed.bozo_exception.__class__.__name__), bozo_message=str(feed.bozo_exception)))
logging.error('Response code is: {}'.format(feed.status))
if (hasattr(feed.bozo_exception, 'getLineNumber') and hasattr(feed.bozo_exception, 'getMessage')):
line = feed.bozo_exception.getLineNumber()
logging.error('Line %d: %s', line, feed.bozo_exception.getMessage())
logging.error('[NFSyndication] Writing output logs to {}'.format(os.path.join(standardPath, logFile)))
raise Exception(f"[{feed.bozo_exception}] (code {feed.status}) \n{(f'Could not fetch URL(s): {url}')}")
sys.exit(-1)
continue
for entry in feed['entries']:
post = process_entry(entry, blog, comp_field=args.comparator_filter)
if post:
posts.append(post)
outJSONFeed.append(feed)
try:
fetch_content(url)
except:
raise SystemExit
except NameError:
pass
if args.outputJSON:
with open(args.outputJSON, 'w+', encoding='utf8') as outf:
json.dump(outJSONFeed, outf, ensure_ascii=False, indent=4)
# Get the template, and drop in the posts
dir_path = os.path.split(os.path.realpath(__file__))[0]
try:
with open(f'{dir_path}/templates/template.html', encoding='utf8') as f:
print("\nChecking original template file...")
template = jinja2.Template(f.read())
with open(f'output/index.html', 'w', encoding='utf8') as f:
f.write(template.render(posts=posts, time=datetime.now()))
print('Successful.')
with open("output/style.css", 'w') as f:
f.write(cssTextDecoded)
except FileNotFoundError:
template = jinja2.Template(templateContent)
# When done, it converts to HTML
with open(f'output/index.html', 'w', encoding='utf8') as f:
f.write(template.render(cssText=cssTextDecoded, posts=posts, time=datetime.now()))
print('Successful.')
|
the-stack_106_27295 | from __future__ import unicode_literals
import frappe
import re
def execute():
for srl in frappe.get_all('Salary Slip',['name']):
if srl.get("name"):
            substring = re.search(r"/(.*?)/", srl.get("name")).group(1)
emp = frappe.db.get_value('Employee',{'name':substring},'user_id')
if "Employee" in frappe.get_roles(emp) and "HR Manager" not in frappe.get_roles(emp) and len(frappe.get_all('User Permission',filters={'allow':"Salary Slip",'for_value':srl.get("name"),'user':emp}))==0:
print(emp,"***",substring)
permission=frappe.new_doc('User Permission')
permission.user= emp
permission.allow= 'Salary Slip'
permission.for_value= srl.get("name")
permission.apply_to_all_doctypes = 0
permission.applicable_for = 'Salary Slip'
permission.save()
#homzhub_customization.homzhub_customization.patches.set_salary_permission.execute |
the-stack_106_27296 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import platform
import shutil
import subprocess
import sys
def get_parser():
if platform.system() == "Linux":
default_platform = "linux"
    elif platform.system() == "Darwin":  # platform.system() reports "Darwin" on macOS
default_platform = "macosx"
elif platform.system() == "Windows":
default_platform = "windows"
else:
default_platform = "unknown"
parser = argparse.ArgumentParser()
parser.add_argument(
"--target-platform",
type=str,
dest="target_platform",
choices=["linux", "macosx", "windows", "iphoneos", "iphonesimulator"],
default=default_platform,
)
parser.add_argument(
"--build-system",
type=str,
dest="build_system",
default="Ninja",
help="Generator to pass into CMake",
)
parser.add_argument(
"--cmake-flags",
type=str,
dest="cmake_flags",
default="",
help="Additional flags to pass to CMake",
)
parser.add_argument(
"--build-type",
type=str,
dest="build_type",
choices=["MinSizeRel", "Debug"],
default=None,
help="Optimization level of build",
)
parser.add_argument(
"--http-proxy",
type=str,
dest="http_proxy",
default=os.environ.get("HTTP_PROXY", ""),
)
parser.add_argument("--distribute", action="store_true")
parser.add_argument("--32-bit", dest="is_32_bit", action="store_true")
parser.add_argument("--enable-asan", dest="enable_asan", action="store_true")
return parser
def is_visual_studio(build_system):
return "Visual Studio" in build_system
def run_command(cmd, **kwargs):
print("+ " + " ".join(cmd))
return subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr, **kwargs)
def which(cmd):
if sys.version_info >= (3, 3):
# On Python 3.3 and above, use shutil.which for a quick error message.
resolved = shutil.which(cmd)
if not resolved:
raise Exception("{} not found on PATH".format(cmd))
return os.path.realpath(resolved)
else:
# Manually check PATH
for p in os.environ["PATH"].split(os.path.pathsep):
p = os.path.join(p, cmd)
if "PATHEXT" in os.environ:
# try out adding each extension to the PATH as well
for ext in os.environ["PATHEXT"].split(os.path.pathsep):
# Add the extension.
p_and_extension = p + ext
if os.path.exists(p_and_extension) and os.access(
p_and_extension, os.X_OK
):
return os.path.realpath(p_and_extension)
else:
if os.path.isfile(p) and os.access(p, os.X_OK):
return os.path.realpath(p)
raise Exception("{} not found on PATH".format(cmd))
def build_dir_suffix(args):
build_dir_suffix = ""
if args.enable_asan:
build_dir_suffix += "_asan"
if args.distribute:
build_dir_suffix += "_release"
if args.is_32_bit:
build_dir_suffix += "_32"
return build_dir_suffix
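# Example usage (hypothetical invocation, not part of the original script):
#   args = get_parser().parse_args(["--build-system", "Ninja", "--distribute"])
#   build_dir_suffix(args)  # -> "_release"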
|
the-stack_106_27297 | import itertools
import logging
from det3d.utils.config_tool import get_downsample_factor
tasks = [
dict(num_class=1, class_names=["car"]),
dict(num_class=2, class_names=["truck", "construction_vehicle"]),
dict(num_class=2, class_names=["bus", "trailer"]),
dict(num_class=1, class_names=["barrier"]),
dict(num_class=2, class_names=["motorcycle", "bicycle"]),
dict(num_class=2, class_names=["pedestrian", "traffic_cone"]),
]
class_names = list(itertools.chain(*[t["class_names"] for t in tasks]))
# training and testing settings
target_assigner = dict(
tasks=tasks,
)
# model settings
model = dict(
type="VoxelNet",
pretrained=None,
reader=dict(
type="VoxelFeatureExtractorV3",
# type='SimpleVoxel',
num_input_features=5,
),
backbone=dict(
type="SpMiddleResNetFHD", num_input_features=5, ds_factor=8
),
neck=dict(
type="RPN",
layer_nums=[5, 5],
ds_layer_strides=[1, 2],
ds_num_filters=[128, 256],
us_layer_strides=[1, 2],
us_num_filters=[256, 256],
num_input_features=256,
logger=logging.getLogger("RPN"),
),
bbox_head=dict(
type="CenterHead",
in_channels=sum([256,]),
tasks=tasks,
dataset='nuscenes',
weight=0.25,
code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 1.0, 1.0],
common_heads={'reg': (2, 2), 'height': (1, 2), 'dim':(3, 2), 'rot':(2, 2), 'vel': (2, 2)},
share_conv_channel=64,
dcn_head=False
),
)
assigner = dict(
target_assigner=target_assigner,
out_size_factor=get_downsample_factor(model),
dense_reg=1,
gaussian_overlap=0.1,
max_objs=500,
min_radius=2,
)
train_cfg = dict(assigner=assigner)
test_cfg = dict(
post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
max_per_img=500,
nms=dict(
use_rotate_nms=True,
use_multi_class_nms=False,
nms_pre_max_size=1000,
nms_post_max_size=83,
nms_iou_threshold=0.2,
),
score_threshold=0.1,
pc_range=[-54, -54],
out_size_factor=get_downsample_factor(model),
voxel_size=[0.075, 0.075]
)
# dataset settings
dataset_type = "NuScenesDataset"
nsweeps = 10
data_root = "data/nuScenes"
db_sampler = dict(
type="GT-AUG",
enable=False,
db_info_path="data/nuScenes/dbinfos_train_10sweeps_withvelo.pkl",
sample_groups=[
dict(car=2),
dict(truck=3),
dict(construction_vehicle=7),
dict(bus=4),
dict(trailer=6),
dict(barrier=2),
dict(motorcycle=6),
dict(bicycle=6),
dict(pedestrian=2),
dict(traffic_cone=2),
],
db_prep_steps=[
dict(
filter_by_min_num_points=dict(
car=5,
truck=5,
bus=5,
trailer=5,
construction_vehicle=5,
traffic_cone=5,
barrier=5,
motorcycle=5,
bicycle=5,
pedestrian=5,
)
),
dict(filter_by_difficulty=[-1],),
],
global_random_rotation_range_per_object=[0, 0],
rate=1.0,
)
train_preprocessor = dict(
mode="train",
shuffle_points=True,
global_rot_noise=[-0.78539816, 0.78539816],
global_scale_noise=[0.95, 1.05],
global_translate_std=0.5,
db_sampler=db_sampler,
class_names=class_names,
)
val_preprocessor = dict(
mode="val",
shuffle_points=False,
)
voxel_generator = dict(
range=[-54, -54, -5.0, 54, 54, 3.0],
voxel_size=[0.075, 0.075, 0.2],
max_points_in_voxel=10,
max_voxel_num=[120000, 160000],
)
train_pipeline = [
dict(type="LoadPointCloudFromFile", dataset=dataset_type),
dict(type="LoadPointCloudAnnotations", with_bbox=True),
dict(type="Preprocess", cfg=train_preprocessor),
dict(type="Voxelization", cfg=voxel_generator),
dict(type="AssignLabel", cfg=train_cfg["assigner"]),
dict(type="Reformat"),
# dict(type='PointCloudCollect', keys=['points', 'voxels', 'annotations', 'calib']),
]
test_pipeline = [
dict(type="LoadPointCloudFromFile", dataset=dataset_type),
dict(type="LoadPointCloudAnnotations", with_bbox=True),
dict(type="Preprocess", cfg=val_preprocessor),
dict(type="Voxelization", cfg=voxel_generator),
dict(type="AssignLabel", cfg=train_cfg["assigner"]),
dict(type="Reformat"),
]
train_anno = "data/nuScenes/infos_train_10sweeps_withvelo_filter_True.pkl"
val_anno = "data/nuScenes/infos_val_10sweeps_withvelo_filter_True.pkl"
test_anno = None
data = dict(
samples_per_gpu=8,
workers_per_gpu=16,
train=dict(
type=dataset_type,
root_path=data_root,
info_path=train_anno,
ann_file=train_anno,
nsweeps=nsweeps,
class_names=class_names,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
root_path=data_root,
info_path=val_anno,
test_mode=True,
ann_file=val_anno,
nsweeps=nsweeps,
class_names=class_names,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
root_path=data_root,
info_path=test_anno,
ann_file=test_anno,
nsweeps=nsweeps,
class_names=class_names,
pipeline=test_pipeline,
),
)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# optimizer
optimizer = dict(
type="adam", amsgrad=0.0, wd=0.01, fixed_wd=True, moving_average=False,
)
lr_config = dict(
type="one_cycle", lr_max=0.001, moms=[0.95, 0.85], div_factor=10.0, pct_start=0.4,
)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=5,
hooks=[
dict(type="TextLoggerHook"),
# dict(type='TensorboardLoggerHook')
],
)
# yapf:enable
# runtime settings
total_epochs = 20
device_ids = range(8)
dist_params = dict(backend="nccl", init_method="env://")
log_level = "INFO"
work_dir = './work_dirs/{}/'.format(__file__[__file__.rfind('/') + 1:-3])
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_106_27299 | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import retro
import click
import gym
import neat
from pytorch_neat.multi_env_eval import MultiEnvEvaluator
from pytorch_neat.neat_reporter import LogReporter
from pytorch_neat.recurrent_net import RecurrentNet
max_env_steps = 200
def make_env():
return retro.make("SonicTheHedgehog-Genesis")
def make_net(genome, config, bs):
return RecurrentNet.create(genome, config, bs)
def activate_net(net, states):
outputs = net.activate(states).numpy()
return outputs[:, 0] > 0.5
@click.command()
@click.option("--n_generations", type=int, default=100)
def run(n_generations):
# Load the config file, which is assumed to live in
# the same directory as this script.
config_path = os.path.join(os.path.dirname(__file__), "neat.cfg")
config = neat.Config(
neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_path,
)
evaluator = MultiEnvEvaluator(
make_net, activate_net, make_env=make_env, max_env_steps=max_env_steps
)
def eval_genomes(genomes, config):
for _, genome in genomes:
genome.fitness = evaluator.eval_genome(genome, config)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
reporter = neat.StdOutReporter(True)
pop.add_reporter(reporter)
logger = LogReporter("neat.log", evaluator.eval_genome)
pop.add_reporter(logger)
pop.run(eval_genomes, n_generations)
if __name__ == "__main__":
run() # pylint: disable=no-value-for-parameter
|
the-stack_106_27301 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 8 08:33:28 2018
@author: pgallego
"""
import pydicom as dicom
#Bladder, Rectum, Body Fermoales, PTV
RdPath = 'C:\\Users\\pgallego\\Desktop\\Prostate1\\RD.1.2.246.352.71.7.2101921327.432885.20121219084743.dcm'
RsPath = 'C:\\Users\\pgallego\\Desktop\\Prostate1\\RS.1.2.246.352.71.4.2101921327.18465.20121219102519.dcm'
#
def FindStructures(RsPath):
BufetaList=['bladdder','Bufeta','bufeta','bllader','Bladder','BUFETA','BLADDER','bufet','VEJIGA','Vejiga']
RectoList=['RECTUM','Recte','rectum','recte','recto','RECTO','RECTE','OR-RECTE','Recto']
LFemoralListt=['femuresq','Cap femur esq','FEMURESQ','femurE','OR-FE','Cap femur E','Cap femur esquerre','capfemoralesq','Cap Femur E','Cap fem Esq','cap femoral esq','cap femoral Esq','C. femoral E','cap femoral esquerre','femur esq']
RFemoralListt=['femurdret','Cap femur dret','FEMURDRTE','femurD','OR-FD','Cap femur D','Cap femu dret','capfemodret','Cap Femur D','cap fem dret','FEMURD','cap femoral dret','cap femoral D','C. femoral D','cap femur dret','femur dret','fèmur dret']
PTV1List=['PTV1','PTV-P','P T V 1','p t v 1']
PTV11List=['PTV11','PTV1 1','PTV1.1','PTV 11','PTV11 sib']
string = "Locations : "
f = dicom.read_file(RsPath,force=True)
for i in range(len(f.RTROIObservationsSequence)):
        string = string + " " + f.StructureSetROISequence[i].ROIName
        if f.RTROIObservationsSequence[i].RTROIInterpretedType == 'EXTERNAL':
            Body = i
        elif f.StructureSetROISequence[i].ROIName in BufetaList:
            Bufeta = i
        elif f.StructureSetROISequence[i].ROIName in RectoList:
            Recto = i
        elif f.StructureSetROISequence[i].ROIName in LFemoralListt:
            LFemoral = i
        elif f.StructureSetROISequence[i].ROIName in RFemoralListt:
            RFemoral = i
        elif f.StructureSetROISequence[i].ROIName in PTV1List:
            PTV1 = i
        elif f.StructureSetROISequence[i].ROIName in PTV11List:
            PTV11 = i
print(string)
return [Body,Recto,Bufeta,LFemoral,RFemoral,PTV1,PTV11]
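# Example usage (hypothetical; assumes the RS structure file above exists on disk):
#   body, recto, bufeta, l_fem, r_fem, ptv1, ptv11 = FindStructures(RsPath)
# Each returned value is the index of that structure in the RTSTRUCT ROI sequences.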
def FindStructuresBreast(RsPath,Side):
if Side =='L':
PulmonLList=['OR-PULMOESQ','OR PULMO izqdo','OR PULMO IZQ','OR PULMO ESQ','OR-PULMOE','pulmon','PULMO ESQ OK','OR_PULMOE2','PULMO ESQ']
PTVMamaList=['PTV MAMA izqda','MAMA 50GY','PTV MAMA IZQ','PTV mama Esq','PTV MAMA ESQ','PTV PAREDI','PTV-MAMAI','PTV-MAMAE','PTV PARET ESQ','PTV pared','PTV-MAME','PTV paredMAMAESQ','PTV-PAREDI','PTV-MAMAE2','PTV-PARETESQ','PTV-MAMAESQ','PTV PARET']
elif Side =='R':
PulmonLList=['OR-PDerecho','OR_PULMOD','OR PULMO Dret','OR-PULMOD','OR PULMO DRET','OR-PULMON DRET','OR PULMO D','OR-PULMO DT','OR.PULMO DRET','OR-PDerecho','OR PULMO DRET2','pulmon','PULMO DRET']
PTVMamaList=['PTV1','PTV PARET DRETA','PTV_MAMAD','PTV-MAMAD2','PTV MAMA','PTV pared','PTV MAMA DRETA','PTV MAMA dreta','PTV-MAMAD','PTV pared derech','PTV PARET DRETA','PTV-PARETD','PTV PAREDD','PTV PARET D','PTV pared mamaD','PTV-PARETETD','PTV MAMA DRETA2','PTV-PAREDD','PTV MAMAD']
CorList=['or-cor','OR COR','OR-COR','Heart','corazon','OR_COR2']
PTVAreaList=['PTV Axilo-SC Esq','PTV AxiloSC E','PTVarea SC','PTV Ã rees','PTV AIXELLA DRET','PTV_APEX','PTV-AREES2','PTV_AREES','PTV AREES','PTV AREAS','PTV-AREES','PTVareas','PTV-AREAS','PTV areas','PTV AREES D','PTV axilo-SC Dr','PTVAREES2CORREGI','PTV-APEX','PTV-AREE2','PTV AREASD','PTV- AREAS','PTV AREx']
string = "Locations : "
f = dicom.read_file(RsPath,force=True)
for i in range(len(f.RTROIObservationsSequence)):
        string = string + " " + f.StructureSetROISequence[i].ROIName + "\n"
        if f.RTROIObservationsSequence[i].RTROIInterpretedType == 'EXTERNAL':
            Body = i
        elif f.StructureSetROISequence[i].ROIName in PulmonLList:
            Pulmon = i
        elif f.StructureSetROISequence[i].ROIName in CorList:
            Cor = i
        elif f.StructureSetROISequence[i].ROIName in PTVMamaList:
            PTVMama = i
        elif f.StructureSetROISequence[i].ROIName in PTVAreaList:
            PTVAreas = i
#print(string )
if Side == "L":
return [Body,Pulmon,Cor,PTVMama,PTVAreas]
else:
return [Body,Pulmon,PTVMama,PTVAreas]
|
the-stack_106_27302 | #!/usr/bin/env python3
import sys
import json
import os
import argparse
columns = [
'count',
't_sum',
't_avg',
'operation',
'key'
]
parser = argparse.ArgumentParser()
parser.add_argument("--hide", choices=['count', 't_sum','t_avg','key'], nargs='+', help="Hide specified column")
parser.add_argument("--log", choices=["http", "duration", "all"], default="all", help="Log to analayse")
parser.add_argument("--sort", choices=columns + ['D'+c for c in columns], default='count', help="Sort criteria.")
parser.add_argument("--groupby", choices=['bucket'], default='bucket', help="Goruping by criteria")
parser.add_argument("--min", type=float, default=0, help="Duration below this time will be omitted")
parser.add_argument("--operation", choices=['add', 'lookup','search','bind', 'modify'], default=['add', 'lookup','search','bind', 'modify'], nargs='+', help="Database operations to include")
parser.add_argument("--type", choices=["csv", "html", "text"], default="text", help="Output type")
parser.add_argument("dir", help="Path to log dir")
args = parser.parse_args()
if not args.dir:
    parser.print_help()
    sys.exit()
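# Usage sketch (paths and option values below are illustrative only; the actual log
# file names are resolved inside durations() and http_log()):
#   ./log_analyzer.py --log duration --sort Dt_sum --min 0.5 /opt/logs
#   ./log_analyzer.py --log http --type html /opt/logs > report.html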
def sort_result(result):
descending = False
order_by = args.sort
if order_by[0] == 'D':
order_by = order_by[1:]
descending = True
result.sort(key=lambda mydic: mydic[order_by])
if descending:
result.reverse()
def get_formatted_str(v):
if type(v) == type(1):
return str(v).rjust(8)
elif type(v) == type(1.1):
return '{:0.3f}'.format(v).rjust(8)
else:
return v
def is_hidden(c):
if args.hide and c in args.hide:
return True
def print_result(result, k, heading):
columnsl = columns[:]
columnsl.remove('operation')
sort_result(result)
if args.type == 'text':
print(heading,':')
print('-'*(len(heading)+1))
print('#'.rjust(3), end=' ')
for c in columnsl:
if not is_hidden(c):
if c=='key':
print(' '+k, end=' ')
else:
print(c.rjust(8), end=' ')
print()
footer = {'count':0, 't_sum':0, 't_avg':0}
for ln, row in enumerate(result):
print(str(ln+1).rjust(3), end=' ')
for c in columnsl:
if not is_hidden(c):
if c == 'key':
print(' ', end=' ')
print(get_formatted_str(row[c]), end=' ')
if c in footer:
footer[c] += row[c]
print()
print('='*100)
print(' '.rjust(3), end=' ')
for c in columns[:-2]:
if not is_hidden(c):
if c=='t_avg':
pp = footer[c] / len(result)
else:
pp = footer[c]
print(get_formatted_str(pp), end=' ')
print(" GRANT TOTAL")
print()
print()
if args.type == 'html':
print('<table>')
print('<caption><b>{0}</b></caption>'.format(heading))
print('<tr><td style="padding-right:10px; padding-left:10px">#</td>', end=' ')
for c in columns:
if not is_hidden(c):
if c=='key':
td = k
else:
td = c
print('<td>{0}</td>'.format(td), end=' ')
print('</tr>')
footer = {'count':0, 't_sum':0, 't_avg':0}
for ln, row in enumerate(result):
print('<tr><td align="right">{0}</td>'.format(ln+1), end=' ')
for c in columns:
if not is_hidden(c):
if c == 'key':
align = ''
else:
align=' align="right"'
print('<td {0}>{1}</td>'.format(align, get_formatted_str(row[c]).strip()), end=' ')
if c in footer:
footer[c] += row[c]
print('</tr>')
print('<tr><td></td>', end=' ')
for c in columns[:-2]:
if not is_hidden(c):
if c=='t_avg':
pp = footer[c] / len(result)
else:
pp = footer[c]
print('<td align="right">{0}</td>'.format(get_formatted_str(pp).strip()), end=' ')
print('<td colspan="2"> GRANT TOTAL </td></tr>')
print('</table>\n')
print('<br><br>')
def http_log():
fn = os.path.join(args.dir, 'http_request_response.log')
if not os.path.exists(fn):
print("File {0} does not exists".format(fn))
return
rdict = {}
for l in open(fn):
ls = l.strip().split(' - ')
data = json.loads(ls[-1])
if data.get('method') == 'GET':
if data.get('duration'):
d = float(data['duration'][2:-1])
if d > args.min:
if data['path'] in rdict:
rdict[data['path']].append(d)
else:
rdict[data['path']] = [d]
if not rdict:
print("\n *** NO HTTP LOG ANALYSES IS AVAILABLE ***")
return
sn = 0
st = 0
result=[]
for path in rdict:
data = rdict[path]
n = len(data)
ssn = str(n).rjust(5)
sn += n
t = sum(data)
st += t
a= t/n
result.append({'count': n, 't_sum': t, 't_avg': a, 'key': path})
    print_result(result, 'path', "HTTP REQUEST LOG ANALYSIS")
def durations():
fn = os.path.join(args.dir,'oxauth_persistence_duration.log')
if not os.path.exists(fn):
print("File {0} does not exists".format(fn))
return
rdict = {}
operations = {}
buckets = {}
for l in open(fn):
ls = l.split(',')
operation = ls[1].split('operation: ')[1]
bucket = ls[3].strip()[8:]
if not bucket in buckets:
buckets[bucket] = []
if not operation in args.operation:
continue
ds = ls[2].strip()[12:-1]
if 'M' in ds:
m,s=ds.split('M')
d= float(m)*60 + float(s)
else:
d = float(ds)
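        # e.g. a duration field of "1M12.5" parses to 1*60 + 12.5 = 72.5 seconds,
        # while "0.35" parses to 0.35 seconds.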
if d > args.min:
if len(ls)>6:
p = ls[5].strip()
else:
p = ls[4].strip()
if p in rdict:
rdict[p].append(d)
else:
rdict[p] = [d]
operations[p] = operation
if args.groupby == 'bucket':
print("Grouped by", args.groupby)
sn = 0
st = 0
result = []
for path in rdict:
data = rdict[path]
n = len(data)
sn += n
t = sum(data)
a = t/n
result.append({'count': n, 't_sum': t, 't_avg': a, 'key': path, 'operation': operations[path]})
st += t
print_result(result, "expression", "DURATIONS LOG ANALYSES")
if args.type == 'html':
print('<!DOCTYPE html>\n<html>\n<head>')
print('<style>table, th, td {padding-right:10px; padding-left:10px; border: 1px solid black; border-collapse: collapse;} * {font-family: Arial, Helvetica, sans-serif;}</style>')
print('</head>\n<body>\n')
if args.log in ('duration', 'all'):
durations()
if args.log in ('http', 'all'):
http_log()
if args.type == 'html':
print('\n</body>\n</html>')
|
the-stack_106_27303 | """
PDBBind dataset loader.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import time
import deepchem
import numpy as np
import pandas as pd
def featurize_pdbbind(data_dir=None, feat="grid", subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["-logKd/Ki"]
if "DEEPCHEM_DATA_DIR" in os.environ:
data_dir = os.environ["DEEPCHEM_DATA_DIR"]
else:
data_dir = "/tmp"
data_dir = os.path.join(data_dir, "pdbbind")
dataset_dir = os.path.join(data_dir, "%s_%s" % (subset, feat))
if not os.path.exists(dataset_dir):
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz'
)
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz'
)
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz'
)
os.system('tar -zxvf ' + os.path.join(data_dir, 'core_grid.tar.gz') + ' -C '
+ data_dir)
os.system('tar -zxvf ' + os.path.join(data_dir, 'full_grid.tar.gz') + ' -C '
+ data_dir)
os.system('tar -zxvf ' + os.path.join(data_dir, 'refined_grid.tar.gz') +
' -C ' + data_dir)
return deepchem.data.DiskDataset(dataset_dir), tasks
def load_pdbbind_grid(split="random",
featurizer="grid",
subset="full",
reload=True):
"""Load PDBBind datasets. Does not do train/test split"""
dataset, tasks = featurize_pdbbind(feat=featurizer, subset=subset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
return tasks, (train, valid, test), transformers
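# Minimal usage sketch (illustrative only; the first call downloads the featurized
# archives referenced above, and the split/featurizer/subset values are examples):
#   pdbbind_tasks, pdbbind_datasets, transformers = load_pdbbind_grid(
#       split="random", featurizer="grid", subset="core")
#   train_dataset, valid_dataset, test_dataset = pdbbind_datasets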
|
the-stack_106_27305 | import json
import logging
import re
from datetime import datetime
from funcy.colls import walk_values, get_in
from funcy.flow import silent
from funcy.seqs import flatten
from hivebase.exceptions import (
PostDoesNotExist,
VotingInvalidOnArchivedPost,
)
from hivebase.operations import CommentOptions
from .amount import Amount
from .commit import Commit
from .instance import shared_hived_instance
from .utils import construct_identifier, resolve_identifier
from .utils import parse_time, remove_from_dict
log = logging.getLogger(__name__)
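# Minimal usage sketch (assumes a reachable Hived node; the identifier below is
# purely illustrative):
#   post = Post("author/some-permlink")
#   print(post.reward, post.is_main_post(), post.time_elapsed())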
class Post(dict):
""" This object gets instantiated by Hive.streams and is used as an
abstraction layer for Comments in Hive
Args:
post (str or dict): ``author/permlink`` or raw ``comment`` as
dictionary.
hived_instance (Hived): Hived node to connect to
"""
def __init__(self, post, hived_instance=None):
self.hived = hived_instance or shared_hived_instance()
self.commit = Commit(hived_instance=self.hived)
# will set these during refresh()
self.patched = False
self.category = None
self.root_identifier = None
if isinstance(post, str): # From identifier
self.identifier = self.parse_identifier(post)
elif isinstance(post,
dict) and "author" in post and "permlink" in post:
self.identifier = construct_identifier(post["author"],
post["permlink"])
else:
raise ValueError("Post expects an identifier or a dict "
"with author and permlink!")
self.refresh()
@staticmethod
def parse_identifier(uri):
""" Extract canonical post id/url (i.e. strip any leading `@`). """
return uri.split('@')[-1]
def refresh(self):
post_author, post_permlink = resolve_identifier(self.identifier)
post = self.hived.get_content(post_author, post_permlink)
if not post["permlink"]:
raise PostDoesNotExist("Post does not exist: %s" % self.identifier)
# If this 'post' comes from an operation, it might carry a patch
if "body" in post and re.match("^@@", post["body"]):
self.patched = True
# Parse Times
parse_times = [
"active", "cashout_time", "created", "last_payout", "last_update",
"max_cashout_time"
]
for p in parse_times:
post[p] = parse_time(post.get(p, "1970-01-01T00:00:00"))
# Parse Amounts
hbd_amounts = [
"total_payout_value",
"max_accepted_payout",
"pending_payout_value",
"curator_payout_value",
"total_pending_payout_value",
"promoted",
]
for p in hbd_amounts:
post[p] = Amount(post.get(p, "0.000 HBD"))
# turn json_metadata into python dict
meta_str = post.get("json_metadata", "{}")
post['json_metadata'] = silent(json.loads)(meta_str) or {}
post["tags"] = []
post['community'] = ''
if isinstance(post['json_metadata'], dict):
if post["depth"] == 0:
tags = [post["parent_permlink"]]
tags += get_in(post, ['json_metadata', 'tags'], default=[])
post["tags"] = set(tags)
post['community'] = get_in(
post, ['json_metadata', 'community'], default='')
# If this post is a comment, retrieve the root comment
self.root_identifier, self.category = self._get_root_identifier(post)
self._store_post(post)
def _store_post(self, post):
# Store original values as obtained from the rpc
for key, value in post.items():
super(Post, self).__setitem__(key, value)
# Set attributes as well
for key in post:
setattr(self, key, post[key])
# also set identifier
super(Post, self).__setitem__("identifier", self.identifier)
def __getattr__(self, key):
return object.__getattribute__(self, key)
def __getitem__(self, key):
return super(Post, self).__getitem__(key)
def __repr__(self):
return "<Post-%s>" % self.identifier
__str__ = __repr__
def _get_root_identifier(self, post=None):
if not post:
post = self
m = re.match("/([^/]*)/@([^/]*)/([^#]*).*", post.get("url", ""))
if not m:
return "", ""
else:
category = m.group(1)
author = m.group(2)
permlink = m.group(3)
return construct_identifier(author, permlink), category
def get_replies(self):
""" Return **first-level** comments of the post.
"""
post_author, post_permlink = resolve_identifier(self.identifier)
replies = self.hived.get_content_replies(post_author, post_permlink)
return map(silent(Post), replies)
@staticmethod
def get_all_replies(root_post=None, comments=list(), all_comments=list()):
""" Recursively fetch all the child comments, and return them as a list.
Usage: all_comments = Post.get_all_replies(Post('foo/bar'))
"""
# see if our root post has any comments
if root_post:
return Post.get_all_replies(comments=list(root_post.get_replies()))
if not comments:
return all_comments
# recursively scrape children one depth layer at a time
children = list(flatten([list(x.get_replies()) for x in comments]))
if not children:
return all_comments or comments
return Post.get_all_replies(
comments=children, all_comments=comments + children)
@property
def reward(self):
"""Return a float value of estimated total HBD reward.
"""
return Amount(self.get("total_payout_value", "0 HBD")) + \
Amount(self.get("pending_payout_value", "0 HBD"))
def time_elapsed(self):
"""Return a timedelta on how old the post is.
"""
return datetime.utcnow() - self['created']
def is_main_post(self):
""" Retuns True if main post, and False if this is a comment (reply).
"""
return self['depth'] == 0
def is_comment(self):
""" Retuns True if post is a comment
"""
return self['depth'] > 0
def curation_reward_pct(self):
""" If post is less than 15 minutes old, it will incur a curation
reward penalty. """
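        # Worked example: at 450 s (7.5 minutes) this evaluates to (450 / 900) * 100 = 50.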
reward = (self.time_elapsed().seconds / 900) * 100
if reward > 100:
reward = 100
return reward
def export(self):
""" This method returns a dictionary that is type-safe to store as
JSON or in a database. """
self.refresh()
# Remove Hive instance object
safe_dict = remove_from_dict(self, ['hived', 'commit'])
# Convert Amount class objects into pure dictionaries
def decompose_amounts(item):
if type(item) == Amount:
return dict(item)
return item
return walk_values(decompose_amounts, safe_dict)
######################
# Commital Properties
######################
def upvote(self, weight=+100, voter=None):
""" Upvote the post
:param float weight: (optional) Weight for posting (-100.0 -
+100.0) defaults to +100.0
:param str voter: (optional) Voting account
"""
return self.vote(weight, voter=voter)
def downvote(self, weight=-100, voter=None):
""" Downvote the post
:param float weight: (optional) Weight for posting (-100.0 -
+100.0) defaults to -100.0
:param str voter: (optional) Voting account
"""
return self.vote(weight, voter=voter)
def vote(self, weight, voter=None):
""" Vote the post
:param float weight: Weight for posting (-100.0 - +100.0)
:param str voter: Voting account
"""
# Test if post is archived, if so, voting is worthless but just
# pollutes the blockchain and account history
        if self.get('net_rshares', None) is None:
raise VotingInvalidOnArchivedPost
return self.commit.vote(self.identifier, weight, account=voter)
def edit(self, body, meta=None, replace=False):
""" Edit an existing post
:param str body: Body of the reply
:param json meta: JSON meta object that can be attached to the
post. (optional)
:param bool replace: Instead of calculating a *diff*, replace
the post entirely (defaults to ``False``)
"""
if not meta:
meta = {}
original_post = self
if replace:
newbody = body
else:
import diff_match_patch
dmp = diff_match_patch.diff_match_patch()
patch = dmp.patch_make(original_post["body"], body)
newbody = dmp.patch_toText(patch)
if not newbody:
log.info("No changes made! Skipping ...")
return
reply_identifier = construct_identifier(
original_post["parent_author"], original_post["parent_permlink"])
new_meta = {}
if meta:
if original_post["json_metadata"]:
import json
new_meta = original_post["json_metadata"].update(meta)
else:
new_meta = meta
return self.commit.post(
original_post["title"],
newbody,
reply_identifier=reply_identifier,
author=original_post["author"],
permlink=original_post["permlink"],
json_metadata=new_meta,
)
def reply(self, body, title="", author="", meta=None):
""" Reply to an existing post
:param str body: Body of the reply
:param str title: Title of the reply post
:param str author: Author of reply (optional) if not provided
``default_user`` will be used, if present, else
a ``ValueError`` will be raised.
:param json meta: JSON meta object that can be attached to the
post. (optional)
"""
return self.commit.post(
title,
body,
json_metadata=meta,
author=author,
reply_identifier=self.identifier)
def set_comment_options(self, options):
op = CommentOptions(
**{
"author":
self["author"],
"permlink":
self["permlink"],
"max_accepted_payout":
options.get("max_accepted_payout",
str(self["max_accepted_payout"])),
"percent_hive_dollars":
int(
options.get("percent_hive_dollars",
self["percent_hive_dollars"] / 100) * 100),
"allow_votes":
options.get("allow_votes", self["allow_votes"]),
"allow_curation_rewards":
options.get("allow_curation_rewards", self[
"allow_curation_rewards"]),
})
return self.commit.finalizeOp(op, self["author"], "posting")
|
the-stack_106_27306 | """A parser for reading NASA JPL GipsyX timeseries file
Example:
--------
from analyx import parsers
p = parsers.parse_file(parser_name='gipsyx_series', file_path='NYA1.series')
data = p.as_dict()
Description:
------------
Reads data from files in GipsyX timeseries format.
"""
# Standard library imports
from typing import Any, Dict, List, Union
# External library imports
import numpy as np
# Midgard imports
from midgard.data import dataset
from midgard.data import position
from midgard.dev import log
from midgard.dev import plugins
from midgard.parsers import LineParser
@plugins.register
class GipsyxSeriesParser(LineParser):
"""A parser for reading GipsyX timeseries file
Following **data** are available after reading GipsyX residual output file:
| Key | Description |
|----------------------|--------------------------------------------------------------------------------------|
| corr_en | Correlation East-North. |
| corr_ev | Correlation East-Vertical. |
| corr_nv | Correlation North-Vertical. |
| day | Day |
| decimalyear | Date in unit year. |
| east | East coordinate in [m]. |
    | east_sigma           | Standard deviation of east coordinate in [m].                                          |
| hour | Hour |
| minute | Minute |
| month | Month |
| north | North coordinate in [m]. |
    | north_sigma          | Standard deviation of north coordinate in [m].                                         |
| second | Second |
| time_past_j2000 | Time given in GPS seconds past J2000, whereby GipsyX uses following definition: |
| | J2000 is continuous seconds past Jan. 1, 2000 11:59:47 UTC. |
| vertical | Vertical coordinate in [m]. |
    | vertical_sigma       | Standard deviation of vertical coordinate in [m].                                      |
| year | Year |
and **meta**-data:
| Key | Description |
|----------------------|--------------------------------------------------------------------------------------|
| \__data_path__ | File path |
| \__parser_name__ | Parser name |
"""
def setup_parser(self) -> Dict[str, Any]:
"""Set up information needed for the parser
This should return a dictionary with all parameters needed by np.genfromtxt to do the actual parsing.
Returns:
Dict: Parameters needed by np.genfromtxt to parse the input file.
"""
# ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----
# 1997.41546410 -0.129049 -0.184509 -0.104704 0.000704 0.000885 0.004622 0.057219 0.479851 0.539105 -81561750.00 1997 6 1 11 57 30
# 1997.41820195 -0.131761 -0.188031 -0.106736 0.000698 0.000846 0.004422 0.010166 0.229144 0.489866 -81475350.00 1997 6 2 11 57 30
# 1997.42093981 -0.128925 -0.186854 -0.109757 0.000743 0.000918 0.004718 0.031938 -0.126787 0.490283 -81388950.00 1997 6 3 11 57 30
return dict(
skip_header=1,
names=(
"decimalyear",
"east",
"north",
"vertical",
"east_sigma",
"north_sigma",
"vertical_sigma",
"corr_en",
"corr_ev",
"corr_nv",
"time_past_j2000",
"year",
"month",
"day",
"hour",
"minute",
"second",
),
delimiter=(13, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 6, 3, 3, 3, 3, 3),
dtype=(
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
"f8",
),
autostrip=True,
)
#
# WRITE DATA
#
def as_dataset(self, ref_pos: Union[np.ndarray, List[float]] = [0.0, 0.0, 0.0]) -> "Dataset":
"""Return the parsed data as a Dataset
Args:
ref_pos: Reference position given in terrestrial reference system and meters
Returns:
Midgard Dataset where timeseries data are stored with following fields:
| Field | Type | Description |
|---------------------|-------------------|----------------------------------------------------------------|
| obs.dpos | PositionDelta | Position delta object referred to a reference position |
| obs.dpos_sigma_east | numpy.array | Standard deviation of east position |
| obs.dpos_sigma_north| numpy.array | Standard deviation of north position |
| obs.dpos_sigma_up | numpy.array | Standard deviation of up position |
| time | Time | Parameter time given as TimeTable object |
"""
# Initialize dataset
dset = dataset.Dataset()
if not self.data:
log.warn("No data in {self.file_path}.")
return dset
dset.num_obs = len(self.data["decimalyear"])
dset.meta.update(self.meta)
# Add position
ref_pos = position.Position(np.repeat(np.array([ref_pos]), dset.num_obs, axis=0), system="trs")
dset.add_position_delta(
name="obs.dpos",
val=np.stack((self.data["east"], self.data["north"], self.data["vertical"]), axis=1),
system="enu",
ref_pos=ref_pos,
)
# TODO: sigma functionality has to be improved: dpos_sigma.enu.east, dpos_sigma.trs.x
## Add position sigma
# sigma = np.stack((self.data["east_sigma"], self.data["north_sigma"], self.data["vertical_sigma"]), axis=1)
# dset.add_sigma(name="dpos_sigma", val=dset.dpos.val, sigma=sigma, unit="meter")
dset.add_float(name="obs.dpos_sigma_east", val=self.data["east_sigma"], unit="meter")
dset.add_float(name="obs.dpos_sigma_north", val=self.data["north_sigma"], unit="meter")
dset.add_float(name="obs.dpos_sigma_up", val=self.data["vertical_sigma"], unit="meter")
# Add time
dset.add_time(
name="time", val=self.data["decimalyear"], scale="utc", fmt="decimalyear", write_level="operational"
)
return dset
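# Minimal usage sketch (mirrors the module docstring; the file name is illustrative):
#   p = parsers.parse_file(parser_name="gipsyx_series", file_path="NYA1.series")
#   dset = p.as_dataset()
#   print(dset.num_obs)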
|
the-stack_106_27307 | # -*- coding: utf-8 -*-
'''
Jinja loading utils to enable a more powerful backend for jinja templates
'''
# Import python libs
from __future__ import absolute_import
import json
import pprint
import logging
from os import path
from functools import wraps
# Import third party libs
import salt.ext.six as six
from jinja2 import BaseLoader, Markup, TemplateNotFound, nodes
from jinja2.environment import TemplateModule
from jinja2.ext import Extension
from jinja2.exceptions import TemplateRuntimeError
import jinja2
import yaml
# Import salt libs
import salt
import salt.utils
import salt.utils.url
import salt.fileclient
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
__all__ = [
'SaltCacheLoader',
'SerializerExtension'
]
# To dump OrderedDict objects as regular dicts. Used by the yaml
# template filter.
class OrderedDictDumper(yaml.Dumper): # pylint: disable=W0232
pass
yaml.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict,
Dumper=OrderedDictDumper)
class SaltCacheLoader(BaseLoader):
'''
A special jinja Template Loader for salt.
Requested templates are always fetched from the server
to guarantee that the file is up to date.
Templates are cached like regular salt states
and only loaded once per loader instance.
'''
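    # Minimal usage sketch (assumes a populated minion ``opts`` dict; values are illustrative):
    #   loader = SaltCacheLoader(opts, saltenv='base')
    #   jinja_env = jinja2.Environment(loader=loader, extensions=[SerializerExtension])
    #   template = jinja_env.get_template('mytemplate.jinja')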
def __init__(self, opts, saltenv='base', encoding='utf-8',
pillar_rend=False):
self.opts = opts
self.saltenv = saltenv
self.encoding = encoding
if self.opts['file_roots'] is self.opts['pillar_roots']:
self.searchpath = opts['file_roots'][saltenv]
else:
self.searchpath = [path.join(opts['cachedir'], 'files', saltenv)]
log.debug('Jinja search path: %s', self.searchpath)
self._file_client = None
self.cached = []
self.pillar_rend = pillar_rend
def file_client(self):
'''
Return a file client. Instantiates on first call.
'''
if not self._file_client:
self._file_client = salt.fileclient.get_file_client(
self.opts, self.pillar_rend)
return self._file_client
def cache_file(self, template):
'''
Cache a file from the salt master
'''
saltpath = salt.utils.url.create(template)
self.file_client().get_file(saltpath, '', True, self.saltenv)
def check_cache(self, template):
'''
Cache a file only once
'''
if template not in self.cached:
self.cache_file(template)
self.cached.append(template)
def get_source(self, environment, template):
# checks for relative '..' paths
if '..' in template:
log.warning(
'Discarded template path \'{0}\', relative paths are '
'prohibited'.format(template)
)
raise TemplateNotFound(template)
self.check_cache(template)
if environment and template:
tpldir = path.dirname(template).replace('\\', '/')
tpldata = {
'tplfile': template,
'tpldir': '.' if tpldir == '' else tpldir,
'tpldot': tpldir.replace('/', '.'),
}
environment.globals.update(tpldata)
# pylint: disable=cell-var-from-loop
for spath in self.searchpath:
filepath = path.join(spath, template)
try:
with salt.utils.fopen(filepath, 'rb') as ifile:
contents = ifile.read().decode(self.encoding)
mtime = path.getmtime(filepath)
def uptodate():
try:
return path.getmtime(filepath) == mtime
except OSError:
return False
return contents, filepath, uptodate
except IOError:
# there is no file under current path
continue
# pylint: enable=cell-var-from-loop
# there is no template file within searchpaths
raise TemplateNotFound(template)
class PrintableDict(OrderedDict):
'''
Ensures that dict str() and repr() are YAML friendly.
.. code-block:: python
mapping = OrderedDict([('a', 'b'), ('c', None)])
print mapping
# OrderedDict([('a', 'b'), ('c', None)])
decorated = PrintableDict(mapping)
print decorated
# {'a': 'b', 'c': None}
'''
def __str__(self):
output = []
for key, value in six.iteritems(self):
if isinstance(value, six.string_types):
# keeps quotes around strings
output.append('{0!r}: {1!r}'.format(key, value)) # pylint: disable=repr-flag-used-in-string
else:
# let default output
output.append('{0!r}: {1!s}'.format(key, value)) # pylint: disable=repr-flag-used-in-string
return '{' + ', '.join(output) + '}'
def __repr__(self): # pylint: disable=W0221
output = []
for key, value in six.iteritems(self):
# Raw string formatter required here because this is a repr
# function.
output.append('{0!r}: {1!r}'.format(key, value)) # pylint: disable=repr-flag-used-in-string
return '{' + ', '.join(output) + '}'
def ensure_sequence_filter(data):
'''
Ensure sequenced data.
**sequence**
ensure that parsed data is a sequence
.. code-block:: yaml
{% set my_string = "foo" %}
{% set my_list = ["bar", ] %}
{% set my_dict = {"baz": "qux"} %}
{{ my_string|sequence|first }}
{{ my_list|sequence|first }}
{{ my_dict|sequence|first }}
will be rendered as:
.. code-block:: yaml
foo
bar
baz
'''
if not isinstance(data, (list, tuple, set, dict)):
return [data]
return data
@jinja2.contextfunction
def show_full_context(ctx):
return ctx
class SerializerExtension(Extension, object):
'''
Yaml and Json manipulation.
**Format filters**
Allows jsonifying or yamlifying any data structure. For example, this dataset:
.. code-block:: python
data = {
'foo': True,
'bar': 42,
'baz': [1, 2, 3],
'qux': 2.0
}
.. code-block:: jinja
yaml = {{ data|yaml }}
json = {{ data|json }}
python = {{ data|python }}
will be rendered as::
yaml = {bar: 42, baz: [1, 2, 3], foo: true, qux: 2.0}
json = {"baz": [1, 2, 3], "foo": true, "bar": 42, "qux": 2.0}
python = {'bar': 42, 'baz': [1, 2, 3], 'foo': True, 'qux': 2.0}
The yaml filter takes an optional flow_style parameter to control the
default-flow-style parameter of the YAML dumper.
.. code-block:: jinja
{{ data|yaml(False) }}
will be rendered as:
.. code-block:: yaml
bar: 42
baz:
- 1
- 2
- 3
foo: true
qux: 2.0
**Load filters**
Strings and variables can be deserialized with **load_yaml** and
**load_json** tags and filters. It allows one to manipulate data directly
in templates, easily:
.. code-block:: jinja
{%- set yaml_src = "{foo: it works}"|load_yaml %}
{%- set json_src = "{'bar': 'for real'}"|load_json %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
**Load tags**
Salt implements ``import_yaml`` and ``import_json`` tags. They work like
the `import tag`_, except that the document is also deserialized.
Syntaxes are ``{% load_yaml as [VARIABLE] %}[YOUR DATA]{% endload %}``
and ``{% load_json as [VARIABLE] %}[YOUR DATA]{% endload %}``
For example:
.. code-block:: jinja
{% load_yaml as yaml_src %}
foo: it works
{% endload %}
{% load_json as json_src %}
{
"bar": "for real"
}
{% endload %}
Dude, {{ yaml_src.foo }} {{ json_src.bar }}!
will be rendered as::
Dude, it works for real!
**Import tags**
External files can be imported and made available as a Jinja variable.
.. code-block:: jinja
{% import_yaml "myfile.yml" as myfile %}
{% import_json "defaults.json" as defaults %}
{% import_text "completeworksofshakespeare.txt" as poems %}
**Catalog**
``import_*`` and ``load_*`` tags will automatically expose their
target variable to import. This feature makes catalog of data to
handle.
for example:
.. code-block:: jinja
# doc1.sls
{% load_yaml as var1 %}
foo: it works
{% endload %}
{% load_yaml as var2 %}
bar: for real
{% endload %}
.. code-block:: jinja
# doc2.sls
{% from "doc1.sls" import var1, var2 as local2 %}
{{ var1.foo }} {{ local2.bar }}
.. _`import tag`: http://jinja.pocoo.org/docs/templates/#import
'''
tags = set(['load_yaml', 'load_json', 'import_yaml', 'import_json',
'load_text', 'import_text'])
def __init__(self, environment):
super(SerializerExtension, self).__init__(environment)
self.environment.filters.update({
'yaml': self.format_yaml,
'yaml_safe': self.format_yaml_safe,
'json': self.format_json,
'python': self.format_python,
'load_yaml': self.load_yaml,
'load_json': self.load_json,
'load_text': self.load_text,
})
if self.environment.finalize is None:
self.environment.finalize = self.finalizer
else:
finalizer = self.environment.finalize
@wraps(finalizer)
def wrapper(self, data):
return finalizer(self.finalizer(data))
self.environment.finalize = wrapper
def finalizer(self, data):
'''
Ensure that printed mappings are YAML friendly.
'''
def explore(data):
if isinstance(data, (dict, OrderedDict)):
return PrintableDict(
[(key, explore(value)) for key, value in six.iteritems(data)]
)
elif isinstance(data, (list, tuple, set)):
return data.__class__([explore(value) for value in data])
return data
return explore(data)
def format_json(self, value, sort_keys=True, indent=None):
return Markup(json.dumps(value, sort_keys=sort_keys, indent=indent).strip())
def format_yaml(self, value, flow_style=True):
yaml_txt = yaml.dump(value, default_flow_style=flow_style,
Dumper=OrderedDictDumper).strip()
if yaml_txt.endswith('\n...\n'):
            yaml_txt = yaml_txt[:len(yaml_txt) - 5]
return Markup(yaml_txt)
def format_yaml_safe(self, value, flow_style=True):
yaml_txt = yaml.safe_dump(value, default_flow_style=flow_style,
Dumper=OrderedDictDumper).strip()
if yaml_txt.endswith('\n...\n'):
            yaml_txt = yaml_txt[:len(yaml_txt) - 5]
return Markup(yaml_txt)
def format_python(self, value):
return Markup(pprint.pformat(value).strip())
def load_yaml(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return yaml.safe_load(value)
except AttributeError:
raise TemplateRuntimeError(
'Unable to load yaml from {0}'.format(value))
def load_json(self, value):
if isinstance(value, TemplateModule):
value = str(value)
try:
return json.loads(value)
except (ValueError, TypeError, AttributeError):
raise TemplateRuntimeError(
'Unable to load json from {0}'.format(value))
def load_text(self, value):
if isinstance(value, TemplateModule):
value = str(value)
return value
_load_parsers = set(['load_yaml', 'load_json', 'load_text'])
def parse(self, parser):
if parser.stream.current.value == 'import_yaml':
return self.parse_yaml(parser)
elif parser.stream.current.value == 'import_json':
return self.parse_json(parser)
elif parser.stream.current.value == 'import_text':
return self.parse_text(parser)
elif parser.stream.current.value in self._load_parsers:
return self.parse_load(parser)
parser.fail('Unknown format ' + parser.stream.current.value,
parser.stream.current.lineno)
# pylint: disable=E1120,E1121
def parse_load(self, parser):
filter_name = parser.stream.current.value
lineno = next(parser.stream).lineno
if filter_name not in self.environment.filters:
parser.fail('Unable to parse {0}'.format(filter_name), lineno)
parser.stream.expect('name:as')
target = parser.parse_assign_target()
macro_name = '_' + parser.free_identifier().name
macro_body = parser.parse_statements(
('name:endload',), drop_needle=True)
return [
nodes.Macro(
macro_name,
[],
[],
macro_body
).set_lineno(lineno),
nodes.Assign(
target,
nodes.Filter(
nodes.Call(
nodes.Name(macro_name, 'load').set_lineno(lineno),
[],
[],
None,
None
).set_lineno(lineno),
filter_name,
[],
[],
None,
None
).set_lineno(lineno)
).set_lineno(lineno)
]
def parse_yaml(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_yaml',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
def parse_json(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_json',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
def parse_text(self, parser):
import_node = parser.parse_import()
target = import_node.target
lineno = import_node.lineno
return [
import_node,
nodes.Assign(
nodes.Name(target, 'store').set_lineno(lineno),
nodes.Filter(
nodes.Name(target, 'load').set_lineno(lineno),
'load_text',
[],
[],
None,
None
)
.set_lineno(lineno)
).set_lineno(lineno)
]
# pylint: enable=E1120,E1121
|
the-stack_106_27310 | """A benchmark to be run externally.
Executes a program that might make heavy use of Result/Option types
in one of two ways: classically, with exceptions, or using result types.
The program checks several data stores (in memory to minimize interference
from slow IO &c.) in order for a key. If it finds it, it gets the value,
adds something to it, and then overwrites the value.
"""
import sys
import typing as t
from timeit import timeit
from safetywrap import Some, Nothing, Ok, Err, Option, Result
T = t.TypeVar("T")
class ClassicalDataStore:
def __init__(self, values: dict = None) -> None:
self._values = values or {}
def connect(self, fail: bool = False) -> "ClassicalDataStore":
"""'Connect' to the store."""
if fail:
raise RuntimeError("Failed to connect")
return self
def get(self, key: str) -> t.Any:
"""Return a value from the store."""
return self._values.get(key)
def insert(self, key: str, val: T, overwrite: bool = False) -> T:
"""Insert the value and return it."""
if key in self._values and not overwrite:
raise KeyError("Key already exists")
self._values[key] = val
return val
class MonadicDataStore:
"""Using the monadic types."""
def __init__(self, values: dict = None) -> None:
self._values = values or {}
def connect(self, fail: bool = False) -> Result["MonadicDataStore", str]:
if fail:
return Err("failed to connect")
return Ok(self)
def get(self, key: str) -> Option[t.Any]:
"""Return a value from the store."""
if key in self._values:
return Some(self._values[key])
return Nothing()
def insert(
self, key: str, val: T, overwrite: bool = False
) -> Result[T, str]:
"""Insert the value and return it."""
if key in self._values and not overwrite:
return Err("Key already exists")
self._values[key] = val
return Ok(val)
class Classical:
"""Run the program in the classical way."""
def __init__(self) -> None:
self._stores = {
0: ClassicalDataStore(),
1: ClassicalDataStore(),
2: ClassicalDataStore(),
3: ClassicalDataStore({"you": "me"}),
}
def run(self) -> None:
"""Run the program."""
for store in self._stores.values():
try:
store = store.connect()
except RuntimeError:
continue
val = store.get("you")
if val is not None:
new_val = val + "et"
try:
inserted = store.insert("you", new_val)
except KeyError:
# oops, need to specify overwrite
inserted = store.insert("you", new_val, overwrite=True)
assert inserted == "meet"
break
else:
raise RuntimeError("Could not get value anywhere.")
class Monadic:
"""Use the monadic types."""
def __init__(self) -> None:
self._stores = {
0: MonadicDataStore(),
1: MonadicDataStore(),
2: MonadicDataStore(),
3: MonadicDataStore({"you": "me"}),
}
def run(self) -> None:
"""Run the program."""
for unconnected in self._stores.values():
connected = unconnected.connect()
if connected.is_err():
continue
store = connected.unwrap()
inserted = (
store.get("you")
.ok_or("no such val")
.map(lambda val: str(val + "et"))
.and_then(
lambda val: store.insert("you", val).or_else(
lambda _: store.insert("you", val, overwrite=True)
)
)
)
if inserted.is_ok():
assert inserted.unwrap() == "meet"
break
else:
raise RuntimeError("Could not get value anywhere")
if __name__ == "__main__":
to_run = sys.argv[1].lower()
switch: t.Dict[str, t.Callable[[], None]] = {
"classical": lambda: Classical().run(),
"monadic": lambda: Monadic().run(),
}
if to_run not in switch:
raise RuntimeError("No such method: {}".format(to_run))
if len(sys.argv) > 2 and sys.argv[2] == "timeit":
# run internal timings
NUMBER = int(1e6)
taken = timeit("switch[to_run]()", globals=globals(), number=NUMBER)
print(taken / NUMBER)
else:
switch[to_run]()
|
the-stack_106_27312 | # -*- coding: utf-8 -*-
'''
Package support for openSUSE via the zypper package manager
:depends: - ``rpm`` Python module. Install with ``zypper install rpm-python``
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
'''
# Import python libs
from __future__ import absolute_import
import copy
import logging
import re
import os
import time
import datetime
# Import 3rd-party libs
# pylint: disable=import-error,redefined-builtin,no-name-in-module
import salt.ext.six as six
from salt.exceptions import SaltInvocationError
import salt.utils.event
from salt.ext.six.moves import configparser
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
# pylint: enable=import-error,redefined-builtin,no-name-in-module
from xml.dom import minidom as dom
from xml.parsers.expat import ExpatError
# Import salt libs
import salt.utils
import salt.utils.systemd
from salt.exceptions import (
CommandExecutionError, MinionError)
log = logging.getLogger(__name__)
HAS_ZYPP = False
ZYPP_HOME = '/etc/zypp'
LOCKS = '{0}/locks'.format(ZYPP_HOME)
REPOS = '{0}/repos.d'.format(ZYPP_HOME)
DEFAULT_PRIORITY = 99
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Set the virtual pkg module if the os is openSUSE
'''
if __grains__.get('os_family', '') != 'Suse':
return (False, "Module zypper: non SUSE OS not suppored by zypper package manager")
# Not all versions of SUSE use zypper, check that it is available
if not salt.utils.which('zypper'):
return (False, "Module zypper: zypper package manager not found")
return __virtualname__
class _Zypper(object):
'''
Zypper parallel caller.
Validates the result and either raises an exception or reports an error.
Allows serial zypper calls (first came, first won).
'''
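    # Typical call patterns used throughout this module (arguments shown are illustrative):
    #   __zypper__.nolock.xml.call('list-updates')          # read-only call, XML output
    #   __zypper__.refreshable.call('refresh', '--force')   # plain output, repo refresh allowed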
SUCCESS_EXIT_CODES = [0, 100, 101, 102, 103]
LOCK_EXIT_CODE = 7
XML_DIRECTIVES = ['-x', '--xmlout']
ZYPPER_LOCK = '/var/run/zypp.pid'
TAG_RELEASED = 'zypper/released'
TAG_BLOCKED = 'zypper/blocked'
def __init__(self):
'''
Constructor
'''
self.__called = False
self._reset()
def _reset(self):
'''
Resets values of the call setup.
:return:
'''
self.__cmd = ['zypper', '--non-interactive']
self.__exit_code = 0
self.__call_result = dict()
self.__error_msg = ''
self.__env = {'SALT_RUNNING': "1"} # Subject to change
# Call config
self.__xml = False
self.__no_lock = False
self.__no_raise = False
self.__refresh = False
self.__ignore_repo_failure = False
self.__systemd_scope = False
def __call__(self, *args, **kwargs):
'''
:param args:
:param kwargs:
:return:
'''
# Ignore exit code for 106 (repo is not available)
if 'no_repo_failure' in kwargs:
self.__ignore_repo_failure = kwargs['no_repo_failure']
if 'systemd_scope' in kwargs:
self.__systemd_scope = kwargs['systemd_scope']
return self
def __getattr__(self, item):
'''
Call configurator.
:param item:
:return:
'''
# Reset after the call
if self.__called:
self._reset()
self.__called = False
if item == 'xml':
self.__xml = True
elif item == 'nolock':
self.__no_lock = True
elif item == 'noraise':
self.__no_raise = True
elif item == 'refreshable':
self.__refresh = True
elif item == 'call':
return self.__call
else:
return self.__dict__[item]
# Prevent the use of "refreshable" together with "nolock".
if self.__no_lock:
self.__no_lock = not self.__refresh
return self
@property
def exit_code(self):
return self.__exit_code
@exit_code.setter
def exit_code(self, exit_code):
self.__exit_code = int(exit_code or '0')
@property
def error_msg(self):
return self.__error_msg
@error_msg.setter
def error_msg(self, msg):
if self._is_error():
self.__error_msg = msg and os.linesep.join(msg) or "Check Zypper's logs."
@property
def stdout(self):
return self.__call_result.get('stdout', '')
@property
def stderr(self):
return self.__call_result.get('stderr', '')
@property
def pid(self):
return self.__call_result.get('pid', '')
def _is_error(self):
'''
Is this is an error code?
:return:
'''
return self.exit_code not in self.SUCCESS_EXIT_CODES
def _is_lock(self):
'''
Is this is a lock error code?
:return:
'''
return self.exit_code == self.LOCK_EXIT_CODE
def _is_xml_mode(self):
'''
Is Zypper's output is in XML format?
:return:
'''
return [itm for itm in self.XML_DIRECTIVES if itm in self.__cmd] and True or False
def _check_result(self):
'''
Check and set the result of a zypper command. In case of an error,
either raise a CommandExecutionError or extract the error.
result
The result of a zypper command called with cmd.run_all
'''
if not self.__call_result:
raise CommandExecutionError('No output result from Zypper?')
self.exit_code = self.__call_result['retcode']
if self._is_lock():
return False
if self._is_error():
_error_msg = list()
if not self._is_xml_mode():
msg = self.__call_result['stderr'] and self.__call_result['stderr'].strip() or ""
if msg:
_error_msg.append(msg)
else:
try:
doc = dom.parseString(self.__call_result['stdout'])
except ExpatError as err:
log.error(err)
doc = None
if doc:
msg_nodes = doc.getElementsByTagName('message')
for node in msg_nodes:
if node.getAttribute('type') == 'error':
_error_msg.append(node.childNodes[0].nodeValue)
elif self.__call_result['stderr'].strip():
_error_msg.append(self.__call_result['stderr'].strip())
self.error_msg = _error_msg
return True
def __call(self, *args, **kwargs):
'''
Call Zypper.
:param state:
:return:
'''
self.__called = True
if self.__xml:
self.__cmd.append('--xmlout')
if not self.__refresh:
self.__cmd.append('--no-refresh')
self.__cmd.extend(args)
kwargs['output_loglevel'] = 'trace'
kwargs['python_shell'] = False
kwargs['env'] = self.__env.copy()
if self.__no_lock:
kwargs['env']['ZYPP_READONLY_HACK'] = "1" # Disables locking for read-only operations. Do not try that at home!
# Zypper call will stuck here waiting, if another zypper hangs until forever.
# However, Zypper lock needs to be always respected.
was_blocked = False
while True:
cmd = []
if self.__systemd_scope:
cmd.extend(['systemd-run', '--scope'])
cmd.extend(self.__cmd)
log.debug("Calling Zypper: " + ' '.join(cmd))
self.__call_result = __salt__['cmd.run_all'](cmd, **kwargs)
if self._check_result():
break
if os.path.exists(self.ZYPPER_LOCK):
try:
data = __salt__['ps.proc_info'](int(open(self.ZYPPER_LOCK).readline()),
attrs=['pid', 'name', 'cmdline', 'create_time'])
data['cmdline'] = ' '.join(data['cmdline'])
data['info'] = 'Blocking process created at {0}.'.format(
datetime.datetime.utcfromtimestamp(data['create_time']).isoformat())
data['success'] = True
except Exception as err:
data = {'info': 'Unable to retrieve information about blocking process: {0}'.format(err.message),
'success': False}
else:
data = {'info': 'Zypper is locked, but no Zypper lock has been found.', 'success': False}
if not data['success']:
log.debug("Unable to collect data about blocking process.")
else:
log.debug("Collected data about blocking process.")
__salt__['event.fire_master'](data, self.TAG_BLOCKED)
log.debug("Fired a Zypper blocked event to the master with the data: {0}".format(str(data)))
log.debug("Waiting 5 seconds for Zypper gets released...")
time.sleep(5)
if not was_blocked:
was_blocked = True
if was_blocked:
__salt__['event.fire_master']({'success': not len(self.error_msg),
'info': self.error_msg or 'Zypper has been released'},
self.TAG_RELEASED)
if self.error_msg and not self.__no_raise and not self.__ignore_repo_failure:
raise CommandExecutionError('Zypper command failure: {0}'.format(self.error_msg))
return self._is_xml_mode() and dom.parseString(self.__call_result['stdout']) or self.__call_result['stdout']
__zypper__ = _Zypper()
def _systemd_scope():
return salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True)
def list_upgrades(refresh=True, **kwargs):
'''
List all available package upgrades on this system
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
if refresh:
refresh_db()
ret = dict()
cmd = ['list-updates']
if 'fromrepo' in kwargs:
repo_name = kwargs['fromrepo']
if not isinstance(repo_name, six.string_types):
repo_name = str(repo_name)
cmd.extend(['--repo', repo_name])
for update_node in __zypper__.nolock.xml.call(*cmd).getElementsByTagName('update'):
if update_node.getAttribute('kind') == 'package':
ret[update_node.getAttribute('name')] = update_node.getAttribute('edition')
return ret
# Provide a list_updates function for those used to using zypper list-updates
list_updates = salt.utils.alias_function(list_upgrades, 'list_updates')
def info_installed(*names, **kwargs):
'''
Return the information of the named package(s), installed on the system.
:param names:
Names of the packages to get information about.
:param attr:
Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
Valid attributes are:
version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url,
summary, description.
:param errors:
Handle RPM field errors (true|false). By default, various mistakes in the textual fields are simply ignored and
omitted from the data. Otherwise a field with a mistake is not returned, instead a 'N/A (bad UTF-8)'
(not available, broken) text is returned.
Valid attributes are:
ignore, report
CLI example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor
salt '*' pkg.info_installed <package1> <package2> <package3> ... attr=version,vendor errors=true
'''
ret = dict()
for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
if isinstance(value, six.string_types):
# Check, if string is encoded in a proper UTF-8
if six.PY3:
value_ = value.encode('UTF-8', 'ignore').decode('UTF-8', 'ignore')
else:
value_ = value.decode('UTF-8', 'ignore').encode('UTF-8', 'ignore')
if value != value_:
value = kwargs.get('errors') and value_ or 'N/A (invalid UTF-8)'
log.error('Package {0} has bad UTF-8 code in {1}: {2}'.format(pkg_name, key, value))
if key == 'source_rpm':
t_nfo['source'] = value
else:
t_nfo[key] = value
ret[pkg_name] = t_nfo
return ret
def info_available(*names, **kwargs):
'''
Return the information of the named package available for the system.
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed or not.
CLI example:
.. code-block:: bash
salt '*' pkg.info_available <package1>
salt '*' pkg.info_available <package1> <package2> <package3> ...
'''
ret = {}
if not names:
return ret
else:
names = sorted(list(set(names)))
# Refresh db before extracting the latest package
if kwargs.get('refresh', True):
refresh_db()
pkg_info = []
batch = names[:]
batch_size = 200
# Run in batches
while batch:
pkg_info.extend(re.split(r"Information for package*",
__zypper__.nolock.call('info', '-t', 'package', *batch[:batch_size])))
batch = batch[batch_size:]
for pkg_data in pkg_info:
nfo = {}
for line in [data for data in pkg_data.split('\n') if ':' in data]:
if line.startswith('-----'):
continue
kw = [data.strip() for data in line.split(':', 1)]
if len(kw) == 2 and kw[1]:
nfo[kw[0].lower()] = kw[1]
if nfo.get('name'):
name = nfo.pop('name')
ret[name] = nfo
if nfo.get('status'):
nfo['status'] = nfo.get('status')
if nfo.get('installed'):
nfo['installed'] = nfo.get('installed').lower() == 'yes' and True or False
return ret
def info(*names, **kwargs):
'''
.. deprecated:: Nitrogen
Use :py:func:`~salt.modules.pkg.info_available` instead.
Return the information of the named package available for the system.
CLI example:
.. code-block:: bash
salt '*' pkg.info <package1>
salt '*' pkg.info <package1> <package2> <package3> ...
'''
salt.utils.warn_until('Nitrogen', "Please use 'pkg.info_available' instead")
return info_available(*names)
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
dict will be returned for that package.
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed or not.
CLI example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
ret = dict()
if not names:
return ret
names = sorted(list(set(names)))
package_info = info_available(*names, **kwargs)
for name in names:
pkg_info = package_info.get(name, {})
status = pkg_info.get('status', '').lower()
if status.find('not installed') > -1 or status.find('out-of-date') > -1:
ret[name] = pkg_info.get('version')
# Return a string if only one package name passed
if len(names) == 1 and len(ret):
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.alias_function(latest_version, 'available_version')
def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed or not.
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
# The "not not" tactic is intended here as it forces the return to be False.
return not not latest_version(name, **kwargs) # pylint: disable=C0113
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty dict if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
return __salt__['pkg_resource.version'](*names, **kwargs) or {}
def version_cmp(ver1, ver2, ignore_epoch=False):
'''
.. versionadded:: 2015.5.4
Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if
ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem
making the comparison.
ignore_epoch : False
Set to ``True`` to ignore the epoch when comparing versions
.. versionadded:: 2015.8.10,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
'''
return __salt__['lowpkg.version_cmp'](ver1, ver2, ignore_epoch=ignore_epoch)
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict with versions
as a comma separated string::
{'<package_name>': '<version>[,<version>...]'}
versions_as_list:
If set to true, the versions are provided as a list
{'<package_name>': ['<version>', '<version>']}
removed:
not supported
purge_desired:
not supported
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
cmd = ['rpm', '-qa', '--queryformat', '%{NAME}_|-%{VERSION}_|-%{RELEASE}_|-%|EPOCH?{%{EPOCH}}:{}|\\n']
ret = {}
for line in __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False).splitlines():
name, pkgver, rel, epoch = line.split('_|-')
if epoch:
pkgver = '{0}:{1}'.format(epoch, pkgver)
if rel:
pkgver += '-{0}'.format(rel)
__salt__['pkg_resource.add_pkg'](ret, name, pkgver)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def _get_configured_repos():
'''
Get all the info about repositories from the configurations.
'''
repos_cfg = configparser.ConfigParser()
repos_cfg.read([REPOS + '/' + fname for fname in os.listdir(REPOS)])
return repos_cfg
def _get_repo_info(alias, repos_cfg=None):
'''
Get one repo meta-data.
'''
try:
meta = dict((repos_cfg or _get_configured_repos()).items(alias))
meta['alias'] = alias
for key, val in six.iteritems(meta):
if val in ['0', '1']:
meta[key] = int(meta[key]) == 1
elif val == 'NONE':
meta[key] = None
return meta
except (ValueError, configparser.NoSectionError):
return {}
def get_repo(repo, **kwargs): # pylint: disable=unused-argument
'''
Display a repo.
CLI Example:
.. code-block:: bash
salt '*' pkg.get_repo alias
'''
return _get_repo_info(repo)
def list_repos():
'''
Lists all repos.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
'''
repos_cfg = _get_configured_repos()
all_repos = {}
for alias in repos_cfg.sections():
all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)
return all_repos
def del_repo(repo):
'''
Delete a repo.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo alias
'''
repos_cfg = _get_configured_repos()
for alias in repos_cfg.sections():
if alias == repo:
doc = __zypper__.xml.call('rr', '--loose-auth', '--loose-query', alias)
msg = doc.getElementsByTagName('message')
if doc.getElementsByTagName('progress') and msg:
return {
repo: True,
'message': msg[0].childNodes[0].nodeValue,
}
raise CommandExecutionError('Repository \'{0}\' not found.'.format(repo))
def mod_repo(repo, **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the following values are specified:
repo or alias
alias by which the zypper refers to the repo
url, mirrorlist or baseurl
the URL for zypper to reference
enabled
enable or disable (True or False) repository,
but do not remove if disabled.
refresh
enable or disable (True or False) auto-refresh of the repository.
cache
Enable or disable (True or False) RPM files caching.
gpgcheck
        Enable or disable (True or False) GPG check for this repository.
gpgautoimport
Automatically trust and import new repository.
Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a
url can only be deleted if a mirrorlist is specified (or vice versa).
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo alias alias=new_alias
salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
'''
repos_cfg = _get_configured_repos()
added = False
# An attempt to add new one?
if repo not in repos_cfg.sections():
url = kwargs.get('url', kwargs.get('mirrorlist', kwargs.get('baseurl')))
if not url:
raise CommandExecutionError(
'Repository \'{0}\' not found, and neither \'baseurl\' nor '
'\'mirrorlist\' was specified'.format(repo)
)
if not _urlparse(url).scheme:
raise CommandExecutionError(
'Repository \'{0}\' not found and URL for baseurl/mirrorlist '
'is malformed'.format(repo)
)
# Is there already such repo under different alias?
for alias in repos_cfg.sections():
repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)
# Complete user URL, in case it is not
new_url = _urlparse(url)
if not new_url.path:
new_url = _urlparse.ParseResult(scheme=new_url.scheme, # pylint: disable=E1123
netloc=new_url.netloc,
path='/',
params=new_url.params,
query=new_url.query,
fragment=new_url.fragment)
base_url = _urlparse(repo_meta['baseurl'])
if new_url == base_url:
raise CommandExecutionError(
'Repository \'{0}\' already exists as \'{1}\'.'.format(
repo,
alias
)
)
# Add new repo
__zypper__.xml.call('ar', url, repo)
# Verify the repository has been added
repos_cfg = _get_configured_repos()
if repo not in repos_cfg.sections():
raise CommandExecutionError(
            'Failed to add new repository \'{0}\' for unspecified reason. '
'Please check zypper logs.'.format(repo))
added = True
# Modify added or existing repo according to the options
cmd_opt = []
global_cmd_opt = []
call_refresh = False
if 'enabled' in kwargs:
cmd_opt.append(kwargs['enabled'] and '--enable' or '--disable')
if 'refresh' in kwargs:
cmd_opt.append(kwargs['refresh'] and '--refresh' or '--no-refresh')
if 'cache' in kwargs:
cmd_opt.append(
kwargs['cache'] and '--keep-packages' or '--no-keep-packages'
)
if 'gpgcheck' in kwargs:
cmd_opt.append(kwargs['gpgcheck'] and '--gpgcheck' or '--no-gpgcheck')
if 'priority' in kwargs:
cmd_opt.append("--priority={0}".format(kwargs.get('priority', DEFAULT_PRIORITY)))
if 'humanname' in kwargs:
cmd_opt.append("--name='{0}'".format(kwargs.get('humanname')))
if kwargs.get('gpgautoimport') is True:
global_cmd_opt.append('--gpg-auto-import-keys')
call_refresh = True
if cmd_opt:
cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
__zypper__.refreshable.xml.call(*cmd_opt)
comment = None
if call_refresh:
# when used with "zypper ar --refresh" or "zypper mr --refresh"
# --gpg-auto-import-keys is not doing anything
# so we need to specifically refresh here with --gpg-auto-import-keys
refresh_opts = global_cmd_opt + ['refresh'] + [repo]
__zypper__.xml.call(*refresh_opts)
elif not added and not cmd_opt:
comment = 'Specified arguments did not result in modification of repo'
repo = get_repo(repo)
if comment:
repo['comment'] = comment
return repo
def refresh_db():
'''
Force a repository refresh by calling ``zypper refresh --force``, return a dict::
{'<database name>': Bool}
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
ret = {}
out = __zypper__.refreshable.call('refresh', '--force')
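# Parse zypper's human-readable output: a repository reported as 'is up to date'
# maps to False, one whose cache was rebuilt ('Building repository ... done') maps to True.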
for line in out.splitlines():
if not line:
continue
if line.strip().startswith('Repository') and '\'' in line:
key = line.split('\'')[1].strip()
if 'is up to date' in line:
ret[key] = False
elif line.strip().startswith('Building') and '\'' in line:
key = line.split('\'')[1].strip()
if 'done' in line:
ret[key] = True
return ret
def install(name=None,
refresh=False,
fromrepo=None,
pkgs=None,
sources=None,
downloadonly=None,
skip_verify=False,
version=None,
ignore_repo_failure=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package(s). Add refresh=True to force a 'zypper refresh'
before the package is installed.
name
The name of the package to be installed. Note that this parameter is
ignored if either ``pkgs`` or ``sources`` is passed. Additionally,
please note that this option can only be used to install packages from
a software repository. To install a package file manually, use the
``sources`` option.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
fromrepo
Specify a package repository to install from.
downloadonly
Only download the packages, do not install.
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
version
Can be either a version number, or the combination of a comparison
operator (<, >, <=, >=, =) and a version number (ex. '>1.2.3-4').
This parameter is ignored if ``pkgs`` or ``sources`` is passed.
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list. A specific version number can be specified
by using a single-element dict representing the package and its
version. As with the ``version`` parameter above, comparison operators
can be used to target a specific version of a package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4"}]'
salt '*' pkg.install pkgs='["foo", {"bar": "<1.2.3-4"}]'
sources
A list of RPM packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"},{"bar": "salt://bar.rpm"}]'
ignore_repo_failure
Zypper returns error code 106 if one of the repositories is not available for various reasons.
Set this parameter to True to ignore error code 106 and continue. Default: False.
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
if refresh:
refresh_db()
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name, pkgs, sources, **kwargs)
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
version_num = version
if version_num:
if pkgs is None and sources is None:
# Allow "version" to work for single package target
pkg_params = {name: version_num}
else:
log.warning("'version' parameter will be ignored for multiple package targets")
if pkg_type == 'repository':
targets = []
problems = []
for param, version_num in six.iteritems(pkg_params):
if version_num is None:
targets.append(param)
else:
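# Split a spec such as '>=1.2.3-4' into its comparison operator and version string
# so it can be passed to zypper as 'name>=1.2.3-4'; a bare version gets '=' prepended.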
match = re.match(r'^([<>])?(=)?([^<>=]+)$', version_num)
if match:
gt_lt, equal, verstr = match.groups()
targets.append('{0}{1}{2}'.format(param, ((gt_lt or '') + (equal or '')) or '=', verstr))
log.debug(targets)
else:
msg = ('Invalid version string \'{0}\' for package \'{1}\''.format(version_num, name))
problems.append(msg)
if problems:
for problem in problems:
log.error(problem)
return {}
else:
targets = pkg_params
old = list_pkgs()
downgrades = []
if fromrepo:
fromrepoopt = ['--force', '--force-resolution', '--from', fromrepo]
log.info('Targeting repo \'{0}\''.format(fromrepo))
else:
fromrepoopt = ''
cmd_install = ['install', '--name', '--auto-agree-with-licenses']
if not refresh:
cmd_install.insert(0, '--no-refresh')
if skip_verify:
cmd_install.append('--no-gpg-checks')
if downloadonly:
cmd_install.append('--download-only')
if fromrepo:
cmd_install.extend(fromrepoopt)
errors = []
# Split the targets into batches of 500 packages each, so that
# the maximal length of the command line is not broken
systemd_scope = _systemd_scope()
while targets:
cmd = cmd_install + targets[:500]
targets = targets[500:]
for line in __zypper__(no_repo_failure=ignore_repo_failure, systemd_scope=systemd_scope).call(*cmd).splitlines():
match = re.match(r"^The selected package '([^']+)'.+has lower version", line)
if match:
downgrades.append(match.group(1))
while downgrades:
cmd = cmd_install + ['--force'] + downgrades[:500]
downgrades = downgrades[500:]
__zypper__(no_repo_failure=ignore_repo_failure).call(*cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def upgrade(refresh=True,
dryrun=False,
dist_upgrade=False,
fromrepo=None,
novendorchange=False,
skip_verify=False,
**kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade, i.e. a ``zypper upgrade``
refresh
force a refresh if set to True (default).
If set to False it depends on zypper if a refresh is
executed.
dryrun
If set to True, it creates a debug solver log file and then performs
a dry-run upgrade (no changes are made). Default: False
dist_upgrade
Perform a system dist-upgrade. Default: False
fromrepo
Specify a list of package repositories to upgrade from. Default: None
novendorchange
If set to True, do not allow vendor changes. Default: False
skip_verify
Skip the GPG verification check (e.g., ``--no-gpg-checks``)
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade dist-upgrade=True fromrepo='["MyRepoName"]' novendorchange=True
salt '*' pkg.upgrade dist-upgrade=True dryrun=True
'''
cmd_update = (['dist-upgrade'] if dist_upgrade else ['update']) + ['--auto-agree-with-licenses']
if skip_verify:
# The '--no-gpg-checks' needs to be placed before the Zypper command.
cmd_update.insert(0, '--no-gpg-checks')
if refresh:
refresh_db()
if dryrun:
cmd_update.append('--dry-run')
if dist_upgrade:
if fromrepo:
for repo in fromrepo:
cmd_update.extend(['--from', repo])
log.info('Targeting repos: {0}'.format(fromrepo))
if novendorchange:
# TODO: Grains validation should be moved to Zypper class
if __grains__['osrelease_info'][0] > 11:
cmd_update.append('--no-allow-vendor-change')
log.info('Disabling vendor changes')
else:
log.warning('Disabling vendor changes is not supported on this Zypper version')
if dryrun:
# Creates a solver test case for debugging.
log.info('Executing debugsolver and performing a dry-run dist-upgrade')
__zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update + ['--debug-solver'])
old = list_pkgs()
__zypper__(systemd_scope=_systemd_scope()).noraise.call(*cmd_update)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if __zypper__.exit_code not in __zypper__.SUCCESS_EXIT_CODES:
result = {
'retcode': __zypper__.exit_code,
'stdout': __zypper__.stdout,
'stderr': __zypper__.stderr,
'pid': __zypper__.pid,
}
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
if dryrun:
ret = (__zypper__.stdout + os.linesep + __zypper__.stderr).strip()
return ret
def _uninstall(name=None, pkgs=None):
'''
Remove and purge do identical things but with different Zypper commands;
this function performs the common logic.
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [target for target in pkg_params if target in old]
if not targets:
return {}
systemd_scope = _systemd_scope()
errors = []
while targets:
__zypper__(systemd_scope=systemd_scope).call('remove', *targets[:500])
targets = targets[500:]
__context__.pop('pkg.list_pkgs', None)
ret = salt.utils.compare_dicts(old, list_pkgs())
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages with ``zypper -n remove``
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
return _uninstall(name=name, pkgs=pkgs)
def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any zypper commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Recursively remove a package and all dependencies which were installed
with it; this calls ``zypper -n remove -u``
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return _uninstall(name=name, pkgs=pkgs)
def list_locks():
'''
List current package locks.
Return a dict containing the locked packages with attributes::
{'<package>': {'case_sensitive': '<case_sensitive>',
'match_type': '<match_type>'
'type': '<type>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locks
'''
locks = {}
if os.path.exists(LOCKS):
with salt.utils.fopen(LOCKS) as fhr:
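# The locks file is parsed as blank-line-separated stanzas of 'key: value' pairs,
# building one attribute dict per locked solvable.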
for meta in [item.split('\n') for item in fhr.read().split('\n\n')]:
lock = {}
for element in [el for el in meta if el]:
if ':' in element:
lock.update(dict([tuple([i.strip() for i in element.split(':', 1)]), ]))
if lock.get('solvable_name'):
locks[lock.pop('solvable_name')] = lock
return locks
def clean_locks():
'''
Remove unused locks that do not currently (with regard to repositories
used) lock any package.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_locks
'''
LCK = "removed"
out = {LCK: 0}
if not os.path.exists("/etc/zypp/locks"):
return out
for node in __zypper__.xml.call('cl').getElementsByTagName("message"):
text = node.childNodes[0].nodeValue.lower()
if text.startswith(LCK):
out[LCK] = text.split(" ")[1]
break
return out
def remove_lock(packages, **kwargs): # pylint: disable=unused-argument
'''
Remove specified package lock.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove_lock <package name>
salt '*' pkg.remove_lock <package1>,<package2>,<package3>
salt '*' pkg.remove_lock pkgs='["foo", "bar"]'
'''
locks = list_locks()
try:
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
except MinionError as exc:
raise CommandExecutionError(exc)
removed = []
missing = []
for pkg in packages:
if locks.get(pkg):
removed.append(pkg)
else:
missing.append(pkg)
if removed:
__zypper__.call('rl', *removed)
return {'removed': len(removed), 'not_found': missing}
def add_lock(packages, **kwargs): # pylint: disable=unused-argument
'''
Add a package lock. Specify packages to lock by exact name.
CLI Example:
.. code-block:: bash
salt '*' pkg.add_lock <package name>
salt '*' pkg.add_lock <package1>,<package2>,<package3>
salt '*' pkg.add_lock pkgs='["foo", "bar"]'
'''
locks = list_locks()
added = []
try:
packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
except MinionError as exc:
raise CommandExecutionError(exc)
for pkg in packages:
if not locks.get(pkg):
added.append(pkg)
if added:
__zypper__.call('al', *added)
return {'added': len(added), 'packages': added}
def verify(*names, **kwargs):
'''
Runs an rpm -Va on a system, and returns the results in a dict
Files with an attribute of config, doc, ghost, license or readme in the
package header can be ignored using the ``ignore_types`` keyword argument
CLI Example:
.. code-block:: bash
salt '*' pkg.verify
salt '*' pkg.verify httpd
salt '*' pkg.verify 'httpd postfix'
salt '*' pkg.verify 'httpd postfix' ignore_types=['config','doc']
'''
return __salt__['lowpkg.verify'](*names, **kwargs)
def file_list(*packages):
'''
List the files that belong to a package. Not specifying any packages will
return a list of *every* file on the system's rpm database (not generally
recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
return __salt__['lowpkg.file_list'](*packages)
def file_dict(*packages):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of *every* file on the system's
rpm database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict httpd
salt '*' pkg.file_dict httpd postfix
salt '*' pkg.file_dict
'''
return __salt__['lowpkg.file_dict'](*packages)
def modified(*packages, **flags):
'''
List the modified files that belong to a package. Not specifying any packages
will return a list of _all_ modified files on the system's RPM database.
.. versionadded:: 2015.5.0
Filtering by flags (True or False):
size
Include only files where size changed.
mode
Include only files whose mode has been changed.
checksum
Include only files whose MD5 checksum has been changed.
device
Include only files whose major and minor device numbers have been changed.
symlink
Include only files whose symbolic link target has been changed.
owner
Include only files whose owner has been changed.
group
Include only files whose group has been changed.
time
Include only files whose modification time has been changed.
capabilities
Include only files whose capabilities have been changed. Note: supported only on newer RPM versions.
CLI Examples:
.. code-block:: bash
salt '*' pkg.modified
salt '*' pkg.modified httpd
salt '*' pkg.modified httpd postfix
salt '*' pkg.modified httpd owner=True group=False
'''
return __salt__['lowpkg.modified'](*packages, **flags)
def owner(*paths):
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. If a single path is passed, a string will be returned,
and if multiple paths are passed, a dictionary of file/package name
pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
return __salt__['lowpkg.owner'](*paths)
def _get_patterns(installed_only=None):
'''
List all known patterns in repos.
'''
patterns = {}
for element in __zypper__.nolock.xml.call('se', '-t', 'pattern').getElementsByTagName('solvable'):
installed = element.getAttribute('status') == 'installed'
if (installed_only and installed) or not installed_only:
patterns[element.getAttribute('name')] = {
'installed': installed,
'summary': element.getAttribute('summary'),
}
return patterns
def list_patterns(refresh=False):
'''
List all known patterns from available repos.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_patterns
'''
if refresh:
refresh_db()
return _get_patterns()
def list_installed_patterns():
'''
List installed patterns on the system.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_installed_patterns
'''
return _get_patterns(installed_only=True)
def search(criteria, refresh=False):
'''
List known packages available to the system.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.search <criteria>
'''
if refresh:
refresh_db()
solvables = __zypper__.nolock.xml.call('se', criteria).getElementsByTagName('solvable')
if not solvables:
raise CommandExecutionError(
'No packages found matching \'{0}\''.format(criteria)
)
out = {}
for solvable in [slv for slv in solvables
if slv.getAttribute('status') == 'not-installed'
and slv.getAttribute('kind') == 'package']:
out[solvable.getAttribute('name')] = {'summary': solvable.getAttribute('summary')}
return out
def _get_first_aggregate_text(node_list):
'''
Extract text from the first occurrence of the DOM aggregate.
'''
if not node_list:
return ''
out = []
for node in node_list[0].childNodes:
if node.nodeType == dom.Document.TEXT_NODE:
out.append(node.nodeValue)
return '\n'.join(out)
def list_products(all=False, refresh=False):
'''
List all available or installed SUSE products.
all
List all products available or only installed. Default is False.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
Includes handling for OEM products, for which the OEM productline file is read
and the release value overwritten.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_products
salt '*' pkg.list_products all=True
'''
if refresh:
refresh_db()
ret = list()
OEM_PATH = "/var/lib/suseRegister/OEM"
cmd = list()
if not all:
cmd.append('--disable-repos')
cmd.append('products')
if not all:
cmd.append('-i')
product_list = __zypper__.nolock.xml.call(*cmd).getElementsByTagName('product-list')
if not product_list:
return ret # No products found
for prd in product_list[0].getElementsByTagName('product'):
p_nfo = dict()
for k_p_nfo, v_p_nfo in prd.attributes.items():
if k_p_nfo in ['isbase', 'installed']:
p_nfo[k_p_nfo] = bool(v_p_nfo in ['true', '1'])
elif v_p_nfo:
p_nfo[k_p_nfo] = v_p_nfo
eol = prd.getElementsByTagName('endoflife')
if eol:
p_nfo['eol'] = eol[0].getAttribute('text')
p_nfo['eol_t'] = int(eol[0].getAttribute('time_t') or 0)
p_nfo['description'] = " ".join(
[line.strip() for line in _get_first_aggregate_text(
prd.getElementsByTagName('description')
).split(os.linesep)]
)
if 'productline' in p_nfo and p_nfo['productline']:
oem_file = os.path.join(OEM_PATH, p_nfo['productline'])
if os.path.isfile(oem_file):
with salt.utils.fopen(oem_file, 'r') as rfile:
oem_release = rfile.readline().strip()
if oem_release:
p_nfo['release'] = oem_release
ret.append(p_nfo)
return ret
def download(*packages, **kwargs):
'''
Download packages to the local disk.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
CLI example:
.. code-block:: bash
salt '*' pkg.download httpd
salt '*' pkg.download httpd postfix
'''
if not packages:
raise SaltInvocationError('No packages specified')
refresh = kwargs.get('refresh', False)
if refresh:
refresh_db()
pkg_ret = {}
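# Each <download-result> element in zypper's XML output names the source repository and
# the local path of the fetched package; an entry is kept only if lowpkg.checksum succeeds for that file.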
for dld_result in __zypper__.xml.call('download', *packages).getElementsByTagName("download-result"):
repo = dld_result.getElementsByTagName("repository")[0]
path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path")
pkg_info = {
'repository-name': repo.getAttribute('name'),
'repository-alias': repo.getAttribute('alias'),
'path': path,
}
key = _get_first_aggregate_text(
dld_result.getElementsByTagName('name')
)
if __salt__['lowpkg.checksum'](pkg_info['path']):
pkg_ret[key] = pkg_info
if pkg_ret:
failed = [pkg for pkg in packages if pkg not in pkg_ret]
if failed:
pkg_ret['_error'] = ('The following package(s) failed to download: {0}'.format(', '.join(failed)))
return pkg_ret
raise CommandExecutionError(
'Unable to download packages: {0}'.format(', '.join(packages))
)
def diff(*paths):
'''
Return a formatted diff between current files and original in a package.
NOTE: this function includes all files (configuration and not), but does
not work on binary content.
:param path: Full path to the installed file
:return: Difference string, or raises an exception if the examined file is binary.
CLI example:
.. code-block:: bash
salt '*' pkg.diff /etc/apache2/httpd.conf /etc/sudoers
'''
ret = {}
pkg_to_paths = {}
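# Group the requested paths by the package that owns them, so each owning package
# only needs to be downloaded once before diffing.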
for pth in paths:
pth_pkg = __salt__['lowpkg.owner'](pth)
if not pth_pkg:
ret[pth] = os.path.exists(pth) and 'Not managed' or 'N/A'
else:
if pkg_to_paths.get(pth_pkg) is None:
pkg_to_paths[pth_pkg] = []
pkg_to_paths[pth_pkg].append(pth)
if pkg_to_paths:
local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys())
for pkg, files in six.iteritems(pkg_to_paths):
for path in files:
ret[path] = __salt__['lowpkg.diff'](
local_pkgs[pkg]['path'],
path
) or 'Unchanged'
return ret
|
the-stack_106_27313 | import json
from http import HTTPStatus
import responses # type: ignore
from flask import current_app
import pytest
import sqlalchemy # type: ignore
from sqlalchemy.exc import OperationalError
from lighthouse.constants import (
FIELD_COG_BARCODE,
FIELD_ROOT_SAMPLE_ID,
MLWH_LH_SAMPLE_ROOT_SAMPLE_ID,
MLWH_LH_SAMPLE_COG_UK_ID,
FIELD_RNA_ID,
FIELD_RESULT
)
from lighthouse.helpers.plates import (
add_cog_barcodes,
create_post_body,
get_centre_prefix,
get_samples,
get_positive_samples,
update_mlwh_with_cog_uk_ids,
UnmatchedSampleError
)
def test_add_cog_barcodes(app, centres, samples, mocked_responses):
with app.app_context():
baracoda_url = f"http://{current_app.config['BARACODA_URL']}/barcodes_group/TS1/new?count={len(samples)}"
# remove the cog_barcode key and value from the samples fixture before testing
# (use an explicit loop: a bare map() call is lazy in Python 3 and would never run)
for sample in samples:
    sample.pop(FIELD_COG_BARCODE)
cog_barcodes = ("123", "456", "789", "101", "131", "161", "192", "222")
# update the 'cog_barcode' tuple when adding more samples to the fixture data
assert len(cog_barcodes) == len(samples)
mocked_responses.add(
responses.POST,
baracoda_url,
body=json.dumps({"barcodes_group": {"barcodes": cog_barcodes}}),
status=HTTPStatus.CREATED,
)
add_cog_barcodes(samples)
for idx, sample in enumerate(samples):
assert FIELD_COG_BARCODE in sample.keys()
assert sample[FIELD_COG_BARCODE] == cog_barcodes[idx]
def test_centre_prefix(app, centres, mocked_responses):
with app.app_context():
assert get_centre_prefix("TEST1") == "TS1"
assert get_centre_prefix("test2") == "TS2"
assert get_centre_prefix("TeSt3") == "TS3"
def test_create_post_body(app, samples):
with app.app_context():
barcode = "12345"
correct_body = {
"data": {
"type": "plates",
"attributes": {
"barcode": "12345",
"purpose_uuid": current_app.config["SS_UUID_PLATE_PURPOSE"],
"study_uuid": current_app.config["SS_UUID_STUDY"],
"wells": {
"A01": {
"content": {
"phenotype": "positive",
"supplier_name": "abc",
"sample_description": "MCM001",
}
},
"B01": {
"content": {
"phenotype": "negative",
"supplier_name": "def",
"sample_description": "MCM002",
}
},
"C01": {
"content": {
"phenotype": "void",
"supplier_name": "hij",
"sample_description": "MCM003",
}
},
"D01": {
"content": {
"phenotype": "limit of detection",
"supplier_name": "klm",
"sample_description": "MCM004",
}
},
"E01": {
"content": {
"phenotype": "positive",
"supplier_name": "nop",
"sample_description": "MCM005",
}
},
"F01": {
"content": {
"phenotype": "positive",
"supplier_name": "qrs",
"sample_description": "MCM006",
}
},
"G01": {
"content": {
"phenotype": "positive",
"supplier_name": "tuv",
"sample_description": "MCM007",
}
},
"A02": {
"content": {
"phenotype": "positive",
"supplier_name": "wxy",
"sample_description": "CBIQA_MCM008",
}
}
},
},
}
}
assert create_post_body(barcode, samples) == correct_body
def test_get_samples(app, samples):
with app.app_context():
assert len(get_samples("123")) == 8
def test_get_positive_samples(app, samples):
with app.app_context():
assert len(get_positive_samples("123")) == 3
def test_get_positive_samples_different_plates(app, samples_different_plates):
with app.app_context():
assert len(get_positive_samples("123")) == 1
def test_update_mlwh_with_cog_uk_ids(app, mlwh_lh_samples_multiple, samples_for_mlwh_update, cog_uk_ids, sql_engine):
with app.app_context():
# check that the samples already exist in the MLWH db but do not have cog uk ids
before = retrieve_samples_cursor(app.config, sql_engine)
before_count = 0
for row in before:
before_count += 1
assert row[MLWH_LH_SAMPLE_COG_UK_ID] is None
assert before_count == 3
# run the function we're testing
update_mlwh_with_cog_uk_ids(samples_for_mlwh_update)
# check that the same samples in the MLWH now have the correct cog uk ids
after = retrieve_samples_cursor(app.config, sql_engine)
after_count = 0
after_cog_uk_ids = set()
for row in after:
after_count += 1
after_cog_uk_ids.add(row[MLWH_LH_SAMPLE_COG_UK_ID])
assert after_count == before_count
assert after_cog_uk_ids == set(cog_uk_ids)
def test_update_mlwh_with_cog_uk_ids_connection_fails(app, mlwh_lh_samples_multiple, samples_for_mlwh_update):
with app.app_context():
# mock this out to cause an exception
app.config['MLWH_RW_CONN_STRING'] = 'notarealconnectionstring'
with pytest.raises(OperationalError):
update_mlwh_with_cog_uk_ids(samples_for_mlwh_update)
def test_update_mlwh_with_cog_uk_ids_field_missing(app, mlwh_lh_samples_multiple):
with app.app_context():
samples = [{
FIELD_ROOT_SAMPLE_ID: 'root_1',
FIELD_RNA_ID: 'rna_1',
FIELD_RESULT: 'positive'
# no cog uk id
}]
with pytest.raises(KeyError):
update_mlwh_with_cog_uk_ids(samples)
def test_update_mlwh_with_cog_uk_ids_unmatched_sample(app, mlwh_lh_samples_multiple, samples_for_mlwh_update, cog_uk_ids, sql_engine):
# Should - update the ones it can, but then log a detailed error, and throw an exception
with app.app_context():
# add sample that doesn't match one in the MLWH
samples_for_mlwh_update.append({
FIELD_ROOT_SAMPLE_ID: 'root_253',
FIELD_RNA_ID: 'rna_253',
FIELD_RESULT: 'positive',
FIELD_COG_BARCODE: 'cog_253'
})
# check that the expected number of samples are in the MLWH db but do not have cog uk ids
before = retrieve_samples_cursor(app.config, sql_engine)
before_count = 0
for row in before:
before_count += 1
assert row[MLWH_LH_SAMPLE_COG_UK_ID] is None
assert before_count == 3
# check the function raises an exception due to the unmatched sample
with pytest.raises(UnmatchedSampleError):
update_mlwh_with_cog_uk_ids(samples_for_mlwh_update)
# check that the matched samples in the MLWH now have the correct cog uk ids
after = retrieve_samples_cursor(app.config, sql_engine)
after_count = 0
after_cog_uk_ids = set()
for row in after:
after_count += 1
after_cog_uk_ids.add(row[MLWH_LH_SAMPLE_COG_UK_ID])
assert after_count == before_count
assert after_cog_uk_ids == set(cog_uk_ids)
def retrieve_samples_cursor(config, sql_engine):
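"""Read back the root sample id and COG UK id columns for every row of the MLWH lighthouse_sample table."""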
with sql_engine.connect() as connection:
results = connection.execute(f"SELECT {MLWH_LH_SAMPLE_ROOT_SAMPLE_ID}, {MLWH_LH_SAMPLE_COG_UK_ID} from lighthouse_sample")
return results
|
the-stack_106_27315 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter, Dropout
class ModelWithDummyParameter(nn.Module):
def __init__(self):
super().__init__()
self.dummy_param = Parameter(torch.zeros(1))
@abstractmethod
def forward(self, x):
pass
class ManyNonEvalModules(ModelWithDummyParameter):
class AuxBranch(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.weight = Parameter(torch.ones([1, 1]))
def forward(self, x):
x = F.linear(x, self.weight)
x = self.linear(x)
x = F.relu(x)
return x
class ModuleWithMixedModules(nn.Module):
def __init__(self):
super().__init__()
self.weight = Parameter(torch.ones([1, 1]))
self.not_called_linear = nn.Linear(1, 1)
self.called_linear = nn.Linear(1, 1)
def forward(self, x):
x = Dropout(p=0.2)(x)
x = F.linear(x, self.weight)
x = Dropout(p=0.2)(x)
x = self.called_linear(x)
return x
def __init__(self):
super().__init__()
self.aux_branch = self.AuxBranch()
self.mixed_modules = self.ModuleWithMixedModules()
self.avg_pool = nn.AvgPool2d(1)
def forward(self, x):
x = self.avg_pool(x)
if self.training:
aux = self.aux_branch(x)
x = self.mixed_modules(x)
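# note: by operator precedence this returns (x, aux) in training mode and (x, x) in eval mode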
return x, aux if self.training else x
class PoolUnPool(ModelWithDummyParameter):
def __init__(self):
super().__init__()
self.pool = nn.MaxPool3d(3, stride=2, return_indices=True)
self.unpool = nn.MaxUnpool3d(3, stride=2)
def forward(self, input_):
output, indices = self.pool(input_)
return self.unpool(output, indices)
class ArangeModel(ModelWithDummyParameter):
def forward(self, dummy_x):
return torch.arange(0, dummy_x.size(0), dtype=torch.int64)
class TransposeModel(ModelWithDummyParameter):
def forward(self, x):
o1 = x.transpose(dim0=0, dim1=0)
o2 = x.permute(dims=[0])
return o1, o2
class GatherModel(ModelWithDummyParameter):
def forward(self, x):
index = torch.zeros(1, dtype=torch.int64).to(x.device)
o1 = torch.where(self.dummy_param > 0, x, self.dummy_param)
o2 = torch.index_select(x, dim=0, index=index)
o3 = x.index_select(dim=0, index=index)
o4 = x[0]
return o1, o2, o3, o4
class MaskedFillModel(ModelWithDummyParameter):
def forward(self, x):
o1 = x.masked_fill_(self.dummy_param > 0, 1.0)
o2 = x.masked_fill(self.dummy_param > 0, 1.0)
return o1, o2
class ReshapeModel(ModelWithDummyParameter):
def forward(self, x):
torch.squeeze(x)
torch.unsqueeze(x, dim=0)
torch.flatten(x)
return x.reshape([1]), x.squeeze(), x.flatten(), x.unsqueeze(dim=0), x.view([1])
|
the-stack_106_27316 | import bokeh.plotting
import bokeh.layouts
import bokeh.models
def main():
columns = []
color_mappers = []
for name in bokeh.palettes.mpl:
figures = []
for number in bokeh.palettes.mpl[name]:
palette = bokeh.palettes.mpl[name][number]
color_mapper = bokeh.models.LinearColorMapper(palette=palette, low=0, high=1)
color_mappers.append(color_mapper)
figures.append(make_figure(color_mapper))
columns.append(bokeh.layouts.column(*figures))
button = bokeh.models.Button()
def on_click():
for mapper in color_mappers:
mapper.palette = mapper.palette[::-1]
button.on_click(on_click)
row = bokeh.layouts.row(*columns)
document = bokeh.plotting.curdoc()
document.add_root(button)
document.add_root(row)
def make_figure(color_mapper):
padding = 5
margin = 20
plot_height = 60
plot_width = 250
figure = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
toolbar_location=None,
min_border=0,
background_fill_alpha=0,
border_fill_alpha=0,
outline_line_color=None,
)
figure.axis.visible = False
colorbar = bokeh.models.ColorBar(
color_mapper=color_mapper,
location=(0, 0),
height=10,
width=int(plot_width - (margin + padding)),
padding=padding,
orientation="horizontal",
major_tick_line_color="black",
bar_line_color="black",
background_fill_alpha=0.,
)
colorbar.title = ""
figure.add_layout(colorbar, 'center')
return figure
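# Note (assumption about how this file is launched): under `bokeh serve` the module name
# typically starts with 'bk', so the check below builds the document only when the script
# runs as a Bokeh server application.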
if __name__.startswith("bk"):
main()
|
the-stack_106_27317 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC
from collections import defaultdict
from enum import Enum
from typing import Dict
import onnx
import torch
from nemo.core.classes import typecheck
from nemo.core.neural_types import AxisKind, NeuralType
from nemo.utils import logging
from nemo.utils.export_utils import replace_for_export
try:
import onnx_graphsurgeon as gs
ONNX_GRAPHSURGEON_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
ONNX_GRAPHSURGEON_AVAILABLE = False
__all__ = ['ExportFormat', 'Exportable']
class ExportFormat(Enum):
"""Which format to use when exporting a Neural Module for deployment"""
ONNX = (1,)
TORCHSCRIPT = (2,)
_EXT_DICT = {
".pt": ExportFormat.TORCHSCRIPT,
".ts": ExportFormat.TORCHSCRIPT,
".onnx": ExportFormat.ONNX,
}
class Exportable(ABC):
"""
This Interface should be implemented by particular classes derived from nemo.core.NeuralModule or nemo.core.ModelPT.
It gives these entities ability to be exported for deployment to formats such as ONNX.
"""
@staticmethod
def get_format(filename: str):
_, ext = os.path.splitext(filename)
try:
return _EXT_DICT[ext]
except KeyError:
raise ValueError(f"Export file {filename} extension does not correspond to any export format!")
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def get_input_names(self, input_example):
if isinstance(input_example, Dict):
input_names = list(input_example.keys())
else:
if not (hasattr(self, 'input_types')):
raise NotImplementedError(
'For export to work you must define input_types or pass names in input_example'
)
input_names = list(self.input_types.keys())
# remove unnecessary inputs for input_ports
for name in self.disabled_deployment_input_names:
input_names.remove(name)
return input_names
def get_output_names(self, output_example):
if isinstance(output_example, Dict):
output_names = list(output_example.keys())
else:
if not (hasattr(self, 'output_types')):
raise NotImplementedError(
'For export to work you must define output_types or pass names in output_example'
)
output_names = list(self.output_types.keys())
# remove unnecessary outputs for output_ports
for name in self.disabled_deployment_output_names:
output_names.remove(name)
return output_names
def get_input_dynamic_axes(self, input_names):
dynamic_axes = defaultdict(list)
for name in input_names:
dynamic_axes = {
**dynamic_axes,
**self._extract_dynamic_axes(name, self.input_types[name]),
}
return dynamic_axes
def get_output_dynamic_axes(self, output_names):
dynamic_axes = defaultdict(list)
for name in output_names:
dynamic_axes = {
**dynamic_axes,
**self._extract_dynamic_axes(name, self.output_types[name]),
}
return dynamic_axes
def export(
self,
output: str,
input_example=None,
output_example=None,
verbose=False,
export_params=True,
do_constant_folding=True,
keep_initializers_as_inputs=False,
onnx_opset_version: int = 12,
try_script: bool = False,
set_eval: bool = True,
check_trace: bool = True,
use_dynamic_axes: bool = True,
dynamic_axes=None,
check_tolerance=0.01,
forward_method=None,
):
my_args = locals()
del my_args['self']
qual_name = self.__module__ + '.' + self.__class__.__qualname__
output_descr = qual_name + ' exported to ONNX'
try:
# Disable typechecks
typecheck.set_typecheck_enabled(enabled=False)
# Allow user to completely override forward method to export
if forward_method is None and hasattr(type(self), "forward_for_export"):
forward_method = type(self).forward_for_export
if forward_method:
old_forward_method = type(self).forward
type(self).forward = forward_method
# Set module to eval mode
if set_eval:
self.eval()
format = self.get_format(output)
if input_example is None:
input_example = self.input_module.input_example()
if isinstance(input_example, Dict):
input_example = tuple(input_example.values())
my_args['input_example'] = input_example
self._prepare_for_export(**my_args)
if output_example is None:
if isinstance(input_example, tuple):
output_example = self.forward(*input_example)
else:
output_example = self.forward(input_example)
input_names = self.input_module.get_input_names(input_example)
output_names = self.output_module.get_output_names(output_example)
with torch.jit.optimized_execution(True), torch.no_grad():
jitted_model = None
if try_script:
try:
jitted_model = torch.jit.script(self)
except Exception as e:
print("jit.script() failed!", e)
with torch.jit.optimized_execution(True), torch.no_grad():
if format == ExportFormat.TORCHSCRIPT:
if jitted_model is None:
jitted_model = torch.jit.trace(
self,
input_example,
strict=False,
optimize=True,
check_trace=check_trace,
check_tolerance=check_tolerance,
)
if verbose:
print(jitted_model.code)
jitted_model.save(output)
assert os.path.exists(output)
elif format == ExportFormat.ONNX:
if jitted_model is None:
jitted_model = self
# dynamic axis is a mapping from input/output_name => list of "dynamic" indices
if dynamic_axes is None and use_dynamic_axes:
dynamic_axes = self.input_module.get_input_dynamic_axes(input_names)
dynamic_axes = {**dynamic_axes, **self.output_module.get_output_dynamic_axes(output_names)}
torch.onnx.export(
jitted_model,
input_example,
output,
input_names=input_names,
output_names=output_names,
verbose=verbose,
export_params=export_params,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
opset_version=onnx_opset_version,
example_outputs=output_example,
)
# Verify the model can be read, and is valid
onnx_model = onnx.load(output)
onnx.checker.check_model(onnx_model, full_check=True)
if do_constant_folding:
if not ONNX_GRAPHSURGEON_AVAILABLE:
logging.info(
f"onnx-graphsurgeon module is not instlled."
"That may result in suboptimal optimization of exported ONNX graph (including unneeded DOUBLE initializers)."
"Please follow the instructions available at:"
"https://github.com/NVIDIA/TensorRT/tree/master/tools/onnx-graphsurgeon"
"to install onnx-graphsurgeon from source to improve exported graph."
)
else:
# This pass is to remove/recast certain constants that are generated as 'double'
# Those constants break ONNX -> TRT conversion (TRT does not support 'double' as of 7.2)
# Can probably be removed once TRT has automatic downcast for double.
# However, it may still be useful even then as it seems to always make the graph shorter.
graph = gs.import_onnx(onnx_model)
onnx_model = gs.export_onnx(graph.fold_constants().cleanup())
onnx.checker.check_model(onnx_model, full_check=True)
onnx.save(onnx_model, output)
else:
raise ValueError(f'Encountered unknown export format {format}.')
finally:
typecheck.set_typecheck_enabled(enabled=True)
if forward_method:
type(self).forward = old_forward_method
return ([output], [output_descr])
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set()
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set()
@property
def supported_export_formats(self):
"""Implement this method to return a set of export formats supported. Default is all types."""
return set([ExportFormat.ONNX, ExportFormat.TORCHSCRIPT])
@staticmethod
def _extract_dynamic_axes(name: str, ntype: NeuralType):
"""
Implement this method to provide dynamic axes id for ONNX export.
By default, this method will extract BATCH, TIME, WIDTH and HEIGHT dimension ids from each provided input/output name argument.
For example, if module/model accepts argument named "input_signal" with type corresponding to [Batch, Time, Dim]
shape, then the returned result should contain "input_signal" -> [0, 1] because Batch and Time are dynamic axes
as they can change from call to call during inference.
Args:
name: Name of input or output parameter
ntype: Corresponding Neural Type
Returns:
A dict mapping each parameter name to the list of its dynamic axis indices.
"""
dynamic_axes = defaultdict(list)
if ntype.axes:
for ind, axis in enumerate(ntype.axes):
if axis.kind in [AxisKind.Batch, AxisKind.Time, AxisKind.Width, AxisKind.Height]:
dynamic_axes[name].append(ind)
return dynamic_axes
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
replace_1D_2D = kwargs.get('replace_1D_2D', False)
replace_for_export(self, replace_1D_2D)
|
the-stack_106_27319 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from torch import nn
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, int(channel / reduction), bias=False),
nn.ReLU(inplace=True),
nn.Linear(int(channel / reduction), channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
|
the-stack_106_27320 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 15:24:54 2020
@author: magnus
"""
import numpy as np
#from randomSequence import *
# xvals = np.random.rand(-1,1,len(xvals))
# yvals = np.random.rand(-1,1,len(yvals))
# vals = [[x, y] for x, y in zip(xvals, yvals)]
# print(vals)
def circleAreaMC(xvals, yvals):
n = 0
for i in range(np.size(xvals)):
if np.linalg.norm(np.array([xvals[i],yvals[i]])) < 1:
n += 1
Ac = 4*(n/np.size(xvals))
return Ac |
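# A minimal usage sketch (not part of the original script): estimate the area of the unit
# circle from uniform samples on [-1, 1]^2; the sample count and seed are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xs = rng.uniform(-1, 1, 10000)
    ys = rng.uniform(-1, 1, 10000)
    print(circleAreaMC(xs, ys))  # should be close to pi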
the-stack_106_27323 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Dict, TYPE_CHECKING
from cirq.protocols.json_serialization import ObjectFactory
if TYPE_CHECKING:
import cirq.ops.pauli_gates
import cirq.devices.unconstrained_device
@functools.lru_cache()
def _class_resolver_dictionary() -> Dict[str, ObjectFactory]:
import cirq
from cirq.ops import raw_types
import pandas as pd
import numpy as np
from cirq.devices.noise_model import _NoNoiseModel
from cirq.experiments import CrossEntropyResult, CrossEntropyResultDict, GridInteractionLayer
from cirq.experiments.grid_parallel_two_qubit_xeb import GridParallelXEBMetadata
def _identity_operation_from_dict(qubits, **kwargs):
return cirq.identity_each(*qubits)
def single_qubit_matrix_gate(matrix):
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix, dtype=np.complex128)
return cirq.MatrixGate(matrix, qid_shape=(matrix.shape[0],))
def two_qubit_matrix_gate(matrix):
if not isinstance(matrix, np.ndarray):
matrix = np.array(matrix, dtype=np.complex128)
return cirq.MatrixGate(matrix, qid_shape=(2, 2))
def _parallel_gate_op(gate, qubits):
return cirq.parallel_gate_op(gate, *qubits)
import sympy
return {
'AmplitudeDampingChannel': cirq.AmplitudeDampingChannel,
'AsymmetricDepolarizingChannel': cirq.AsymmetricDepolarizingChannel,
'BitFlipChannel': cirq.BitFlipChannel,
'BitstringAccumulator': cirq.work.BitstringAccumulator,
'BooleanHamiltonian': cirq.BooleanHamiltonian,
'ProductState': cirq.ProductState,
'CCNotPowGate': cirq.CCNotPowGate,
'CCXPowGate': cirq.CCXPowGate,
'CCZPowGate': cirq.CCZPowGate,
'CNotPowGate': cirq.CNotPowGate,
'ControlledGate': cirq.ControlledGate,
'ControlledOperation': cirq.ControlledOperation,
'CSwapGate': cirq.CSwapGate,
'CXPowGate': cirq.CXPowGate,
'CZPowGate': cirq.CZPowGate,
'CrossEntropyResult': CrossEntropyResult,
'CrossEntropyResultDict': CrossEntropyResultDict,
'Circuit': cirq.Circuit,
'CircuitOperation': cirq.CircuitOperation,
'CliffordState': cirq.CliffordState,
'CliffordTableau': cirq.CliffordTableau,
'DepolarizingChannel': cirq.DepolarizingChannel,
'ConstantQubitNoiseModel': cirq.ConstantQubitNoiseModel,
'Duration': cirq.Duration,
'FrozenCircuit': cirq.FrozenCircuit,
'FSimGate': cirq.FSimGate,
'DensePauliString': cirq.DensePauliString,
'MutableDensePauliString': cirq.MutableDensePauliString,
'MutablePauliString': cirq.MutablePauliString,
'ObservableMeasuredResult': cirq.work.ObservableMeasuredResult,
'GateOperation': cirq.GateOperation,
'GeneralizedAmplitudeDampingChannel': cirq.GeneralizedAmplitudeDampingChannel,
'GlobalPhaseOperation': cirq.GlobalPhaseOperation,
'GridInteractionLayer': GridInteractionLayer,
'GridParallelXEBMetadata': GridParallelXEBMetadata,
'GridQid': cirq.GridQid,
'GridQubit': cirq.GridQubit,
'HPowGate': cirq.HPowGate,
'ISwapPowGate': cirq.ISwapPowGate,
'IdentityGate': cirq.IdentityGate,
'IdentityOperation': _identity_operation_from_dict,
'InitObsSetting': cirq.work.InitObsSetting,
'KrausChannel': cirq.KrausChannel,
'LinearDict': cirq.LinearDict,
'LineQubit': cirq.LineQubit,
'LineQid': cirq.LineQid,
'LineTopology': cirq.LineTopology,
'MatrixGate': cirq.MatrixGate,
'MixedUnitaryChannel': cirq.MixedUnitaryChannel,
'MeasurementKey': cirq.MeasurementKey,
'MeasurementGate': cirq.MeasurementGate,
'_MeasurementSpec': cirq.work._MeasurementSpec,
'Moment': cirq.Moment,
'_XEigenState': cirq.value.product_state._XEigenState, # type: ignore
'_YEigenState': cirq.value.product_state._YEigenState, # type: ignore
'_ZEigenState': cirq.value.product_state._ZEigenState, # type: ignore
'_NoNoiseModel': _NoNoiseModel,
'NamedQubit': cirq.NamedQubit,
'NamedQid': cirq.NamedQid,
'NoIdentifierQubit': cirq.testing.NoIdentifierQubit,
'_PauliX': cirq.ops.pauli_gates._PauliX,
'_PauliY': cirq.ops.pauli_gates._PauliY,
'_PauliZ': cirq.ops.pauli_gates._PauliZ,
'ParamResolver': cirq.ParamResolver,
'ParallelGateOperation': _parallel_gate_op, # Removed in v0.14
'ParallelGate': cirq.ParallelGate,
'PauliMeasurementGate': cirq.PauliMeasurementGate,
'PauliString': cirq.PauliString,
'PhaseDampingChannel': cirq.PhaseDampingChannel,
'PhaseFlipChannel': cirq.PhaseFlipChannel,
'PhaseGradientGate': cirq.PhaseGradientGate,
'PhasedFSimGate': cirq.PhasedFSimGate,
'PhasedISwapPowGate': cirq.PhasedISwapPowGate,
'PhasedXPowGate': cirq.PhasedXPowGate,
'PhasedXZGate': cirq.PhasedXZGate,
'StatePreparationChannel': cirq.StatePreparationChannel,
'ProjectorString': cirq.ProjectorString,
'ProjectorSum': cirq.ProjectorSum,
'RandomGateChannel': cirq.RandomGateChannel,
'QuantumFourierTransformGate': cirq.QuantumFourierTransformGate,
'RepetitionsStoppingCriteria': cirq.work.RepetitionsStoppingCriteria,
'ResetChannel': cirq.ResetChannel,
'SingleQubitCliffordGate': cirq.SingleQubitCliffordGate,
'SingleQubitMatrixGate': single_qubit_matrix_gate,
'SingleQubitPauliStringGateOperation': cirq.SingleQubitPauliStringGateOperation,
'SingleQubitReadoutCalibrationResult': cirq.experiments.SingleQubitReadoutCalibrationResult,
'StabilizerStateChForm': cirq.StabilizerStateChForm,
'SwapPowGate': cirq.SwapPowGate,
'SymmetricalQidPair': cirq.SymmetricalQidPair,
'TaggedOperation': cirq.TaggedOperation,
'TiltedSquareLattice': cirq.TiltedSquareLattice,
'TrialResult': cirq.Result, # keep support for Cirq < 0.11.
'Result': cirq.Result,
'Rx': cirq.Rx,
'Ry': cirq.Ry,
'Rz': cirq.Rz,
'TwoQubitMatrixGate': two_qubit_matrix_gate,
'_UnconstrainedDevice': cirq.devices.unconstrained_device._UnconstrainedDevice,
'VarianceStoppingCriteria': cirq.work.VarianceStoppingCriteria,
'VirtualTag': cirq.VirtualTag,
'WaitGate': cirq.WaitGate,
'_QubitAsQid': raw_types._QubitAsQid,
# The formatter keeps putting this back
# pylint: disable=line-too-long
'XEBPhasedFSimCharacterizationOptions': cirq.experiments.XEBPhasedFSimCharacterizationOptions,
# pylint: enable=line-too-long
'XPowGate': cirq.XPowGate,
'XXPowGate': cirq.XXPowGate,
'YPowGate': cirq.YPowGate,
'YYPowGate': cirq.YYPowGate,
'ZPowGate': cirq.ZPowGate,
'ZZPowGate': cirq.ZZPowGate,
# not a cirq class, but treated as one:
'pandas.DataFrame': pd.DataFrame,
'pandas.Index': pd.Index,
'pandas.MultiIndex': pd.MultiIndex.from_tuples,
'sympy.Symbol': sympy.Symbol,
'sympy.Add': lambda args: sympy.Add(*args),
'sympy.Mul': lambda args: sympy.Mul(*args),
'sympy.Pow': lambda args: sympy.Pow(*args),
'sympy.Float': lambda approx: sympy.Float(approx),
'sympy.Integer': sympy.Integer,
'sympy.Rational': sympy.Rational,
'sympy.pi': lambda: sympy.pi,
'sympy.E': lambda: sympy.E,
'sympy.EulerGamma': lambda: sympy.EulerGamma,
'complex': complex,
}
|
the-stack_106_27324 | """
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import array2d, as_float_array, check_random_state, deprecated
from ..utils.extmath import fast_dot
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w: array of shape(n), to be orthogonalized
W: array of shape(p, n), null space definition
j: int < p
caveats
-------
assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for _ in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
W[j, :] = w
return W
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(W, X), fun_args)
W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counter parts.
lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge.' +
' You might want' +
' to increase the number of iterations.')
return W
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
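# Each helper returns (g(x), E[g'(x)]): the non-linearity applied element-wise and the mean
# of its derivative over the last axis, as required by the FastICA fixed-point update.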
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True):
"""Perform Fast Independent Component Analysis.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten : boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations to perform.
    tol : float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_X_mean : bool, optional
If True, X_mean is returned too.
compute_sources : bool, optional
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
Returns
-------
K : array, shape (n_components, n_features) | None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : array, shape (n_components, n_components)
Estimated un-mixing matrix.
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S : array, shape (n_components, n_samples) | None
Estimated source matrix
X_mean : array, shape (n_features, )
The mean over features. Returned only if return_X_mean is True.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
random_state = check_random_state(random_state)
fun_args = {} if fun_args is None else fun_args
# make interface compatible with other decompositions
# a copy is required only for non whitened data
X = array2d(X, copy=whiten).T
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if fun == 'logcosh':
g = _logcosh
elif fun == 'exp':
g = _exp
elif fun == 'cube':
g = _cube
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
else:
exc = ValueError if isinstance(fun, six.string_types) else TypeError
raise exc("Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% fun)
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
        # X must be cast to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components,
n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if whiten:
if compute_sources:
S = fast_dot(fast_dot(W, K), X).T
else:
S = None
if return_X_mean:
return K, W, S, X_mean
else:
return K, W, S
else:
if compute_sources:
S = fast_dot(W, X).T
else:
S = None
if return_X_mean:
return None, W, S, None
else:
return None, W, S
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA: a fast algorithm for Independent Component Analysis.
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA.
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, optional
Maximum number of iterations during fit.
tol : float, optional
Tolerance on update at each iteration.
    w_init : None or an (n_components, n_components) ndarray
The mixing matrix to be used to initialize the algorithm.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : 2D array, shape (n_components, n_features)
The unmixing matrix.
`mixing_` : array, shape (n_features, n_components)
The mixing matrix.
`sources_` : 2D array, shape (n_samples, n_components)
The estimated latent sources of the data. This attribute is
deprecated and will be removed in 0.16. Use `fit_transform` instead and
store the result.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
            If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
fun_args = {} if self.fun_args is None else self.fun_args
whitening, unmixing, sources, X_mean = fastica(
X=X, n_components=self.n_components, algorithm=self.algorithm,
whiten=self.whiten, fun=self.fun, fun_args=fun_args,
max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
random_state=self.random_state, return_X_mean=True,
compute_sources=compute_sources)
if self.whiten:
self.components_ = np.dot(unmixing, whitening)
self.mean_ = X_mean
self.whitening_ = whitening
else:
self.components_ = unmixing
self.mixing_ = linalg.pinv(self.components_)
if compute_sources:
self.__sources = sources
return sources
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self
"""
self._fit(X, compute_sources=True) # will become False in 0.16
return self
def transform(self, X, y=None, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X, copy=copy)
if self.whiten:
X -= self.mean_
return fast_dot(X, self.components_.T)
@deprecated('To be removed in 0.16. Use the `mixing_` attribute.')
def get_mixing_matrix(self):
"""Compute the mixing matrix.
Returns
-------
mixing_matrix : array, shape (n_features, n_components)
"""
return self.mixing_
@property
@deprecated('To be removed in 0.16. Use `fit_transform` and store the '
'output instead.')
def sources_(self):
return self.__sources
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
"""
if copy:
X = X.copy()
X = fast_dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
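# Illustrative usage sketch (not part of the original module): unmixing two
# artificially mixed, non-Gaussian signals with the function-style API defined
# above. The names below (rng, sources, mixing, X_mixed) are hypothetical
# example variables; numpy is assumed to be available as np.
#
#     >>> rng = np.random.RandomState(42)
#     >>> sources = rng.laplace(size=(2, 1000))            # independent sources
#     >>> mixing = np.array([[1.0, 0.5], [0.5, 1.0]])      # mixing matrix A
#     >>> X_mixed = np.dot(mixing, sources).T              # (n_samples, n_features)
#     >>> K, W, S = fastica(X_mixed, n_components=2, random_state=0)
#     >>> S.shape
#     (1000, 2)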
|
the-stack_106_27326 | # =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import os
import numpy as np
import nemo
import nemo.collections.nlp as nemo_nlp
from nemo import logging
from nemo.collections.nlp.data import NemoBertTokenizer
from nemo.collections.nlp.nm.trainables import TokenClassifier
from nemo.collections.nlp.utils.data_utils import get_vocab
# Parsing arguments
parser = argparse.ArgumentParser(description='NER with pretrained BERT')
parser.add_argument("--max_seq_length", default=128, type=int)
parser.add_argument("--fc_dropout", default=0, type=float)
parser.add_argument("--pretrained_bert_model", default="bert-base-cased", type=str)
parser.add_argument("--none_label", default='O', type=str)
parser.add_argument(
"--queries",
action='append',
default=[
'we bought four shirts from the nvidia gear ' + 'store in santa clara',
'Nvidia is a company',
'The Adventures of Tom Sawyer by Mark Twain '
+ 'is an 1876 novel about a young boy growing '
+ 'up along the Mississippi River',
],
help="Example: --queries 'San Francisco' --queries 'LA'",
)
parser.add_argument(
"--add_brackets",
action='store_false',
    help="Whether to put the predicted label in brackets or "
    "just append it to the word in the output",
)
parser.add_argument("--work_dir", default='output/checkpoints', type=str)
parser.add_argument("--labels_dict", default='label_ids.csv', type=str)
parser.add_argument("--amp_opt_level", default="O0", type=str, choices=["O0", "O1", "O2"])
args = parser.parse_args()
logging.info(args)
if not os.path.exists(args.work_dir):
raise ValueError(f'Work directory not found at {args.work_dir}')
if not os.path.exists(args.labels_dict):
raise ValueError(f'Dictionary with ids to labels not found at {args.labels_dict}')
nf = nemo.core.NeuralModuleFactory(
backend=nemo.core.Backend.PyTorch, optimization_level=args.amp_opt_level, log_dir=None
)
labels_dict = get_vocab(args.labels_dict)
""" Load the pretrained BERT parameters
See the list of pretrained models, call:
nemo_nlp.huggingface.BERT.list_pretrained_models()
"""
pretrained_bert_model = nemo_nlp.nm.trainables.huggingface.BERT(pretrained_model_name=args.pretrained_bert_model)
hidden_size = pretrained_bert_model.hidden_size
tokenizer = NemoBertTokenizer(args.pretrained_bert_model)
data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer(
queries=args.queries, tokenizer=tokenizer, max_seq_length=args.max_seq_length, batch_size=1
)
classifier = TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict), dropout=args.fc_dropout)
input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()
hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
logits = classifier(hidden_states=hidden_states)
###########################################################################
# Run inference on the defined graph using the trained checkpoint
evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=args.work_dir)
def concatenate(lists):
return np.concatenate([t.cpu() for t in lists])
def add_brackets(text, add=args.add_brackets):
return '[' + text + ']' if add else text
logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]
preds = np.argmax(logits, axis=2)
for i, query in enumerate(args.queries):
logging.info(f'Query: {query}')
pred = preds[i][subtokens_mask[i] > 0.5]
words = query.strip().split()
if len(pred) != len(words):
raise ValueError('Pred and words must be of the same length')
output = ''
for j, w in enumerate(words):
output += w
label = labels_dict[pred[j]]
if label != args.none_label:
label = add_brackets(label)
output += label
output += ' '
logging.info(f'Combined: {output.strip()}')
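# Example invocation (a sketch only; the script name and paths below are
# hypothetical and depend on where training wrote the checkpoints and the
# label-id dictionary):
#
#   python ner_infer.py \
#       --work_dir output/checkpoints \
#       --labels_dict label_ids.csv \
#       --pretrained_bert_model bert-base-cased \
#       --queries "Nvidia is a company"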
|
the-stack_106_27327 | import json
import scrapy
from locations.hourstudy import inputoutput
def process_hours(hours):
if 'Hours' not in hours:
return None
opening_hours = ''
days = hours['Hours']
for day in days:
shortname = day['ShortName']
if '-' in shortname:
startday, endday = shortname.split('-')
if startday == "M":
startday = "Mo"
if endday == "M":
endday = "Mo"
shortname = '-'.join([startday, endday])
timeperiod = day['TimePeriod']
starttime = timeperiod['BeginTime'][:-3]
endtime = timeperiod['ThruTime'][:-3]
time_together = '-'.join([starttime, endtime])
opening_hours += "{shortname} {time_together};".format(
shortname=shortname,
time_together=time_together
)
return opening_hours[:-1]
class TargetSpider(scrapy.Spider):
''' The Target api allows us a maximum search radius of 625 miles, so we have
to use multiple api requests with zip codes from different states to
search the whole country.
NOTE: There is a "key" variable used in the url for the requests.
It seems that this does not vary between natural requests (those sent
from a browser), but it might be something that expires and renders requests
invalid. So, if this spider stops working, that's probably why.
'''
name = "target"
allowed_domains = ["target.com"]
def start_requests(self):
zips = [
'97062', '90021', '73301', '82633', '29401', '60007', '04019',
'87101', '59601', '39056', '14201', '55111', '94203', '64030',
'33601', '10001', '98101', '37011', '58501', '78501', '97501'
]
template = 'https://api.target.com/v2/store?nearby={zipcode}&range=625&limit=999999&locale=en-US&key=eb2551e4accc14f38cc42d32fbc2b2ea'
headers = {
'Accept': 'application/json',
}
for zipcode in zips:
yield scrapy.http.FormRequest(
url=template.format(zipcode=zipcode),
method='GET',
headers=headers,
callback=self.parse
)
def parse(self, response):
data = json.loads(response.body_as_unicode())
stores = data['Locations']['Location']
for store in stores:
loc_info = store['Address']
properties = {
'ref': store['ID'],
'name': store['Name'],
'addr_full': loc_info['AddressLine1'],
'city': loc_info['City'],
'state': loc_info['Subdivision'],
'country': loc_info['CountryName'],
'postcode': loc_info['PostalCode'],
'lat': loc_info['Latitude'],
'lon': loc_info['Longitude'],
}
phones = store['TelephoneNumber']
            if isinstance(phones, list):
for i in phones:
if i['FunctionalTypeDescription'] == 'Main':
properties['phone'] = i['PhoneNumber']
            elif isinstance(phones, dict):
if phones['FunctionalTypeDescription'] == 'Main':
properties['phone'] = phones['PhoneNumber']
if 'OperatingHours' in store:
processed_hours = process_hours(store['OperatingHours'])
if processed_hours:
properties['opening_hours'] = processed_hours
raw = store['OperatingHours']
formatted = processed_hours
yield inputoutput(raw,formatted)
# yield inputoutput(**properties)
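# Illustrative sketch (not part of the spider): what process_hours returns for a
# minimal, hand-written payload shaped like the Target API's OperatingHours
# field. The sample payload below is hypothetical, not real API output.
#
#     >>> sample = {'Hours': [
#     ...     {'ShortName': 'M-Fr', 'TimePeriod': {'BeginTime': '08:00:00', 'ThruTime': '22:00:00'}},
#     ...     {'ShortName': 'Sa', 'TimePeriod': {'BeginTime': '09:00:00', 'ThruTime': '21:00:00'}},
#     ... ]}
#     >>> process_hours(sample)
#     'Mo-Fr 08:00-22:00;Sa 09:00-21:00'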
|
the-stack_106_27328 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pytest
from azext_iot.tests.generators import generate_generic_id
from azext_iot.tests.settings import DynamoSettings
from azure.cli.testsdk import LiveScenarioTest
from azext_iot.common.embedded_cli import EmbeddedCLI
from knack.log import get_logger
logger = get_logger(__name__)
MOCK_RESOURCE_TAGS = "a=b c=d"
MOCK_RESOURCE_TAGS_DICT = {"a": "b", "c": "d"}
MOCK_DEAD_LETTER_ENDPOINT = "https://accountname.blob.core.windows.net/containerName"
MOCK_DEAD_LETTER_SECRET = "{}?sasToken".format(MOCK_DEAD_LETTER_ENDPOINT)
REGION_RESOURCE_LIMIT = 10
REGION_LIST = ["westus2", "westcentralus", "eastus2", "eastus", "eastus2euap"]
required_test_env_vars = ["azext_iot_testrg"]
resource_test_env_vars = [
"azext_dt_ep_eventhub_namespace",
"azext_dt_ep_eventhub_policy",
"azext_dt_ep_eventhub_topic",
"azext_dt_ep_servicebus_namespace",
"azext_dt_ep_servicebus_policy",
"azext_dt_ep_servicebus_topic",
"azext_dt_ep_eventgrid_topic",
"azext_dt_ep_rg",
"azext_dt_region",
]
settings = DynamoSettings(req_env_set=required_test_env_vars, opt_env_set=resource_test_env_vars)
# Endpoint resource group
EP_RG = settings.env.azext_dt_ep_rg or settings.env.azext_iot_testrg
# EventHub
EP_EVENTHUB_NAMESPACE = settings.env.azext_dt_ep_eventhub_namespace or ("test-ehn-" + generate_generic_id())
EP_EVENTHUB_POLICY = settings.env.azext_dt_ep_eventhub_policy or ("test-ehp-" + generate_generic_id())
EP_EVENTHUB_TOPIC = settings.env.azext_dt_ep_eventhub_topic or ("test-eh-" + generate_generic_id())
# Service Bus
EP_SERVICEBUS_NAMESPACE = settings.env.azext_dt_ep_servicebus_namespace or ("test-sbn-" + generate_generic_id())
EP_SERVICEBUS_POLICY = settings.env.azext_dt_ep_servicebus_policy or ("test-sbp-" + generate_generic_id())
EP_SERVICEBUS_TOPIC = settings.env.azext_dt_ep_servicebus_topic or ("test-sbt-" + generate_generic_id())
# EventGrid
EP_EVENTGRID_TOPIC = settings.env.azext_dt_ep_eventgrid_topic or ("test-egt-" + generate_generic_id())
def generate_resource_id():
return "dtcli-{}".format(generate_generic_id())
class DTLiveScenarioTest(LiveScenarioTest):
role_map = {
"owner": "Azure Digital Twins Data Owner",
"reader": "Azure Digital Twins Data Reader",
}
def __init__(self, test_scenario):
assert test_scenario
super(DTLiveScenarioTest, self).__init__(test_scenario)
self.embedded_cli = EmbeddedCLI()
self._bootup_scenario()
def _bootup_scenario(self):
self._is_provider_registered()
self._init_basic_env_vars()
self.tracked_instances = []
def _is_provider_registered(self):
result = self.cmd(
"provider show --namespace 'Microsoft.DigitalTwins' --query 'registrationState'"
)
if '"registered"' in result.output.lower():
return
pytest.skip(
"Microsoft.DigitalTwins provider not registered. "
"Run 'az provider register --namespace Microsoft.DigitalTwins'"
)
def _init_basic_env_vars(self):
self._force_region = settings.env.azext_dt_region
if self._force_region and not self.is_region_available(self._force_region):
raise RuntimeError(
"Forced region: {} does not have capacity.".format(self._force_region)
)
self.region = (
self._force_region if self._force_region else self.get_available_region()
)
self.rg = settings.env.azext_iot_testrg
if not self.rg:
pytest.skip(
"Digital Twins CLI tests requires at least 'azext_iot_testrg' for resource deployment."
)
self.rg_region = self.embedded_cli.invoke(
"group show --name {}".format(self.rg)
).as_json()["location"]
@property
def current_user(self):
return self.embedded_cli.invoke("account show").as_json()["user"]["name"]
@property
def current_subscription(self):
return self.embedded_cli.invoke("account show").as_json()["id"]
def wait_for_capacity(
self, region=None, capacity: int = 1, wait_in_sec: int = 10, interval: int = 3
):
from time import sleep
target_region = region
if not target_region:
target_region = self.region
if self.is_region_available(region=target_region, capacity=capacity):
return
while interval >= 1:
            logger.info("Waiting {} (sec) for capacity.".format(wait_in_sec))
sleep(wait_in_sec)
if self.is_region_available(region=target_region, capacity=capacity):
return
interval = interval - 1
raise RuntimeError(
"Unavailable region DT capacity. wait(sec): {}, interval: {}, region: {}, capacity: {}".format(
wait_in_sec, interval, target_region, capacity
)
)
def is_region_available(self, region, capacity: int = 1):
region_capacity = self.calculate_region_capacity
return (region_capacity.get(region, 0) + capacity) <= REGION_RESOURCE_LIMIT
@property
def calculate_region_capacity(self) -> dict:
instances = self.instances = self.embedded_cli.invoke("dt list").as_json()
capacity_map = {}
for instance in instances:
cap_val = capacity_map.get(instance["location"], 0)
cap_val = cap_val + 1
capacity_map[instance["location"]] = cap_val
for region in REGION_LIST:
if region not in capacity_map:
capacity_map[region] = 0
return capacity_map
def get_available_region(self, capacity: int = 1, skip_regions: list = None) -> str:
if not skip_regions:
skip_regions = []
region_capacity = self.calculate_region_capacity
while region_capacity:
region = min(region_capacity, key=region_capacity.get)
if region not in skip_regions:
if region_capacity[region] + capacity <= REGION_RESOURCE_LIMIT:
return region
region_capacity.pop(region, None)
raise RuntimeError(
"There are no available regions with capacity: {} for provision DT instances in subscription: {}".format(
capacity, self.current_subscription
)
)
def ensure_eventhub_resource(self):
"""Ensure that the test has all Event hub resources."""
if not settings.env.azext_dt_ep_eventhub_namespace:
self.embedded_cli.invoke(
"eventhubs namespace create --name {} --resource-group {}".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
)
)
if not settings.env.azext_dt_ep_eventhub_topic:
self.embedded_cli.invoke(
"eventhubs eventhub create --namespace-name {} --resource-group {} --name {}".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
EP_EVENTHUB_TOPIC
)
)
if not settings.env.azext_dt_ep_eventhub_policy:
self.embedded_cli.invoke(
"eventhubs eventhub authorization-rule create --namespace-name {} --resource-group {} "
"--eventhub-name {} --name {} --rights Send".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
EP_EVENTHUB_TOPIC,
EP_EVENTHUB_POLICY
)
)
def ensure_eventgrid_resource(self):
"""Ensure that the test has the Event Grid."""
if not settings.env.azext_dt_ep_eventgrid_topic:
self.embedded_cli.invoke(
"eventgrid topic create --name {} --resource-group {} -l {}".format(
EP_EVENTGRID_TOPIC,
EP_RG,
self.region,
)
)
def ensure_servicebus_resource(self):
"""Ensure that the test has all Service Bus resources."""
if not settings.env.azext_dt_ep_servicebus_namespace:
self.embedded_cli.invoke(
"servicebus namespace create --name {} --resource-group {}".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
)
)
if not settings.env.azext_dt_ep_servicebus_topic:
self.embedded_cli.invoke(
"servicebus topic create --namespace-name {} --resource-group {} --name {}".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
EP_SERVICEBUS_TOPIC
)
)
if not settings.env.azext_dt_ep_servicebus_policy:
self.embedded_cli.invoke(
"servicebus topic authorization-rule create --namespace-name {} --resource-group {} "
"--topic-name {} --name {} --rights Send".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
EP_SERVICEBUS_TOPIC,
EP_SERVICEBUS_POLICY
)
)
def delete_eventhub_resources(self):
"""Delete all created resources for endpoint tests."""
# Eventhub
if not settings.env.azext_dt_ep_eventhub_namespace:
self.embedded_cli.invoke(
"eventhubs namespace delete --name {} --resource-group {}".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
)
)
elif not settings.env.azext_dt_ep_eventhub_topic:
self.embedded_cli.invoke(
"eventhubs eventhub delete --namespace-name {} --resource-group {} --name {}".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
EP_EVENTHUB_TOPIC
)
)
elif not settings.env.azext_dt_ep_eventhub_policy:
            self.embedded_cli.invoke(
                "eventhubs eventhub authorization-rule delete --namespace-name {} --resource-group {} "
                "--eventhub-name {} --name {}".format(
EP_EVENTHUB_NAMESPACE,
EP_RG,
EP_EVENTHUB_TOPIC,
EP_EVENTHUB_POLICY
)
)
def delete_eventgrid_resources(self):
"""Delete all created resources for endpoint tests."""
# Event Grid
if not settings.env.azext_dt_ep_eventgrid_topic:
self.embedded_cli.invoke(
"eventgrid topic delete --name {} --resource-group {}".format(
EP_EVENTGRID_TOPIC,
EP_RG,
)
)
def delete_servicebus_resources(self):
"""Delete all created resources for endpoint tests."""
# Service Bus
if not settings.env.azext_dt_ep_servicebus_namespace:
self.embedded_cli.invoke(
"servicebus namespace delete --name {} --resource-group {}".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
)
)
elif not settings.env.azext_dt_ep_servicebus_topic:
self.embedded_cli.invoke(
"servicebus topic delete --namespace-name {} --resource-group {} --name {}".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
EP_SERVICEBUS_TOPIC
)
)
elif not settings.env.azext_dt_ep_servicebus_policy:
self.embedded_cli.invoke(
"servicebus topic authorization-rule delete --namespace-name {} --resource-group {} "
"--topic-name {} --name {} ".format(
EP_SERVICEBUS_NAMESPACE,
EP_RG,
EP_SERVICEBUS_TOPIC,
EP_SERVICEBUS_POLICY
)
)
def track_instance(self, instance: dict):
self.tracked_instances.append((instance["name"], instance["resourceGroup"]))
def tearDown(self):
for instance in self.tracked_instances:
try:
self.embedded_cli.invoke(
"dt delete -n {} -g {} -y --no-wait".format(instance[0], instance[1])
)
except Exception:
logger.info("The DT instance {} has already been deleted.".format(instance))
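# Illustrative sketch (not part of the test suite) of the region-selection rule
# used by get_available_region above: pick the least-loaded region whose current
# instance count plus the requested capacity stays within REGION_RESOURCE_LIMIT.
# The capacity numbers below are made up for the example.
#
#     >>> capacity_map = {"westus2": 9, "eastus": 3, "eastus2": 7}
#     >>> limit, needed = 10, 2
#     >>> candidates = {r: c for r, c in capacity_map.items() if c + needed <= limit}
#     >>> min(candidates, key=candidates.get)
#     'eastus'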
|
the-stack_106_27329 | """
Data container for ligand-kinase data.
"""
import logging
import pandas as pd
from src.evaluation.data.base_data import BaseData
logger = logging.getLogger(__name__)
class LigandVsKinaseData(BaseData):
"""
Prepare data to compare ligand- and kinase-focused data.
Attributes
----------
ligand_query : str
Ligand name (kinases are extracted from `ligand_kinase_matrix` by this ligand).
kinase_query : str
Kinase name (kinases are extracted from `kinase_kinase_matrix` by this kinase).
ligand_kinase_method : str
Name for ligand profiling method to be used as identifier.
kinase_kinase_method : str
Name for kinase distances method to be used as identifier.
data : pandas.DataFrame
Merged ligand-kinase and kinase-kinase datasets.
n_kinases_by_kinase : int
Number of kinases in kinase-kinase method
n_kinases_by_ligand : int
Number of kinases in ligand-kinase method
n_kinases_shared : int
Number of shared kinases
n_active_kinases_shared : int
Number of shared active kinases
"""
def __init__(
self,
ligand_query,
kinase_query,
ligand_kinase_method,
kinase_kinase_method,
ligand_kinase_matrix,
kinase_kinase_matrix,
kinase_activity_cutoff,
kinase_activity_max,
):
"""
Initiate dataset to compare ligand- and kinase-focused data.
Parameters
----------
ligand_query : str
Ligand name (kinases are extracted from `ligand_kinase_matrix` by this ligand).
kinase_query : str
Kinase name (kinases are extracted from `kinase_kinase_matrix` by this kinase).
ligand_kinase_method : str
Name for ligand profiling method to be used as identifier.
kinase_kinase_method : str
Name for kinase distances method to be used as identifier.
ligand_kinase_matrix : pandas.DataFrame
Ligand-kinase activity matrix.
kinase_kinase_matrix : pandas.DataFrame
Kinase-kinase distance matrix.
kinase_activity_cutoff : float
Cutoff value to be used to determine activity. By default this cutoff is the maximum
value. Set `kinase_activity_max=False` if cutoff is the minimum value.
kinase_activity_max : bool
If `True` (default), the `kinase_activity_cutoff` is used as the maximum cutoff, else
as the minimum cutoff.
"""
self.ligand_query = ligand_query
self.kinase_query = kinase_query
self.ligand_kinase_method = ligand_kinase_method
self.kinase_kinase_method = kinase_kinase_method
self.data = None
self.n_kinases_by_kinase = None
self.n_kinases_by_ligand = None
self.n_kinases_shared = None
self.n_active_kinases_shared = None
(
self.data,
self.n_kinases_by_kinase,
self.n_kinases_by_ligand,
self.n_kinases_shared,
self.n_active_kinases_shared,
) = self._merge_datasets(
ligand_kinase_matrix,
kinase_kinase_matrix,
kinase_activity_cutoff,
kinase_activity_max,
)
def _merge_datasets(
self,
ligand_kinase_matrix,
kinase_kinase_matrix,
kinase_activity_cutoff,
kinase_activity_max,
):
"""
Compare kinase ranks between a ligand profiling dataset and a kinase distances dataset.
The profiling dataset contains kinases for the query ligand extracted from the
ligand-kinase matrix. The distances dataset contains kinases for the query kinase extracted
from the kinase-kinase matrix.
Returns
-------
kinase_data : pandas.DataFrame
Contains for each kinase (rows) details on profiling and distances ranks (columns):
<ligand_kinase_method>.measure : float
Ligand profiling data.
<ligand_kinase_method>.active : bool
Active kinase?
<ligand_kinase_method>.rank1 : float
Kinase rank by profiling data based on all ligand-kinase method data points.
<ligand_kinase_method>.rank2 : float
                Kinase rank by profiling data based on shared data points only.
<kinase_kinase_method>.measure : float
Kinase distances
<kinase_kinase_method>.rank1 : float
Kinase rank by kinase distances based on all kinase-kinase method data points.
<kinase_kinase_method>.rank2 : float
Kinase rank by kinase distances based on shared data points only.
n_kinases_by_kinase : int
Number of kinases in kinase-kinase method
n_kinases_by_ligand : int
Number of kinases in ligand-kinase method
n_kinases_shared : int
Number of shared kinases
n_active_kinases_shared : int
Number of shared active kinases
"""
# Load kinase data from kinase-kinase matrix (by query kinase)
kinase_data_by_kinase = self._kinase_data_by_query_kinase(
self.kinase_query, kinase_kinase_matrix
)
# Load kinase data from ligand-kinase matrix (by query ligand)
ranks_by_ligand = self._kinase_data_by_query_ligand(
self.ligand_query, ligand_kinase_matrix
)
if kinase_activity_max:
ranks_by_ligand[f"active_{self.ligand_kinase_method}"] = (
ranks_by_ligand["measure"] <= kinase_activity_cutoff
)
else:
ranks_by_ligand[f"active_{self.ligand_kinase_method}"] = (
ranks_by_ligand["measure"] >= kinase_activity_cutoff
)
# Merge two datasets while keeping only common kinases
kinase_data = pd.merge(
kinase_data_by_kinase,
ranks_by_ligand,
how="inner",
on="kinase",
suffixes=(f"_{self.kinase_kinase_method}", f"_{self.ligand_kinase_method}"),
)
kinase_data = kinase_data.set_index("kinase")
# Now rank kinases again based only on the shared kinases
kinase_data[f"rank2_{self.kinase_kinase_method}"] = kinase_data[
f"measure_{self.kinase_kinase_method}"
].rank()
kinase_data[f"rank2_{self.ligand_kinase_method}"] = kinase_data[
f"measure_{self.ligand_kinase_method}"
].rank()
# Rearange columns
kinase_data = kinase_data[
[
f"measure_{self.ligand_kinase_method}",
f"active_{self.ligand_kinase_method}",
f"rank1_{self.ligand_kinase_method}",
f"rank2_{self.ligand_kinase_method}",
f"measure_{self.kinase_kinase_method}",
f"rank1_{self.kinase_kinase_method}",
f"rank2_{self.kinase_kinase_method}",
]
]
kinase_data.columns = [f"{i.split('_')[1]}.{i.split('_')[0]}" for i in kinase_data.columns]
# Log number of kinases for different criteria
n_kinases_by_kinase = kinase_data_by_kinase.shape[0]
n_kinases_by_ligand = ranks_by_ligand.shape[0]
n_kinases_shared = kinase_data.shape[0]
n_active_kinases_shared = kinase_data[
kinase_data[f"{self.ligand_kinase_method}.active"]
].shape[0]
# Sort rows by kinase-kinase method (DO NOT CHANGE!)
kinase_data = kinase_data.sort_values(f"{self.kinase_kinase_method}.measure")
# Throw error if...
try:
# ... query kinase is not part of the ligand dataset
kinase_data.loc[self.kinase_query]
except KeyError:
raise KeyError(
f"{self.kinase_query} is not part of the ligand profiling dataset "
f"for {self.ligand_query}"
)
try:
# ... query kinase is not measured as an active kinase in ligand dataset
kinase_data[kinase_data[f"{self.ligand_kinase_method}.active"]].loc[self.kinase_query]
except KeyError:
raise KeyError(
f"{self.kinase_query} is not an active kinase in the ligand profiling dataset "
f"for {self.ligand_query}"
)
return (
kinase_data,
n_kinases_by_kinase,
n_kinases_by_ligand,
n_kinases_shared,
n_active_kinases_shared,
)
    def _kinase_data_by_query_ligand(self, ligand_query, ligand_kinase_matrix):
        """
        Load kinase data from the ligand-kinase matrix for the query ligand.
Parameters
----------
ligand_query : str
Ligand name (kinases are extracted from `ligand_kinase_matrix` by this ligand).
ligand_kinase_matrix : pandas.DataFrame
Profiling data for a ligand set against a kinase set loaded from `src.data.profiling`.
Returns
-------
pandas.DataFrame
Contains for each kinase (rows) details on profiling and distances ranks (columns):
kinase : name
Kinase name.
measure : float
Ligand profiling data.
rank1 : float
Kinase rank by profiling data based on all ligand-kinase method data points.
"""
try:
kinase_data = ligand_kinase_matrix[ligand_query]
kinase_data = self._add_rank(kinase_data)
return kinase_data
except KeyError:
raise KeyError(f"Query ligand {ligand_query} is not part of dataset.")
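# Illustrative sketch (not part of the module) of the activity-cutoff convention
# used in _merge_datasets: with kinase_activity_max=True a kinase is flagged
# active when its measured value is <= kinase_activity_cutoff, mirroring the
# comparison above. The kinase names and numbers below are made-up examples.
#
#     >>> import pandas as pd
#     >>> measures = pd.Series({"EGFR": 10.0, "BRAF": 800.0, "ABL1": 50.0})
#     >>> measures <= 100.0
#     EGFR     True
#     BRAF    False
#     ABL1     True
#     dtype: bool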
|
the-stack_106_27330 | """
This script is used to write `sqf/dababase.py`, that contains all valid SQF expressions.
It reads a file from here:
https://raw.githubusercontent.com/intercept/intercept/master/src/client/headers/client/sqf_pointers_declaration.hpp
"""
import urllib.request
from sqf.interpreter_types import ForType, IfType, SwitchType, WhileType, TryType, WithType
from sqf.types import Code, Array, Boolean, Number, Type, Nothing, Anything, String, Namespace, \
Object, Config, Script, Control, Group, Display, Side, Task, Location, NetObject, DiaryReport, TeamMember, HashMap
# The mapping of SQF types to our types
STRING_TO_TYPE = {
'array': Array,
'scalar': Number,
'bool': Boolean,
'code': Code,
'string': String,
'text': String,
'namespace': Namespace,
'config': Config,
'location': Location,
'object': Object,
'group': Group,
'member': TeamMember, # team_member gets split
'control': Control,
'display': Display,
'exception': TryType,
'for': ForType,
'if': IfType,
'switch': SwitchType,
'while': WhileType,
'with': WithType,
'side': Side,
'task': Task,
'script': Script,
'nan': Number,
'nothing': Nothing,
'netobject': NetObject,
'any': Type,
'diary': DiaryReport, # diary_record gets split
'hashmap': HashMap
}
# the argument the type is initialized with
TYPE_TO_INIT_ARGS = {
Namespace: "'missionNamespace'",
}
# The return type "ANY" means that we do not know it, so it is Nothing()
STRING_TO_TYPE_RETURN = STRING_TO_TYPE.copy()
STRING_TO_TYPE_RETURN['any'] = Anything
WRONG_RETURN_TYPES = {
'attachedto': Object,
'getclientstatenumber': Number,
'handgunmagazine': Array,
'ammoonpylon': Anything
}
def _parse_type_names(type_names):
# Alternative types separated by _ char
types_names = type_names.split('_')
# Never care about NaN type (covered by scalar)
if 'nan' in types_names:
types_names.remove('nan')
# Remove parts of types that also get split
if 'team' in types_names:
types_names.remove('team')
if 'record' in types_names:
types_names.remove('record')
return types_names
def _parse_return_type_names(return_type_names):
return_type_names = _parse_type_names(return_type_names)
if len(return_type_names) > 1 and 'nothing' in return_type_names:
return_type_names.remove('nothing')
if len(return_type_names) > 1:
return_type_name = 'any'
else:
return_type_name = return_type_names[0]
return STRING_TO_TYPE_RETURN[return_type_name]
# url = 'https://raw.githubusercontent.com/intercept/intercept/master/src/client/headers/client/sqf_pointers_declaration.hpp'
# data = urllib.request.urlopen(url).read().decode('utf-8').split('\n')
data = open('sqf_pointers_declaration.hpp', 'r').read().split('\n')
expressions = []
for line in data:
if not line.startswith('static '):
continue
sections = line.split('__')
num_sections = len(sections)
if num_sections not in [4, 5, 6]:
        print('Couldn\'t read line: ', line)
continue
# Name always comes first
op_name = sections[1]
# Return type always comes last (some operators have incorrect values for whatever reason)
if op_name in WRONG_RETURN_TYPES:
return_type = WRONG_RETURN_TYPES[op_name]
else:
return_type = _parse_return_type_names(sections[num_sections-1][:-1])
# Adds any relevant initialization argument for the return type
init_code = ''
# Number of sections allows us to classify the operation
if num_sections == 6:
if return_type in TYPE_TO_INIT_ARGS:
init_code = ', action=lambda lhs, rhs, i: %s' % TYPE_TO_INIT_ARGS[return_type]
for lhs_type_name in _parse_type_names(sections[2]):
lhs_type = STRING_TO_TYPE[lhs_type_name]
for rhs_type_name in _parse_type_names(sections[3]):
rhs_type = STRING_TO_TYPE[rhs_type_name]
expression = 'BinaryExpression(' \
'{lhs_type}, ' \
'Keyword(\'{keyword}\'), ' \
'{rhs_type}, {return_type}{init_code})'.format(
lhs_type=lhs_type.__name__,
keyword=op_name,
rhs_type=rhs_type.__name__,
return_type=return_type.__name__,
init_code=init_code
)
expressions.append(expression)
elif num_sections == 5:
if return_type in TYPE_TO_INIT_ARGS:
init_code = ', action=lambda rhs, i: %s' % TYPE_TO_INIT_ARGS[return_type]
for rhs_type_name in _parse_type_names(sections[2]):
rhs_type = STRING_TO_TYPE[rhs_type_name]
expression = 'UnaryExpression(' \
'Keyword(\'{keyword}\'), ' \
'{rhs_type}, {return_type}{init_code})'.format(
keyword=op_name,
rhs_type=rhs_type.__name__,
return_type=return_type.__name__,
init_code=init_code
)
expressions.append(expression)
else:
if return_type in TYPE_TO_INIT_ARGS:
init_code = ', action=lambda i: %s' % TYPE_TO_INIT_ARGS[return_type]
expression = 'NullExpression(' \
'Keyword(\'{keyword}\'), ' \
'{return_type}{init_code})'.format(
keyword=op_name,
return_type=return_type.__name__,
init_code=init_code
)
expressions.append(expression)
preamble = r'''# This file is generated automatically by `build_database.py`. Change it there.
from sqf.expressions import BinaryExpression, UnaryExpression, NullExpression
from sqf.types import Keyword, Type, Nothing, Anything, String, Code, Array, Number, Boolean, Namespace, \
Object, Config, Script, Control, Group, Display, Side, Task, Location, NetObject, DiaryReport, TeamMember, HashMap
from sqf.interpreter_types import WhileType, \
ForType, SwitchType, IfType, TryType, WithType'''
# Expressions that use symbols are hardcoded since they aren't present in the parsed file
symbols = r'''
EXPRESSIONS = [
BinaryExpression(Array, Keyword('#'), Number, Anything),
BinaryExpression(Number, Keyword('!='), Boolean, Boolean),
BinaryExpression(Number, Keyword('!='), Boolean, Anything),
BinaryExpression(Number, Keyword('!='), Number, Boolean),
BinaryExpression(String, Keyword('!='), String, Boolean),
BinaryExpression(Object, Keyword('!='), Object, Boolean),
BinaryExpression(Group, Keyword('!='), Group, Boolean),
BinaryExpression(Side, Keyword('!='), Side, Boolean),
BinaryExpression(String, Keyword('!='), String, Boolean),
BinaryExpression(Config, Keyword('!='), Config, Boolean),
BinaryExpression(Display, Keyword('!='), Display, Boolean),
BinaryExpression(Control, Keyword('!='), Control, Boolean),
BinaryExpression(TeamMember, Keyword('!='), TeamMember, Boolean),
BinaryExpression(NetObject, Keyword('!='), NetObject, Boolean),
BinaryExpression(Task, Keyword('!='), Task, Boolean),
BinaryExpression(Location, Keyword('!='), Location, Boolean),
BinaryExpression(Number, Keyword('%'), Number, Number),
BinaryExpression(Boolean, Keyword('&&'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('&&'), Code, Boolean),
BinaryExpression(Number, Keyword('*'), Number, Number),
BinaryExpression(Number, Keyword('+'), Number, Number),
BinaryExpression(String, Keyword('+'), String, String),
BinaryExpression(Array, Keyword('+'), Array, Array),
BinaryExpression(Number, Keyword('-'), Number, Number),
BinaryExpression(Array, Keyword('-'), Array, Array),
BinaryExpression(Number, Keyword('/'), Number, Number),
BinaryExpression(Config, Keyword('/'), String, Config),
BinaryExpression(SwitchType, Keyword(':'), Code, Nothing),
BinaryExpression(Number, Keyword('<'), Number, Boolean),
BinaryExpression(Number, Keyword('<='), Number, Boolean),
BinaryExpression(Number, Keyword('=='), Number, Boolean),
BinaryExpression(String, Keyword('=='), String, Boolean),
BinaryExpression(Object, Keyword('=='), Object, Boolean),
BinaryExpression(Group, Keyword('=='), Group, Boolean),
BinaryExpression(Side, Keyword('=='), Side, Boolean),
BinaryExpression(String, Keyword('=='), String, Boolean),
BinaryExpression(Config, Keyword('=='), Config, Boolean),
BinaryExpression(Display, Keyword('=='), Display, Boolean),
BinaryExpression(Control, Keyword('=='), Control, Boolean),
BinaryExpression(TeamMember, Keyword('=='), TeamMember, Boolean),
BinaryExpression(NetObject, Keyword('=='), NetObject, Boolean),
BinaryExpression(Task, Keyword('=='), Task, Boolean),
BinaryExpression(Location, Keyword('=='), Location, Boolean),
BinaryExpression(Number, Keyword('>'), Number, Boolean),
BinaryExpression(Number, Keyword('>='), Number, Boolean),
BinaryExpression(Config, Keyword('>>'), String, Config),
BinaryExpression(Number, Keyword('^'), Number, Number),
BinaryExpression(Boolean, Keyword('||'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('||'), Code, Boolean),
BinaryExpression(Array, Keyword('#'), Number, Anything),
UnaryExpression(Keyword('!'), Boolean, Boolean),
UnaryExpression(Keyword('+'), Number, Number),
UnaryExpression(Keyword('+'), Array, Array),
UnaryExpression(Keyword('-'), Number, Number),
BinaryExpression(Object, Keyword('ammoonpylon'), String, Boolean),
BinaryExpression(Object, Keyword('ammoonpylon'), Number, Boolean),
BinaryExpression(HashMap, Keyword('deleteat'), Side, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Config, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), String, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Number, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Boolean, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Array, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Code, Anything),
BinaryExpression(HashMap, Keyword('deleteat'), Namespace, Anything),
BinaryExpression(HashMap, Keyword('get'), Side, Anything),
BinaryExpression(HashMap, Keyword('get'), Config, Anything),
BinaryExpression(HashMap, Keyword('get'), String, Anything),
BinaryExpression(HashMap, Keyword('get'), Number, Anything),
BinaryExpression(HashMap, Keyword('get'), Boolean, Anything),
BinaryExpression(HashMap, Keyword('get'), Array, Anything),
BinaryExpression(HashMap, Keyword('get'), Code, Anything),
BinaryExpression(HashMap, Keyword('get'), Namespace, Anything),
BinaryExpression(Side, Keyword('in'), HashMap, Boolean),
BinaryExpression(Config, Keyword('in'), HashMap, Boolean),
BinaryExpression(String, Keyword('in'), HashMap, Boolean),
BinaryExpression(Number, Keyword('in'), HashMap, Boolean),
BinaryExpression(Boolean, Keyword('in'), HashMap, Boolean),
BinaryExpression(Array, Keyword('in'), HashMap, Boolean),
BinaryExpression(Code, Keyword('in'), HashMap, Boolean),
BinaryExpression(Namespace, Keyword('in'), HashMap, Boolean),
'''
with open('sqf/database.py', 'w') as f:
f.write(preamble + '\n\n')
f.write(symbols + ' ')
f.write(',\n '.join(expressions))
f.write('\n]\n')
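# Illustrative sketch (not part of the script): the shape of one declaration and
# the expression it becomes. The declaration below is a made-up example written
# in the format of sqf_pointers_declaration.hpp, not a real entry.
#
#   static sqf_script_type __setdamage__object__scalar__ret__nothing;
#
# It splits on '__' into 6 sections, so it is treated as a binary operator and
# emitted roughly as:
#
#   BinaryExpression(Object, Keyword('setdamage'), Number, Nothing)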
|
the-stack_106_27331 | import io
from datetime import timedelta
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
matplotlib.use("Agg")
matplotlib.style.use("seaborn")
def _moving_avg(data, days=7):
# Use 1d convolution for moving average, as explained in https://stackoverflow.com/a/22621523.
return np.convolve(data, np.ones(days) / days, mode="valid")
def plot_timeseries(data):
    fig, ax = plt.subplots(figsize=(13, 8))
    ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
    cases, deaths = _moving_avg(data["cases"]), _moving_avg(data["deaths"])
    dates = [data["last_date"] - timedelta(days=i) for i in range(len(cases))][::-1]
plt.plot(dates, cases, ".-c", label="Infections")
plt.fill_between(dates, cases, color="c", alpha=0.5)
plt.plot(dates, deaths, ".-r", label="Deaths")
plt.fill_between(dates, deaths, color="r", alpha=0.5)
plt.annotate(round(cases[-1]), (dates[-1], cases[-1]), ha="right", va="bottom", color="c")
plt.annotate(round(deaths[-1]), (dates[-1], deaths[-1]), ha="right", va="bottom", color="r")
plt.legend()
plt.xticks(rotation=30, ha="right")
plt.xlim((dates[0], dates[-1]))
plt.ylabel("Cases (moving 7-day avg.)")
plt.title("New Covid-19 Cases in {} - {} Days".format(data["name"], len(cases)))
plt.text(0, 0, "by @coronaviruskenyabot; data by JHUCSSE", fontsize=6, va="bottom", transform=ax.transAxes)
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format="png")
buffer.seek(0)
plt.clf()
return buffer
def plot_vaccinations_series(data):
    fig, ax = plt.subplots(figsize=(13, 8))
    ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
    vaccinations = _moving_avg(data["vaccinations"])
    dates = [data["last_date"] - timedelta(days=i) for i in range(len(vaccinations))][::-1]
plt.plot(dates, vaccinations, ".-g")
plt.fill_between(dates, vaccinations, color="g", alpha=0.5)
plt.xticks(rotation=30, ha="right")
plt.xlim((dates[0], dates[-1]))
plt.ylabel("Vaccinations Doses (moving 7-day avg.)")
plt.title("Daily Vaccination Doses in {} - {} Days".format(data["name"], len(vaccinations)))
plt.text(0.01, 0.95, f"Total: {data['total']:,}", weight="bold", transform=ax.transAxes)
plt.text(
0, 0, "by @coronaviruskenyabot; data by ourworldindata.org.", fontsize=6, va="bottom", transform=ax.transAxes
)
plt.tight_layout()
buffer = io.BytesIO()
plt.savefig(buffer, format="png")
buffer.seek(0)
plt.clf()
return buffer
if __name__ == "__main__":
import argparse
from statistics_api import CovidApi
parser = argparse.ArgumentParser(description="Create timeline plots used by @coronaviruskenyabot")
parser.add_argument("type", type=str, choices=["cases", "vacc"], help="type of plot to create")
parser.add_argument("--country", type=str, default=None, help="country to plot, world by default")
parser.add_argument("-o", "--output", type=str, default="plot.png", help="output file, defaults to plot.png")
args = parser.parse_args()
api = CovidApi()
if args.type == "cases":
data = api.timeseries(country=args.country)
buffer = plot_timeseries(data)
else:
data = api.vaccinations_series(country=args.country)
buffer = plot_vaccinations_series(data)
with open(args.output, "wb") as f:
f.write(buffer.getvalue())
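# Illustrative note (not part of the script): _moving_avg uses a "valid"
# convolution, so the output is shorter than the input by days - 1 samples.
#
#     >>> _moving_avg([1, 2, 3, 4, 5, 6, 7, 8], days=7)
#     array([4., 5.])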
|
the-stack_106_27334 | from typing import List
from homecomp.models import AssetMixin
from homecomp.models import BudgetItem
from homecomp.models import BudgetLineItem
from homecomp.models import MonthlyBudget
from homecomp.models import MonthlyExpense
from homecomp import const
class Investment(AssetMixin, BudgetItem):
"""
Simple Investment.
Always generates expected return and invests remaining budget. If the
budget is negative then the assumption is that the investment is liquid
enough to fill any budget gaps.
"""
def __init__(self,
principal: int = 0,
roi: float = const.DEFAULT_INVESTMENT_RETURN_RATE,
**kwargs):
super().__init__(value=principal, **kwargs)
self.rate = roi
def _step(self, budget: MonthlyBudget) -> MonthlyExpense:
"""Calculate cost for current period"""
self.value = round(self.value * (1 + self.rate), 2)
self.value += budget.remaining
return MonthlyExpense(
savings=-budget.remaining,
costs=0
)
class Home(AssetMixin, BudgetItem):
"""
Simple Home.
Behaves the same way as an investment only with different default rate of return
and no monthly budget contribution.
"""
def __init__(self,
price: int,
lifetime: List[int] = None,
down_payment_pct: float = const.DEFAULT_DOWN_PAYMENT_PCT,
appreciation: float = const.DEFAULT_HOME_APPRECIATION_RATE,
buying_costs_rate: float = const.DEFAULT_HOME_BUYING_COSTS_PCT,
selling_costs_rate: float = const.DEFAULT_HOME_SELLING_COSTS_PCT,
**kwargs):
lifetime = lifetime or []
initial_value = price if const.INIT_PERIOD in lifetime else 0
super().__init__(value=initial_value, **kwargs)
self.price = price
self.rate = appreciation
self.lifetime = lifetime
self.down_payment_pct = down_payment_pct
self.buying_costs_rate = buying_costs_rate
self.selling_costs_rate = selling_costs_rate
def is_owned(self, period):
if not self.lifetime:
return True
return period in self.lifetime
@property
def buying_period(self):
if not self.lifetime:
return const.NEVER_PERIOD
return self.lifetime[0] - 1 # bought period before the first period the home is owned
@property
def selling_period(self):
if not self.lifetime:
return const.NEVER_PERIOD
return self.lifetime[-1]
def _purchasing_step(self, budget: MonthlyBudget) -> MonthlyExpense:
"""Set asset value and remove down payment and buying costs from cash flow"""
self.value = self.price
buying_costs = self.price * self.buying_costs_rate
down_payment = self.price * self.down_payment_pct
return MonthlyExpense(
costs=-buying_costs,
savings=-down_payment
)
def _selling_step(self, budget: MonthlyBudget) -> MonthlyExpense:
"""Clear asset value and add liquid asset value to budget minus selling costs"""
sell_price = self.value
selling_costs = sell_price * self.selling_costs_rate
self.value = 0
return MonthlyExpense(
costs=-selling_costs,
savings=sell_price
)
def _step(self, budget: MonthlyBudget) -> MonthlyExpense:
"""Calculate cost for current period"""
if self.period == self.buying_period:
return self._purchasing_step(budget)
elif self.period == self.selling_period:
return self._selling_step(budget)
self.value = round(self.value * (1 + self.rate), 2)
return MonthlyExpense()
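# Illustrative worked numbers (not part of the module): for an assumed 500,000
# home price, a 20% down payment and 3% buying costs (the real defaults come
# from homecomp.const and may differ), the purchasing step books the down
# payment as a savings outflow and the buying costs as costs:
#
#     price = 500_000               # assumed example price
#     down_payment = price * 0.20   # -> 100,000 leaves savings at purchase
#     buying_costs = price * 0.03   # ->  15,000 booked as costs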
|
the-stack_106_27335 | import logging
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
from unittest.mock import MagicMock, ANY
from mpfmc.widgets.video import VideoWidget
try:
from mpfmc.core.audio import SoundSystem
from mpfmc.assets.sound import SoundStealingMethod
except ImportError:
SoundSystem = None
SoundStealingMethod = None
logging.warning("mpfmc.core.audio library could not be loaded. Audio "
"features will not be available")
class TestAudioGStreamer(MpfMcTestCase):
"""
Tests the GStreamer audio features in the media controller. The core audio library is a
custom extension library written in Cython that interfaces with the SDL2 and
SDL_Mixer libraries.
"""
def get_machine_path(self):
return 'tests/machine_files/audio'
def get_config_file(self):
return 'test_audio_gstreamer.yaml'
def test_loading_while_playing_video(self):
""" Tests loading a sound file while playing a video (both using gstreamer) """
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudio')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudio')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
# Mock BCP send method
self.mc.bcp_processor.send = MagicMock()
self.mc.bcp_processor.enabled = True
# Make sure the low quality test video exists
self.assertIn('mpf_video_small_test', self.mc.videos)
self.mc.events.post('show_slide1')
self.advance_time()
video_widget = self.mc.targets['default'].current_slide.widgets[0].widget
self.assertEqual(video_widget.state, 'play')
self.assertTrue(video_widget.video.loaded)
self.mc.events.post('play_sound_sfx_028')
self.advance_real_time(1)
self.mc.events.post('play_city_loop')
self.advance_real_time(1)
self.mc.events.post('play_sound_text')
self.advance_real_time(0.35)
self.mc.events.post('play_sound_text')
self.advance_real_time(6)
self.mc.bcp_processor.send.assert_any_call('trigger', sound_instance=ANY, name='text_sound_played')
self.mc.bcp_processor.send.assert_any_call('trigger', name='test_video_played')
self.mc.bcp_processor.send.assert_any_call('trigger', name='test_video_stopped')
def test_retrigger_streamed_sound(self):
""" Tests retriggering a streamed sound (play, stop, play) (using gstreamer) """
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudio')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudio')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
track_music = interface.get_track_by_name("music")
self.assertIsNotNone(track_music)
# Mock BCP send method
self.mc.bcp_processor.send = MagicMock()
self.mc.bcp_processor.enabled = True
self.mc.events.post('play_city_loop')
self.advance_real_time(1)
self.assertTrue(track_music.sound_is_playing(self.mc.sounds['city_loop']))
self.mc.events.post('stop_city_loop')
self.advance_real_time(0.25)
self.assertFalse(track_music.sound_is_playing(self.mc.sounds['city_loop']))
self.advance_real_time(0.25)
self.mc.events.post('play_city_loop')
self.advance_real_time(1)
self.assertTrue(track_music.sound_is_playing(self.mc.sounds['city_loop']))
self.advance_real_time(1)
|
the-stack_106_27337 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.units import Quantity
from astropy.io import fits
from astropy import log
from astropy.table import Table
from astropy.extern import six
from ..utils.fits import table_to_fits_table
__all__ = [
'Energy',
'EnergyBounds',
]
class Energy(Quantity):
"""Energy quantity scalar or array.
This is a `~astropy.units.Quantity` sub-class that adds convenience methods
to handle common tasks for energy bin center arrays, like FITS I/O or generating
equal-log-spaced grids of energies.
See :ref:`energy_handling_gammapy` for further information.
Parameters
----------
energy : `~numpy.array`, scalar, `~astropy.units.Quantity`
Energy
unit : `~astropy.units.UnitBase`, str, optional
The unit of the value specified for the energy. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
"""
def __new__(cls, energy, unit=None, dtype=None, copy=True):
# Techniques to subclass Quantity taken from astropy.coordinates.Angle
# see: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
if isinstance(energy, six.string_types):
val, unit = energy.split()
energy = float(val)
self = super(Energy, cls).__new__(cls, energy, unit,
dtype=dtype, copy=copy)
if not self.unit.is_equivalent('eV'):
raise ValueError("Given unit {0} is not an"
" energy".format(self.unit.to_string()))
return self
def __array_finalize__(self, obj):
super(Energy, self).__array_finalize__(obj)
def __quantity_subclass__(self, unit):
if unit.is_equivalent('eV'):
return Energy, True
else:
return super(Energy, self).__quantity_subclass__(unit)[0], False
@property
def nbins(self):
"""
The number of bins
"""
return self.size
@property
def range(self):
"""
The covered energy range (tuple)
"""
return self[0:self.size:self.size - 1]
@classmethod
def equal_log_spacing(cls, emin, emax, nbins, unit=None):
"""Create Energy with equal log-spacing (`~gammapy.utils.energy.Energy`).
If no unit is given, it will be taken from emax
Parameters
----------
emin : `~astropy.units.Quantity`, float
Lowest energy bin
emax : `~astropy.units.Quantity`, float
Highest energy bin
nbins : int
Number of bins
unit : `~astropy.units.UnitBase`, str
Energy unit
"""
if unit is not None:
emin = Energy(emin, unit)
emax = Energy(emax, unit)
else:
emin = Energy(emin)
emax = Energy(emax)
unit = emax.unit
emin = emin.to(unit)
x_min, x_max = np.log10([emin.value, emax.value])
energy = np.logspace(x_min, x_max, nbins)
return cls(energy, unit, copy=False)
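# Added note (illustration only, not upstream documentation): the grid comes
# from np.logspace over log10(emin)..log10(emax), so both endpoints are
# included; e.g. Energy.equal_log_spacing(1, 100, 3, 'TeV') yields centers of
# approximately [1, 10, 100] TeV.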
@classmethod
def from_fits(cls, hdu, unit=None):
"""Read ENERGIES fits extension (`~gammapy.utils.energy.Energy`).
Parameters
----------
hdu: `~astropy.io.fits.BinTableHDU`
``ENERGIES`` extensions.
unit : `~astropy.units.UnitBase`, str, None
Energy unit
"""
header = hdu.header
fitsunit = header.get('TUNIT1')
if fitsunit is None:
if unit is not None:
log.warning("No unit found in the FITS header."
" Setting it to {0}".format(unit))
fitsunit = unit
else:
raise ValueError("No unit found in the FITS header."
" Please specifiy a unit")
energy = cls(hdu.data['Energy'], fitsunit)
return energy.to(unit)
def to_fits(self, **kwargs):
"""Write ENERGIES fits extension
Returns
-------
hdu: `~astropy.io.fits.BinTableHDU`
ENERGIES fits extension
"""
col1 = fits.Column(name='Energy', format='D', array=self.value)
cols = fits.ColDefs([col1])
hdu = fits.BinTableHDU.from_columns(cols)
hdu.name = 'ENERGIES'
hdu.header['TUNIT1'] = "{0}".format(self.unit.to_string('fits'))
return hdu
class EnergyBounds(Energy):
"""EnergyBounds array.
This is a `~gammapy.utils.energy.Energy` sub-class that adds convenience
methods to handle common tasks for energy bin edges arrays, like FITS I/O or
generating arrays of bin centers.
See :ref:`energy_handling_gammapy` for further information.
Parameters
----------
energy : `~numpy.array`, scalar, `~astropy.units.Quantity`
EnergyBounds
unit : `~astropy.units.UnitBase`, str
The unit of the values specified for the energy. This may be any
string that `~astropy.units.Unit` understands, but it is better to
give an actual unit object.
"""
@property
def nbins(self):
"""
The number of bins
"""
return self.size - 1
@property
def log_centers(self):
"""Log centers of the energy bounds
"""
center = np.sqrt(self[:-1] * self[1:])
return center.view(Energy)
@property
def upper_bounds(self):
"""Upper energy bin edges
"""
return self[1:]
@property
def lower_bounds(self):
"""Lower energy bin edges
"""
return self[:-1]
@property
def boundaries(self):
"""Energy range"""
return self[[0, -1]]
@property
def bands(self):
"""Width of the energy bins
"""
upper = self.upper_bounds
lower = self.lower_bounds
return upper - lower
@classmethod
def from_lower_and_upper_bounds(cls, lower, upper, unit=None):
"""EnergyBounds from lower and upper bounds (`~gammapy.utils.energy.EnergyBounds`).
If no unit is given, it will be taken from upper
Parameters
----------
lower,upper : `~astropy.units.Quantity`, float
Lowest and highest energy bin
unit : `~astropy.units.UnitBase`, str, None
Energy units
"""
# np.append renders Quantities dimensionless
# http://docs.astropy.org/en/latest/known_issues.html#quantity-issues
lower = cls(lower, unit)
upper = cls(upper, unit)
unit = upper.unit
energy = np.hstack((lower, upper[-1]))
return cls(energy.value, unit)
@classmethod
def equal_log_spacing(cls, emin, emax, nbins, unit=None):
"""EnergyBounds with equal log-spacing (`~gammapy.utils.energy.EnergyBounds`).
If no unit is given, it will be taken from emax
Parameters
----------
emin : `~astropy.units.Quantity`, float
Lowest energy bin
emax : `~astropy.units.Quantity`, float
Highest energy bin
nbins : int
Number of bins
unit : `~astropy.units.UnitBase`, str, None
Energy unit
"""
return super(EnergyBounds, cls).equal_log_spacing(
emin, emax, nbins + 1, unit)
@classmethod
def from_ebounds(cls, hdu, unit=None):
"""Read EBOUNDS fits extension (`~gammapy.utils.energy.EnergyBounds`).
Parameters
----------
hdu: `~astropy.io.fits.BinTableHDU`
``EBOUNDS`` extensions.
unit : `~astropy.units.UnitBase`, str, None
Energy unit
"""
if hdu.name != 'EBOUNDS':
log.warning('This does not seem like an EBOUNDS extension. '
'Are you sure?')
header = hdu.header
unit = header.get('TUNIT2')
low = hdu.data['E_MIN']
high = hdu.data['E_MAX']
return cls.from_lower_and_upper_bounds(low, high, unit)
@classmethod
def from_rmf_matrix(cls, hdu, unit=None):
"""Read MATRIX fits extension (`~gammapy.utils.energy.EnergyBounds`).
Parameters
----------
hdu: `~astropy.io.fits.BinTableHDU`
``MATRIX`` extensions.
unit : `~astropy.units.UnitBase`, str, None
Energy unit
"""
if hdu.name != 'MATRIX':
log.warning('This does not seem like a MATRIX extension. '
'Are you sure?')
header = hdu.header
unit = header.get('TUNIT1')
low = hdu.data['ENERG_LO']
high = hdu.data['ENERG_HI']
return cls.from_lower_and_upper_bounds(low, high, unit)
def bin(self, i):
"""
Return energy bin edges (zero-based numbering)
Parameters
----------
i : int
Energy bin
"""
return self[[i, i + 2]]
def find_energy_bin(self, energy):
"""Find the bins that contain the specified energy values.
Parameters
----------
energy : `~gammapy.utils.energy.Energy`
Array of energies to search for.
Returns
-------
bin_index : `~numpy.ndarray`
Indices of the energy bins containing the specified energies.
"""
# check that the specified energy is within the boundaries
if not self.contains(energy).all():
ss_error = "Specified energy {}".format(energy)
ss_error += " is outside the boundaries {}".format(self.boundaries)
raise ValueError(ss_error)
bin_index = np.searchsorted(self.upper_bounds, energy)
return bin_index
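# Worked example (added for illustration): for bounds of [1, 3, 10, 30] TeV,
# find_energy_bin(5 TeV) returns 1, because np.searchsorted on the upper
# bounds [3, 10, 30] places 5 TeV in the second bin, i.e. [3, 10) TeV.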
def contains(self, energy):
"""Check of energy is contained in boundaries
Parameters
----------
energy : `~gammapy.utils.energy.Energy`
Array of energies to test
"""
return (energy > self[0]) & (energy < self[-1])
def to_table(self, unit=None):
"""Convert to `~astropy.table.Table`.
"""
if unit is None:
unit = self.unit
table = Table()
table['CHANNEL'] = np.arange(self.nbins, dtype=np.int16)
table['E_MIN'] = Quantity(self.lower_bounds, unit=unit, dtype=np.float32)
table['E_MAX'] = Quantity(self.upper_bounds, unit=unit, dtype=np.float32)
return table
def to_ebounds(self, unit=None, **kwargs):
"""Write EBOUNDS fits extension
Returns
-------
hdu: `~astropy.io.fits.BinTableHDU`
EBOUNDS fits extension
"""
hdu = table_to_fits_table(self.to_table(unit))
header = hdu.header
header['EXTNAME'] = 'EBOUNDS', 'Name of this binary table extension'
header['TELESCOP'] = 'DUMMY', 'Mission/satellite name'
header['INSTRUME'] = 'DUMMY', 'Instrument/detector'
header['FILTER'] = '', 'Filter information'
header['CHANTYPE'] = 'PHA', 'Type of channels (PHA, PI etc)'
header['DETCHANS'] = self.nbins, 'Total number of detector PHA channels'
header['HDUCLASS'] = 'OGIP', 'Organisation devising file format'
header['HDUCLAS1'] = 'RESPONSE', 'File relates to response of instrument'
header['HDUCLAS2'] = 'EBOUNDS', 'This is an EBOUNDS extension'
header['HDUVERS'] = '1.2.0', 'Version of file format'
# Obsolete EBOUNDS headers, included for the benefit of old software
header['RMFVERSN'] = '1992a', 'Obsolete'
header['HDUVERS1'] = '1.0.0', 'Obsolete'
header['HDUVERS2'] = '1.1.0', 'Obsolete'
return hdu
def to_dict(self):
"""Construct dict representing an energy range"""
if len(self) != 2:
raise ValueError(
"This is not an energy range. Nbins: {}".format(self.nbins))
d = dict(min=self[0].value, max=self[1].value, unit='{}'.format(self.unit))
return d
@classmethod
def from_dict(cls, d):
"""Read dict representing an energy range"""
return cls((d['min'], d['max']), d['unit'])
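# Minimal usage sketch appended for illustration (assumes astropy is available,
# as the module itself requires; not part of the upstream module):
if __name__ == '__main__':
    demo_bounds = EnergyBounds.equal_log_spacing(1, 100, 4, 'TeV')
    print(demo_bounds)              # five edges spanning 1..100 TeV
    print(demo_bounds.log_centers)  # four geometric bin centers
    print(demo_bounds.bands)        # widths of the four bins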
|
the-stack_106_27338 | try:
from io import StringIO
from io import BytesIO
except ImportError:
from cStringIO import StringIO # NOQA
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser # NOQA
from circus.tests.support import TestCase
from circus.tests.support import EasyTestSuite
from circus.tests.support import skipIf, PYTHON, IS_WINDOWS
import os
import shutil
import tempfile
from pipes import quote as shell_escape_arg
import subprocess
import time
import yaml
import json
import logging.config
import sys
HERE = os.path.abspath(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(HERE, 'config', 'circus.ini')
def run_circusd(options=(), config=(), log_capture_path="log.txt",
additional_files=()):
options = list(options)
additional_files = dict(additional_files)
config_ini_update = {
"watcher:touch.cmd": PYTHON,
"watcher:touch.args": "-c \"open('workerstart.txt', 'w+').close()\"",
"watcher:touch.respawn": 'False'
}
config_ini_update.update(dict(config))
config_ini = ConfigParser()
config_ini.read(CONFIG_PATH)
for dottedkey in config_ini_update:
section, key = dottedkey.split(".", 1)
if section not in config_ini.sections():
config_ini.add_section(section)
config_ini.set(
section, key, config_ini_update[dottedkey])
temp_dir = tempfile.mkdtemp()
try:
circus_ini_path = os.path.join(temp_dir, "circus.ini")
with open(circus_ini_path, "w") as fh:
config_ini.write(fh)
for relpath in additional_files:
path = os.path.join(temp_dir, relpath)
with open(path, "w") as fh:
fh.write(additional_files[relpath])
env = os.environ.copy()
sep = ';' if IS_WINDOWS else ':'
# We're going to run circus from a process with a different
# cwd, so we need to make sure that Python will import the
# current version of circus
pythonpath = env.get('PYTHONPATH', '')
pythonpath += '%s%s' % (sep, os.path.abspath(
os.path.join(HERE, os.pardir, os.pardir)))
env['PYTHONPATH'] = pythonpath
argv = ["circus.circusd"] + options + [circus_ini_path]
if sys.gettrace() is None or IS_WINDOWS:
# killing a coverage run process leaves a zombie on
# Windows so we should skip coverage
argv = [PYTHON, "-m"] + argv
else:
exe_dir = os.path.dirname(PYTHON)
coverage = os.path.join(exe_dir, "coverage")
if not os.path.isfile(coverage):
coverage = "coverage"
argv = [coverage, "run", "-p", "-m"] + argv
child = subprocess.Popen(argv, cwd=temp_dir, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
try:
touch_path = os.path.join(temp_dir, "workerstart.txt")
while True:
child.poll()
if os.path.exists(touch_path):
break
if child.returncode is not None:
break
time.sleep(0.01)
finally:
child.terminate()
child.wait()
log_file_path = os.path.join(temp_dir, log_capture_path)
try:
if os.path.exists(log_file_path):
with open(log_file_path, "r") as fh:
return fh.read()
else:
if child.stdout is not None:
raise Exception(child.stdout.read().decode("ascii"))
finally:
if child.stdout is not None:
child.stdout.close()
if child.stderr is not None:
child.stderr.close()
if child.stdin is not None:
child.stdin.close()
assert child.returncode == 0, \
" ".join(shell_escape_arg(a) for a in argv)
finally:
for basename in sorted(os.listdir(temp_dir)):
if basename.startswith(".coverage."):
source = os.path.join(temp_dir, basename)
target = os.path.abspath(basename)
shutil.copy(source, target)
try:
shutil.rmtree(temp_dir)
except OSError:
# Sometimes on Windows we can't delete the
# logging file because it says it's still in
# use (lock).
pass
EXAMPLE_YAML = """\
version: 1
disable_existing_loggers: false
formatters:
simple:
format: '%(asctime)s - %(name)s - [%(levelname)s] %(message)s'
handlers:
logfile:
class: logging.FileHandler
filename: logoutput.txt
level: DEBUG
formatter: simple
loggers:
circus:
level: DEBUG
handlers: [logfile]
propagate: no
root:
level: DEBUG
handlers: [logfile]
"""
EXPECTED_LOG_MESSAGE = "[INFO] Arbiter now waiting for commands"
def logging_dictconfig_to_ini(config):
assert config.get("version", 1) == 1, config
ini = ConfigParser()
ini.add_section("loggers")
loggers = config.get("loggers", {})
if "root" in config:
loggers["root"] = config["root"]
ini.set("loggers", "keys", ",".join(sorted(loggers.keys())))
for logger in sorted(loggers.keys()):
section = "logger_%s" % (logger.replace(".", "_"),)
ini.add_section(section)
for key, value in sorted(loggers[logger].items()):
if key == "handlers":
value = ",".join(value)
if key == "propagate":
value = "1" if value else "0"
ini.set(section, key, value)
ini.set(section, "qualname", logger)
ini.add_section("handlers")
handlers = config.get("handlers", {})
ini.set("handlers", "keys", ",".join(sorted(handlers.keys())))
for handler in sorted(handlers.keys()):
section = "handler_%s" % (handler,)
ini.add_section(section)
args = []
for key, value in sorted(handlers[handler].items()):
if (handlers[handler]["class"] == "logging.FileHandler"
and key == "filename"):
args.append(value)
else:
ini.set(section, key, value)
ini.set(section, "args", repr(tuple(args)))
ini.add_section("formatters")
formatters = config.get("formatters", {})
ini.set("formatters", "keys", ",".join(sorted(formatters.keys())))
for formatter in sorted(formatters.keys()):
section = "formatter_%s" % (formatter,)
ini.add_section(section)
for key, value in sorted(formatters[formatter].items()):
ini.set(section, key, value)
try:
# Older Python (without io.StringIO/io.BytesIO) and Python 3 use
# this code path.
result = StringIO()
ini.write(result)
return result.getvalue()
except TypeError:
# Python 2.7 has io.StringIO and io.BytesIO but ConfigParser.write
# has not been fixed to work with StringIO.
result = BytesIO()
ini.write(result)
return result.getvalue().decode("ascii")
def hasDictConfig():
return hasattr(logging.config, "dictConfig")
class TestLoggingConfig(TestCase):
def test_loggerconfig_default_ini(self):
logs = run_circusd(
[], {"circus.logoutput": "log_ini.txt"},
log_capture_path="log_ini.txt")
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
def test_loggerconfig_default_opt(self):
logs = run_circusd(
["--log-output", "log_opt.txt"], {},
log_capture_path="log_opt.txt")
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
@skipIf(not hasDictConfig(), "Needs logging.config.dictConfig()")
def test_loggerconfig_yaml_ini(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_yaml_ini.txt"
logs = run_circusd(
[], {"circus.loggerconfig": "logging.yaml"},
log_capture_path="log_yaml_ini.txt",
additional_files={"logging.yaml": yaml.dump(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
@skipIf(not hasDictConfig(), "Needs logging.config.dictConfig()")
def test_loggerconfig_yaml_opt(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_yaml_opt.txt"
logs = run_circusd(
["--logger-config", "logging.yaml"], {},
log_capture_path="log_yaml_opt.txt",
additional_files={"logging.yaml": yaml.dump(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
@skipIf(not hasDictConfig(), "Needs logging.config.dictConfig()")
def test_loggerconfig_json_ini(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_json_ini.txt"
logs = run_circusd(
[], {"circus.loggerconfig": "logging.json"},
log_capture_path="log_json_ini.txt",
additional_files={"logging.json": json.dumps(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
@skipIf(not hasDictConfig(), "Needs logging.config.dictConfig()")
def test_loggerconfig_json_opt(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_json_opt.txt"
logs = run_circusd(
["--logger-config", "logging.json"], {},
log_capture_path="log_json_opt.txt",
additional_files={"logging.json": json.dumps(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
def test_loggerconfig_ini_ini(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_ini_ini.txt"
logs = run_circusd(
[], {"circus.loggerconfig": "logging.ini"},
log_capture_path="log_ini_ini.txt",
additional_files={
"logging.ini": logging_dictconfig_to_ini(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
def test_loggerconfig_ini_opt(self):
config = yaml.load(EXAMPLE_YAML)
config["handlers"]["logfile"]["filename"] = "log_ini_opt.txt"
logs = run_circusd(
["--logger-config", "logging.ini"], {},
log_capture_path="log_ini_opt.txt",
additional_files={
"logging.ini": logging_dictconfig_to_ini(config)})
self.assertTrue(EXPECTED_LOG_MESSAGE in logs, logs)
test_suite = EasyTestSuite(__name__)
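# Illustrative note (added for this dump, not part of the circus test-suite):
# the converter above can be exercised on its own, e.g.
#
#     print(logging_dictconfig_to_ini(yaml.load(EXAMPLE_YAML)))
#
# which emits the equivalent ConfigParser ini text without starting circusd.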
|
the-stack_106_27339 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one NodeConn connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "amorecoind"),
help="amorecoind binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from a non-whitelisted peer, while Node1 (started with
# -minimumchainwork=0x10) will be used to test the interaction with
# nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = NodeConnCB() # connects to node0
min_work_node = NodeConnCB() # connects to node1
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], min_work_node))
test_node.add_connection(connections[0])
min_work_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it's not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
connections[0].disconnect_node()
test_node.wait_for_disconnect()
test_node = NodeConnCB() # connects to node (not whitelisted)
connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connections[0])
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
test_node = NodeConnCB() # connects to node (not whitelisted)
connections[0] = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
the-stack_106_27340 | import asyncio
import random
import discord
from discord.ext.buttons import Paginator
class Pag(Paginator):
async def teardown(self):
try:
await self.page.clear_reactions()
except discord.HTTPException:
pass
async def GetMessage(
bot, ctx, contentOne="Default Message", contentTwo="\uFEFF", timeout=100
):
"""
This function sends an embed containing the params and then waits for a message to return
Params:
- bot (commands.Bot object) : The bot instance, used to wait for the reply
- ctx (commands.Context object) : Used for sending the prompt and reading the reply
- Optional Params:
- contentOne (string) : Embed title
- contentTwo (string) : Embed description
- timeout (int) : Timeout for wait_for
Returns:
- msg.content (string) : If a message is detected, the content will be returned
or
- False (bool) : If a timeout occurs
"""
embed = discord.Embed(
title=f"{contentOne}",
description=f"{contentTwo}",
colour=random.choice(bot.color_list),
)
sent = await ctx.send(embed=embed)
try:
msg = await bot.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
and message.channel == ctx.channel,
)
if msg:
await asyncio.sleep(1)
await sent.delete()
await msg.delete()
return msg.content
except asyncio.TimeoutError:
await sent.delete()
return False
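# Hedged usage sketch (hypothetical cog command, not part of this module):
#
#     @commands.command()
#     async def survey(self, ctx):
#         answer = await GetMessage(self.bot, ctx, "Question", "Reply below")
#         if answer is False:
#             await ctx.send("Timed out - no answer was received.")
#         else:
#             await ctx.send(f"Recorded answer: {answer}")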
|
the-stack_106_27341 | import weakref
from collections import namedtuple
import operator
from functools import partial
from llvmlite.llvmpy.core import Constant, Type, Builder
from numba import _dynfunc
from numba.core import (typing, utils, types, ir, debuginfo, funcdesc,
generators, config, ir_utils, cgutils)
from numba.core.errors import (LoweringError, new_error_context, TypingError,
LiteralTypingError, UnsupportedError)
from numba.core.funcdesc import default_mangler
class Environment(_dynfunc.Environment):
"""Stores globals and constant pyobjects for runtime.
It is often needed to convert b/w nopython objects and pyobjects.
"""
__slots__ = ('env_name', '__weakref__')
# A weak-value dictionary to store live environment with env_name as the
# key.
_memo = weakref.WeakValueDictionary()
@classmethod
def from_fndesc(cls, fndesc):
try:
# Avoid creating new Env
return cls._memo[fndesc.env_name]
except KeyError:
inst = cls(fndesc.lookup_globals())
inst.env_name = fndesc.env_name
cls._memo[fndesc.env_name] = inst
return inst
def __reduce__(self):
return _rebuild_env, (
self.globals['__name__'],
self.consts,
self.env_name,
)
def __del__(self):
return
def _rebuild_env(modname, consts, env_name):
if env_name in Environment._memo:
return Environment._memo[env_name]
from numba.core import serialize
mod = serialize._rebuild_module(modname)
env = Environment(mod.__dict__)
env.consts[:] = consts
return env
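# Added descriptive note (not an upstream numba comment): Environment.__reduce__
# and _rebuild_env together make environments picklable; on unpickling, the
# _memo weak-value cache is consulted first so that a live environment with the
# same env_name is reused instead of being rebuilt from the module globals.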
_VarArgItem = namedtuple("_VarArgItem", ("vararg", "index"))
class BaseLower(object):
"""
Lower IR to LLVM
"""
def __init__(self, context, library, fndesc, func_ir, metadata=None):
self.library = library
self.fndesc = fndesc
self.blocks = utils.SortedMap(utils.iteritems(func_ir.blocks))
self.func_ir = func_ir
self.call_conv = context.call_conv
self.generator_info = func_ir.generator_info
self.metadata = metadata
# Initialize LLVM
self.module = self.library.create_ir_module(self.fndesc.unique_name)
# Python execution environment (will be available to the compiled
# function).
self.env = Environment.from_fndesc(self.fndesc)
# Internal states
self.blkmap = {}
self.pending_phis = {}
self.varmap = {}
self.firstblk = min(self.blocks.keys())
self.loc = -1
# Specializes the target context as seen inside the Lowerer
# This adds:
# - environment: the python execution environment
self.context = context.subtarget(environment=self.env,
fndesc=self.fndesc)
# Debuginfo
dibuildercls = (self.context.DIBuilder
if self.context.enable_debuginfo
else debuginfo.DummyDIBuilder)
self.debuginfo = dibuildercls(module=self.module,
filepath=func_ir.loc.filename)
# Subclass initialization
self.init()
def init(self):
pass
def init_pyapi(self):
"""
Init the Python API and Environment Manager for the function being
lowered.
"""
if self.pyapi is not None:
return
self.pyapi = self.context.get_python_api(self.builder)
# Store environment argument for later use
self.env_manager = self.context.get_env_manager(self.builder)
self.env_body = self.env_manager.env_body
self.envarg = self.env_manager.env_ptr
def pre_lower(self):
"""
Called before lowering all blocks.
"""
# A given Lower object can be used for several LL functions
# (for generators) and it's important to use a new API and
# EnvironmentManager.
self.pyapi = None
self.debuginfo.mark_subprogram(function=self.builder.function,
name=self.fndesc.qualname,
loc=self.func_ir.loc)
def post_lower(self):
"""
Called after all blocks are lowered
"""
self.debuginfo.finalize()
def pre_block(self, block):
"""
Called before lowering a block.
"""
def post_block(self, block):
"""
Called after lowering a block.
"""
def return_exception(self, exc_class, exc_args=None, loc=None):
"""Propagate exception to the caller.
"""
self.call_conv.return_user_exc(
self.builder, exc_class, exc_args,
loc=loc, func_name=self.func_ir.func_id.func_name,
)
def set_exception(self, exc_class, exc_args=None, loc=None):
"""Set exception state in the current function.
"""
self.call_conv.set_static_user_exc(
self.builder, exc_class, exc_args,
loc=loc, func_name=self.func_ir.func_id.func_name,
)
def emit_environment_object(self):
"""Emit a pointer to hold the Environment object.
"""
# Define global for the environment and initialize it to NULL
envname = self.context.get_env_name(self.fndesc)
self.context.declare_env_global(self.module, envname)
def lower(self):
# Emit the Env into the module
self.emit_environment_object()
if self.generator_info is None:
self.genlower = None
self.lower_normal_function(self.fndesc)
else:
self.genlower = self.GeneratorLower(self)
self.gentype = self.genlower.gentype
self.genlower.lower_init_func(self)
self.genlower.lower_next_func(self)
if self.gentype.has_finalizer:
self.genlower.lower_finalize_func(self)
if config.DUMP_LLVM:
print(("LLVM DUMP %s" % self.fndesc).center(80, '-'))
if config.HIGHLIGHT_DUMPS:
try:
from pygments import highlight
from pygments.lexers import LlvmLexer as lexer
from pygments.formatters import Terminal256Formatter
from numba.misc.dump_style import by_colorscheme
print(highlight(self.module.__repr__(), lexer(),
Terminal256Formatter(
style=by_colorscheme())))
except ImportError:
msg = "Please install pygments to see highlighted dumps"
raise ValueError(msg)
else:
print(self.module)
print('=' * 80)
# Run target specific post lowering transformation
self.context.post_lowering(self.module, self.library)
# Materialize LLVM Module
self.library.add_ir_module(self.module)
def extract_function_arguments(self):
self.fnargs = self.call_conv.decode_arguments(self.builder,
self.fndesc.argtypes,
self.function)
return self.fnargs
def lower_normal_function(self, fndesc):
"""
Lower non-generator *fndesc*.
"""
self.setup_function(fndesc)
# Init argument values
self.extract_function_arguments()
entry_block_tail = self.lower_function_body()
# Close tail of entry block
self.builder.position_at_end(entry_block_tail)
self.builder.branch(self.blkmap[self.firstblk])
def lower_function_body(self):
"""
Lower the current function's body, and return the entry block.
"""
# Init Python blocks
for offset in self.blocks:
bname = "B%s" % offset
self.blkmap[offset] = self.function.append_basic_block(bname)
self.pre_lower()
# pre_lower() may have changed the current basic block
entry_block_tail = self.builder.basic_block
self.debug_print("# function begin: {0}".format(
self.fndesc.unique_name))
# Lower all blocks
for offset, block in sorted(self.blocks.items()):
bb = self.blkmap[offset]
self.builder.position_at_end(bb)
self.lower_block(block)
self.post_lower()
return entry_block_tail
def lower_block(self, block):
"""
Lower the given block.
"""
self.pre_block(block)
for inst in block.body:
self.loc = inst.loc
defaulterrcls = partial(LoweringError, loc=self.loc)
with new_error_context('lowering "{inst}" at {loc}', inst=inst,
loc=self.loc, errcls_=defaulterrcls):
self.lower_inst(inst)
self.post_block(block)
def create_cpython_wrapper(self, release_gil=False):
"""
Create CPython wrapper(s) around this function (or generator).
"""
if self.genlower:
self.context.create_cpython_wrapper(self.library,
self.genlower.gendesc,
self.env, self.call_helper,
release_gil=release_gil)
self.context.create_cpython_wrapper(self.library, self.fndesc,
self.env, self.call_helper,
release_gil=release_gil)
def create_cfunc_wrapper(self):
"""
Create C wrapper around this function.
"""
if self.genlower:
raise UnsupportedError('generator as a first-class function type')
self.context.create_cfunc_wrapper(self.library, self.fndesc,
self.env, self.call_helper)
def setup_function(self, fndesc):
# Setup function
self.function = self.context.declare_function(self.module, fndesc)
self.entry_block = self.function.append_basic_block('entry')
self.builder = Builder(self.entry_block)
self.call_helper = self.call_conv.init_call_helper(self.builder)
def typeof(self, varname):
return self.fndesc.typemap[varname]
def debug_print(self, msg):
if config.DEBUG_JIT:
self.context.debug_print(self.builder, "DEBUGJIT: {0}".format(msg))
# Dictionary mapping instruction class to its lowering function.
lower_extensions = {}
class Lower(BaseLower):
GeneratorLower = generators.GeneratorLower
def pre_block(self, block):
from numba.core.unsafe import eh
super(Lower, self).pre_block(block)
if block == self.firstblk:
# create slots for all the vars, irrespective of whether they are
# initialized, SSA will pick this up and warn users about using
# uninitialized variables. Slots are added as alloca in the first
# block
bb = self.blkmap[self.firstblk]
self.builder.position_at_end(bb)
all_names = set()
for block in self.blocks.values():
for x in block.find_insts(ir.Del):
if x.value not in all_names:
all_names.add(x.value)
for name in all_names:
fetype = self.typeof(name)
self._alloca_var(name, fetype)
# Detect if we are in a TRY block by looking for a call to
# `eh.exception_check`.
for call in block.find_exprs(op='call'):
defn = ir_utils.guard(
ir_utils.get_definition, self.func_ir, call.func,
)
if defn is not None and isinstance(defn, ir.Global):
if defn.value is eh.exception_check:
if isinstance(block.terminator, ir.Branch):
targetblk = self.blkmap[block.terminator.truebr]
# NOTE: This hacks in an attribute for call_conv to
# pick up. This hack will no longer be needed once
# all old-style implementations are gone.
self.builder._in_try_block = {'target': targetblk}
break
def post_block(self, block):
# Clean-up
try:
del self.builder._in_try_block
except AttributeError:
pass
def lower_inst(self, inst):
# Set debug location for all subsequent LL instructions
self.debuginfo.mark_location(self.builder, self.loc)
self.debug_print(str(inst))
if isinstance(inst, ir.Assign):
ty = self.typeof(inst.target.name)
val = self.lower_assign(ty, inst)
self.storevar(val, inst.target.name)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
condty = self.typeof(inst.cond.name)
pred = self.context.cast(self.builder, cond, condty, types.boolean)
assert pred.type == Type.int(1), ("cond is not i1: %s" % pred.type)
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Return):
if self.generator_info:
# StopIteration
self.genlower.return_from_generator(self)
return
val = self.loadvar(inst.value.name)
oty = self.typeof(inst.value.name)
ty = self.fndesc.restype
if isinstance(ty, types.Optional):
# If returning an optional type
self.call_conv.return_optional_value(self.builder, ty, oty, val)
return
if ty != oty:
val = self.context.cast(self.builder, val, oty, ty)
retval = self.context.get_return_value(self.builder, ty, val)
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.StaticSetItem):
signature = self.fndesc.calltypes[inst]
assert signature is not None
try:
impl = self.context.get_function('static_setitem', signature)
except NotImplementedError:
return self.lower_setitem(inst.target, inst.index_var,
inst.value, signature)
else:
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
valuety = self.typeof(inst.value.name)
value = self.context.cast(self.builder, value, valuety,
signature.args[2])
return impl(self.builder, (target, inst.index, value))
elif isinstance(inst, ir.Print):
self.lower_print(inst)
elif isinstance(inst, ir.SetItem):
signature = self.fndesc.calltypes[inst]
assert signature is not None
return self.lower_setitem(inst.target, inst.index, inst.value,
signature)
elif isinstance(inst, ir.StoreMap):
signature = self.fndesc.calltypes[inst]
assert signature is not None
return self.lower_setitem(inst.dct, inst.key, inst.value, signature)
elif isinstance(inst, ir.DelItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
op = operator.delitem
fnop = self.context.typing_context.resolve_value_type(op)
callsig = fnop.get_call_type(
self.context.typing_context, signature.args, {},
)
impl = self.context.get_function(fnop, callsig)
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
return impl(self.builder, (target, index))
elif isinstance(inst, ir.Del):
self.delvar(inst.value)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
signature = self.fndesc.calltypes[inst]
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
assert signature is not None
assert signature.args[0] == targetty
impl = self.context.get_setattr(inst.attr, signature)
# Convert argument to match
value = self.context.cast(self.builder, value, valuety,
signature.args[1])
return impl(self.builder, (target, value))
elif isinstance(inst, ir.StaticRaise):
self.lower_static_raise(inst)
elif isinstance(inst, ir.StaticTryRaise):
self.lower_static_try_raise(inst)
else:
for _class, func in lower_extensions.items():
if isinstance(inst, _class):
func(self, inst)
return
raise NotImplementedError(type(inst))
def lower_setitem(self, target_var, index_var, value_var, signature):
target = self.loadvar(target_var.name)
value = self.loadvar(value_var.name)
index = self.loadvar(index_var.name)
targetty = self.typeof(target_var.name)
valuety = self.typeof(value_var.name)
indexty = self.typeof(index_var.name)
op = operator.setitem
fnop = self.context.typing_context.resolve_value_type(op)
callsig = fnop.get_call_type(
self.context.typing_context, signature.args, {},
)
impl = self.context.get_function(fnop, callsig)
# Convert argument to match
if isinstance(targetty, types.Optional):
target = self.context.cast(self.builder, target, targetty,
targetty.type)
else:
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
value = self.context.cast(self.builder, value, valuety,
signature.args[2])
return impl(self.builder, (target, index, value))
def lower_static_raise(self, inst):
if inst.exc_class is None:
# Reraise
self.return_exception(None, loc=self.loc)
else:
self.return_exception(inst.exc_class, inst.exc_args, loc=self.loc)
def lower_static_try_raise(self, inst):
if inst.exc_class is None:
# Reraise
self.set_exception(None, loc=self.loc)
else:
self.set_exception(inst.exc_class, inst.exc_args, loc=self.loc)
def lower_assign(self, ty, inst):
value = inst.value
# In nopython mode, closure vars are frozen like globals
if isinstance(value, (ir.Const, ir.Global, ir.FreeVar)):
res = self.context.get_constant_generic(self.builder, ty,
value.value)
self.incref(ty, res)
return res
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
res = self.context.cast(self.builder, val, oty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Arg):
# Cast from the argument type to the local variable type
# (note the "arg.FOO" convention as used in typeinfer)
argty = self.typeof("arg." + value.name)
if isinstance(argty, types.Omitted):
pyval = argty.value
tyctx = self.context.typing_context
valty = tyctx.resolve_value_type_prefer_literal(pyval)
# use the type of the constant value
const = self.context.get_constant_generic(
self.builder, valty, pyval,
)
# cast it to the variable type
res = self.context.cast(self.builder, const, valty, ty)
else:
val = self.fnargs[value.index]
res = self.context.cast(self.builder, val, argty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Yield):
res = self.lower_yield(ty, value)
self.incref(ty, res)
return res
raise NotImplementedError(type(value), value)
def lower_yield(self, retty, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
y = generators.LowerYield(self, yp, yp.live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
typ = self.typeof(inst.value.name)
# cast the local val to the type yielded
yret = self.context.cast(self.builder, val, typ,
self.gentype.yield_type)
# get the return repr of yielded value
retval = self.context.get_return_value(self.builder, typ, yret)
# return
self.call_conv.return_value(self.builder, retval)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.context.get_constant_generic(self.builder, retty, None)
def lower_binop(self, resty, expr, op):
# if op in utils.OPERATORS_TO_BUILTINS:
# map operator.the_op => the corresponding types.Function()
# TODO: this lookup looks dodgy ...
op = self.context.typing_context.resolve_value_type(op)
lhs = expr.lhs
rhs = expr.rhs
static_lhs = expr.static_lhs
static_rhs = expr.static_rhs
lty = self.typeof(lhs.name)
rty = self.typeof(rhs.name)
lhs = self.loadvar(lhs.name)
rhs = self.loadvar(rhs.name)
# Convert argument to match
signature = self.fndesc.calltypes[expr]
lhs = self.context.cast(self.builder, lhs, lty, signature.args[0])
rhs = self.context.cast(self.builder, rhs, rty, signature.args[1])
def cast_result(res):
return self.context.cast(self.builder, res,
signature.return_type, resty)
# First try with static operands, if known
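# Added descriptive note: the "static" operands are constants recovered from
# the IR (expr.static_lhs / expr.static_rhs). The calls below try the
# literal/literal, literal/dynamic and dynamic/literal specializations in
# turn, and fall back to the fully generic implementation only when no
# static overload can be typed or resolved.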
def try_static_impl(tys, args):
if any(a is ir.UNDEFINED for a in args):
return None
try:
if isinstance(op, types.Function):
static_sig = op.get_call_type(self.context.typing_context,
tys, {})
else:
static_sig = typing.signature(signature.return_type, *tys)
except TypingError:
return None
try:
static_impl = self.context.get_function(op, static_sig)
return static_impl(self.builder, args)
except NotImplementedError:
return None
res = try_static_impl(
(_lit_or_omitted(static_lhs), _lit_or_omitted(static_rhs)),
(static_lhs, static_rhs),
)
if res is not None:
return cast_result(res)
res = try_static_impl(
(_lit_or_omitted(static_lhs), rty),
(static_lhs, rhs),
)
if res is not None:
return cast_result(res)
res = try_static_impl(
(lty, _lit_or_omitted(static_rhs)),
(lhs, static_rhs),
)
if res is not None:
return cast_result(res)
# Normal implementation for generic arguments
sig = op.get_call_type(self.context.typing_context, signature.args, {})
impl = self.context.get_function(op, sig)
res = impl(self.builder, (lhs, rhs))
return cast_result(res)
def lower_getitem(self, resty, expr, value, index, signature):
baseval = self.loadvar(value.name)
indexval = self.loadvar(index.name)
# Get implementation of getitem
op = operator.getitem
fnop = self.context.typing_context.resolve_value_type(op)
callsig = fnop.get_call_type(
self.context.typing_context, signature.args, {},
)
impl = self.context.get_function(fnop, callsig)
argvals = (baseval, indexval)
argtyps = (self.typeof(value.name),
self.typeof(index.name))
castvals = [self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps,
signature.args)]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res,
signature.return_type,
resty)
def _cast_var(self, var, ty):
"""
Cast a Numba IR variable to the given Numba type, returning a
low-level value.
"""
if isinstance(var, _VarArgItem):
varty = self.typeof(var.vararg.name)[var.index]
val = self.builder.extract_value(self.loadvar(var.vararg.name),
var.index)
else:
varty = self.typeof(var.name)
val = self.loadvar(var.name)
return self.context.cast(self.builder, val, varty, ty)
def fold_call_args(self, fnty, signature, pos_args, vararg, kw_args):
if vararg:
# Inject *args from function call
# The lowering will be done in _cast_var() above.
tp_vararg = self.typeof(vararg.name)
assert isinstance(tp_vararg, types.BaseTuple)
pos_args = pos_args + [_VarArgItem(vararg, i)
for i in range(len(tp_vararg))]
# Fold keyword arguments and resolve default argument values
pysig = signature.pysig
if pysig is None:
if kw_args:
raise NotImplementedError("unsupported keyword arguments "
"when calling %s" % (fnty,))
argvals = [self._cast_var(var, sigty)
for var, sigty in zip(pos_args, signature.args)]
else:
def normal_handler(index, param, var):
return self._cast_var(var, signature.args[index])
def default_handler(index, param, default):
return self.context.get_constant_generic(
self.builder, signature.args[index], default)
def stararg_handler(index, param, vars):
stararg_ty = signature.args[index]
assert isinstance(stararg_ty, types.BaseTuple), stararg_ty
values = [self._cast_var(var, sigty)
for var, sigty in zip(vars, stararg_ty)]
return cgutils.make_anonymous_struct(self.builder, values)
argvals = typing.fold_arguments(pysig,
pos_args, dict(kw_args),
normal_handler,
default_handler,
stararg_handler)
return argvals
def lower_print(self, inst):
"""
Lower a ir.Print()
"""
# We handle this, as far as possible, as a normal call to built-in
# print(). This will make it easy to undo the special ir.Print
# rewrite when it becomes unnecessary (e.g. when we have native
# strings).
sig = self.fndesc.calltypes[inst]
assert sig.return_type == types.none
fnty = self.context.typing_context.resolve_value_type(print)
# Fix the call signature to inject any constant-inferred
# string argument
pos_tys = list(sig.args)
pos_args = list(inst.args)
for i in range(len(pos_args)):
if i in inst.consts:
pyval = inst.consts[i]
if isinstance(pyval, str):
pos_tys[i] = types.literal(pyval)
fixed_sig = typing.signature(sig.return_type, *pos_tys)
fixed_sig = fixed_sig.replace(pysig=sig.pysig)
argvals = self.fold_call_args(fnty, sig, pos_args, inst.vararg, {})
impl = self.context.get_function(print, fixed_sig)
impl(self.builder, argvals)
def lower_call(self, resty, expr):
signature = self.fndesc.calltypes[expr]
self.debug_print("# lower_call: expr = {0}".format(expr))
if isinstance(signature.return_type, types.Phantom):
return self.context.get_dummy_value()
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
else:
fnty = self.typeof(expr.func.name)
if isinstance(fnty, types.ObjModeDispatcher):
res = self._lower_call_ObjModeDispatcher(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunction):
res = self._lower_call_ExternalFunction(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunctionPointer):
res = self._lower_call_ExternalFunctionPointer(
fnty, expr, signature)
elif isinstance(fnty, types.RecursiveCall):
res = self._lower_call_RecursiveCall(fnty, expr, signature)
elif isinstance(fnty, types.FunctionType):
res = self._lower_call_FunctionType(fnty, expr, signature)
else:
res = self._lower_call_normal(fnty, expr, signature)
# If lowering the call returned None, interpret that as returning dummy
# value if the return type of the function is void, otherwise there is
# a problem
if res is None:
if signature.return_type == types.void:
res = self.context.get_dummy_value()
else:
raise LoweringError(
msg="non-void function returns None from implementation",
loc=self.loc
)
return self.context.cast(self.builder, res, signature.return_type,
resty)
def _lower_call_ObjModeDispatcher(self, fnty, expr, signature):
self.init_pyapi()
# Acquire the GIL
gil_state = self.pyapi.gil_ensure()
# Fix types
argnames = [a.name for a in expr.args]
argtypes = [self.typeof(a) for a in argnames]
argvalues = [self.loadvar(a) for a in argnames]
for v, ty in zip(argvalues, argtypes):
# Because .from_native_value steal the reference
self.incref(ty, v)
argobjs = [self.pyapi.from_native_value(atyp, aval,
self.env_manager)
for atyp, aval in zip(argtypes, argvalues)]
# Make Call
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = self.context.add_dynamic_addr(
self.builder,
id(entry_pt),
info="with_objectmode",
)
ret_obj = self.pyapi.call_function_objargs(callee, argobjs)
has_exception = cgutils.is_null(self.builder, ret_obj)
with self.builder.if_else(has_exception) as (then, orelse):
# Handles exception
# This branch must exit the function
with then:
# Clean arg
for obj in argobjs:
self.pyapi.decref(obj)
# Release the GIL
self.pyapi.gil_release(gil_state)
# Return and signal exception
self.call_conv.return_exc(self.builder)
# Handles normal return
with orelse:
# Fix output value
native = self.pyapi.to_native_value(
fnty.dispatcher.output_types,
ret_obj,
)
output = native.value
# Release objs
self.pyapi.decref(ret_obj)
for obj in argobjs:
self.pyapi.decref(obj)
# cleanup output
if callable(native.cleanup):
native.cleanup()
# Release the GIL
self.pyapi.gil_release(gil_state)
# Error during unboxing
with self.builder.if_then(native.is_error):
self.call_conv.return_exc(self.builder)
return output
def _lower_call_ExternalFunction(self, fnty, expr, signature):
# Handle a named external function
self.debug_print("# external function")
argvals = self.fold_call_args(
fnty, signature, expr.args, expr.vararg, expr.kws,
)
fndesc = funcdesc.ExternalFunctionDescriptor(
fnty.symbol, fnty.sig.return_type, fnty.sig.args)
func = self.context.declare_external_function(
self.builder.module, fndesc)
return self.context.call_external_function(
self.builder, func, fndesc.argtypes, argvals,
)
def _lower_call_ExternalFunctionPointer(self, fnty, expr, signature):
# Handle a C function pointer
self.debug_print("# calling external function pointer")
argvals = self.fold_call_args(
fnty, signature, expr.args, expr.vararg, expr.kws,
)
pointer = self.loadvar(expr.func.name)
# If the external function pointer uses libpython
if fnty.requires_gil:
self.init_pyapi()
# Acquire the GIL
gil_state = self.pyapi.gil_ensure()
# Make PyObjects
newargvals = []
pyvals = []
for exptyp, gottyp, aval in zip(fnty.sig.args, signature.args,
argvals):
# Adjust argument values to pyobjects
if exptyp == types.ffi_forced_object:
self.incref(gottyp, aval)
obj = self.pyapi.from_native_value(
gottyp, aval, self.env_manager,
)
newargvals.append(obj)
pyvals.append(obj)
else:
newargvals.append(aval)
# Call external function
res = self.context.call_function_pointer(
self.builder, pointer, newargvals, fnty.cconv,
)
# Release PyObjects
for obj in pyvals:
self.pyapi.decref(obj)
# Release the GIL
self.pyapi.gil_release(gil_state)
# If the external function pointer does NOT use libpython
else:
res = self.context.call_function_pointer(
self.builder, pointer, argvals, fnty.cconv,
)
return res
def _lower_call_RecursiveCall(self, fnty, expr, signature):
# Recursive call
argvals = self.fold_call_args(
fnty, signature, expr.args, expr.vararg, expr.kws,
)
qualprefix = fnty.overloads[signature.args]
mangler = self.context.mangler or default_mangler
mangled_name = mangler(qualprefix, signature.args)
# special case self recursion
if self.builder.function.name.startswith(mangled_name):
res = self.context.call_internal(
self.builder, self.fndesc, signature, argvals,
)
else:
res = self.context.call_unresolved(
self.builder, mangled_name, signature, argvals,
)
return res
def _lower_call_FunctionType(self, fnty, expr, signature):
self.debug_print("# calling first-class function type")
sig = types.unliteral(signature)
if not fnty.check_signature(signature):
# value dependent polymorphism?
raise UnsupportedError(
f'mismatch of function types:'
f' expected {fnty} but got {types.FunctionType(sig)}')
ftype = fnty.ftype
argvals = self.fold_call_args(
fnty, sig, expr.args, expr.vararg, expr.kws,
)
func_ptr = self.__get_function_pointer(ftype, expr.func.name, sig=sig)
res = self.builder.call(func_ptr, argvals, cconv=fnty.cconv)
return res
def __get_function_pointer(self, ftype, fname, sig=None):
from numba.experimental.function_type import lower_get_wrapper_address
llty = self.context.get_value_type(ftype)
fstruct = self.loadvar(fname)
addr = self.builder.extract_value(fstruct, 0,
name='addr_of_%s' % (fname))
fptr = cgutils.alloca_once(self.builder, llty,
name="fptr_of_%s" % (fname))
with self.builder.if_else(
cgutils.is_null(self.builder, addr),
likely=False) as (then, orelse):
with then:
self.init_pyapi()
# Acquire the GIL
gil_state = self.pyapi.gil_ensure()
pyaddr = self.builder.extract_value(
fstruct, 1,
name='pyaddr_of_%s' % (fname))
# try to recover the function address, see
# test_zero_address BadToGood example in
# test_function_type.py
addr1 = lower_get_wrapper_address(
self.context, self.builder, pyaddr, sig,
failure_mode='ignore')
with self.builder.if_then(
cgutils.is_null(self.builder, addr1), likely=False):
self.return_exception(
RuntimeError,
exc_args=(f"{ftype} function address is null",),
loc=self.loc)
addr2 = self.pyapi.long_as_voidptr(addr1)
self.builder.store(self.builder.bitcast(addr2, llty), fptr)
self.pyapi.decref(addr1)
self.pyapi.gil_release(gil_state)
with orelse:
self.builder.store(self.builder.bitcast(addr, llty), fptr)
return self.builder.load(fptr)
def _lower_call_normal(self, fnty, expr, signature):
# Normal function resolution
self.debug_print("# calling normal function: {0}".format(fnty))
self.debug_print("# signature: {0}".format(signature))
if (isinstance(expr.func, ir.Intrinsic) or
isinstance(fnty, types.ObjModeDispatcher)):
argvals = expr.func.args
else:
argvals = self.fold_call_args(
fnty, signature, expr.args, expr.vararg, expr.kws,
)
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bounded function
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
argvals = [the_self] + list(argvals)
res = impl(self.builder, argvals, self.loc)
return res
def lower_expr(self, resty, expr):
if expr.op == 'binop':
return self.lower_binop(resty, expr, expr.fn)
elif expr.op == 'inplace_binop':
lty = self.typeof(expr.lhs.name)
if lty.mutable:
return self.lower_binop(resty, expr, expr.fn)
else:
# inplace operators on non-mutable types reuse the same
                # definition as the corresponding copying operators.
return self.lower_binop(resty, expr, expr.immutable_fn)
elif expr.op == 'unary':
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
func_ty = self.context.typing_context.resolve_value_type(expr.fn)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(func_ty, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
res = self.context.cast(self.builder, res,
signature.return_type, resty)
return res
elif expr.op == 'call':
res = self.lower_call(resty, expr)
return res
elif expr.op == 'pair_first':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_first(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op == 'pair_second':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_second(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op in ('getiter', 'iternext'):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
res = self.context.cast(self.builder, res, signature.return_type,
resty)
return res
elif expr.op == 'exhaust_iter':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# Unpack optional
if isinstance(ty, types.Optional):
val = self.context.cast(self.builder, val, ty, ty.type)
ty = ty.type
# If we have a tuple, we needn't do anything
# (and we can't iterate over the heterogeneous ones).
if isinstance(ty, types.BaseTuple):
assert ty == resty
self.incref(ty, val)
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function('getiter',
getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function('iternext',
iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder,
self.builder.not_(is_valid)):
self.return_exception(ValueError, loc=self.loc)
item = self.context.pair_first(self.builder,
pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError, loc=self.loc)
self.decref(ty.iterator_type, iterobj)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
casted = self.context.cast(self.builder, val, ty, resty.this)
res = self.context.get_bound_function(self.builder, casted,
resty.this)
self.incref(resty, res)
return res
else:
impl = self.context.get_getattr(ty, expr.attr)
attrty = self.context.typing_context.resolve_getattr(ty,
expr.attr)
if impl is None:
# ignore the attribute
return self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
# Cast the attribute type to the expected output type
res = self.context.cast(self.builder, res, attrty, resty)
return res
elif expr.op == "static_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
_lit_or_omitted(expr.index),
)
try:
# Both get_function() and the returned implementation can
# raise NotImplementedError if the types aren't supported
impl = self.context.get_function("static_getitem", signature)
return impl(self.builder,
(self.loadvar(expr.value.name), expr.index))
except NotImplementedError:
if expr.index_var is None:
raise
# Fall back on the generic getitem() implementation
# for this type.
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(resty, expr, expr.value,
expr.index_var, signature)
elif expr.op == "typed_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
self.typeof(expr.index.name),
)
impl = self.context.get_function("typed_getitem", signature)
return impl(self.builder, (self.loadvar(expr.value.name),
self.loadvar(expr.index.name)))
elif expr.op == "getitem":
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(resty, expr, expr.value, expr.index,
signature)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)]
tup = self.context.make_tuple(self.builder, resty, castvals)
self.incref(resty, tup)
return tup
elif expr.op == "build_list":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty,
resty.dtype)
for val, fromty in zip(itemvals, itemtys)]
return self.context.build_list(self.builder, resty, castvals)
elif expr.op == "build_set":
# Insert in reverse order, as Python does
items = expr.items[::-1]
itemvals = [self.loadvar(i.name) for i in items]
itemtys = [self.typeof(i.name) for i in items]
castvals = [self.context.cast(self.builder, val, fromty,
resty.dtype)
for val, fromty in zip(itemvals, itemtys)]
return self.context.build_set(self.builder, resty, castvals)
elif expr.op == "build_map":
items = expr.items
keys, values = [], []
key_types, value_types = [], []
for k, v in items:
key = self.loadvar(k.name)
keytype = self.typeof(k.name)
val = self.loadvar(v.name)
valtype = self.typeof(v.name)
keys.append(key)
values.append(val)
key_types.append(keytype)
value_types.append(valtype)
return self.context.build_map(self.builder, resty,
list(zip(key_types, value_types)),
list(zip(keys, values)))
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
self.incref(resty, castval)
return castval
elif expr.op == "phi":
raise LoweringError("PHI not stripped")
elif expr.op == 'null':
return self.context.get_constant_null(resty)
elif expr.op in self.context.special_ops:
res = self.context.special_ops[expr.op](self, expr)
return res
raise NotImplementedError(expr)
def _alloca_var(self, name, fetype):
"""
Ensure the given variable has an allocated stack slot.
"""
if name not in self.varmap:
# If not already defined, allocate it
llty = self.context.get_value_type(fetype)
ptr = self.alloca_lltype(name, llty)
# Remember the pointer
self.varmap[name] = ptr
def getvar(self, name):
"""
Get a pointer to the given variable's slot.
"""
return self.varmap[name]
def loadvar(self, name):
"""
Load the given variable's value.
"""
ptr = self.getvar(name)
return self.builder.load(ptr)
def storevar(self, value, name):
"""
Store the value into the given variable.
"""
fetype = self.typeof(name)
# Define if not already
self._alloca_var(name, fetype)
# Clean up existing value stored in the variable
old = self.loadvar(name)
self.decref(fetype, old)
# Store variable
ptr = self.getvar(name)
if value.type != ptr.type.pointee:
msg = ("Storing {value.type} to ptr of {ptr.type.pointee} "
"('{name}'). FE type {fetype}").format(value=value,
ptr=ptr,
fetype=fetype,
name=name)
raise AssertionError(msg)
self.builder.store(value, ptr)
def delvar(self, name):
"""
Delete the given variable.
"""
fetype = self.typeof(name)
# Define if not already (may happen if the variable is deleted
# at the beginning of a loop, but only set later in the loop)
self._alloca_var(name, fetype)
ptr = self.getvar(name)
self.decref(fetype, self.builder.load(ptr))
# Zero-fill variable to avoid double frees on subsequent dels
self.builder.store(Constant.null(ptr.type.pointee), ptr)
def alloca(self, name, type):
lltype = self.context.get_value_type(type)
return self.alloca_lltype(name, lltype)
def alloca_lltype(self, name, lltype):
# Is user variable?
is_uservar = not name.startswith('$')
# Allocate space for variable
aptr = cgutils.alloca_once(self.builder, lltype,
name=name, zfill=False)
if is_uservar:
# Emit debug info for user variable
sizeof = self.context.get_abi_sizeof(lltype)
self.debuginfo.mark_variable(self.builder, aptr, name=name,
lltype=lltype, size=sizeof,
loc=self.loc)
return aptr
def incref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt.incref(self.builder, typ, val)
def decref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt.decref(self.builder, typ, val)
def _lit_or_omitted(value):
"""Returns a Literal instance if the type of value is supported;
otherwise, return `Omitted(value)`.
"""
try:
return types.literal(value)
except LiteralTypingError:
return types.Omitted(value)
|
the-stack_106_27342 | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
start = ListNode(0)
res = ListNode(0)
start.next = res
bit = 0
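        # Walk both lists in lockstep, adding digit pairs; 'bit' holds the
        # carry (0 or 1) propagated into the next position.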
while l1 and l2:
res.next = ListNode(l1.val + l2.val + bit)
res = res.next
if res.val < 10:
bit = 0
else:
bit = 1
res.val -= 10
l1, l2 = l1.next, l2.next
res.next = l1 if l1 else l2
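        # Propagate any leftover carry through the remaining (longer) list,
        # appending a new most-significant node if the carry runs off the end.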
while bit:
if res.next:
res.next.val += 1
res = res.next
if res.val < 10:
bit = 0
else:
bit = 1
res.val -= 10
else:
res.next = ListNode(1)
break
return start.next.next
# for testing, not included in solution
def initial(nums):
start = ListNode(0)
res = ListNode(nums[0])
start.next = res
for i in nums[1:]:
res.next = ListNode(i)
res = res.next
return start.next
def traversal(node):
res = []
while node:
res.append(node.val)
node = node.next
print(res)
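# Hypothetical usage of the helpers above (not part of the submitted
# solution): digits are stored least-significant first, so 342 + 465 = 807.
if __name__ == '__main__':
    l1 = initial([2, 4, 3])
    l2 = initial([5, 6, 4])
    traversal(Solution().addTwoNumbers(l1, l2))  # prints [7, 0, 8]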
|
the-stack_106_27343 | from robolearn.utils.samplers import rollout
from robolearn.torch.core import PyTorchModule
from robolearn.torch.utils.pytorch_util import set_gpu_mode
from robolearn.envs.normalized_box_env import NormalizedBoxEnv
from robolearn_gym_envs.pybullet import CentauroTrayEnv
from robolearn.torch.policies import MultiPolicySelector
from robolearn.torch.policies import WeightedMultiPolicySelector
from robolearn.torch.policies import TanhGaussianPolicy
from robolearn.models.policies import MakeDeterministic
from robolearn.models.policies import ExplorationPolicy
import os
from robolearn.utils.plots import plot_reward_composition
from robolearn.utils.plots import plot_reward_iu
from robolearn.utils.plots import plot_weigths_unintentionals
from robolearn.utils.plots import plot_q_vals
import argparse
import joblib
import uuid
from robolearn.utils.logging import logger
import json
import numpy as np
import robolearn.torch.utils.pytorch_util as ptu
filename = str(uuid.uuid4())
SEED = 110
def simulate_policy(args):
np.random.seed(SEED)
ptu.seed(SEED)
data = joblib.load(args.file)
if args.deterministic:
if args.un > -1:
print('Using the deterministic version of the UNintentional policy '
'%02d.' % args.un)
if 'u_policy' in data:
policy = MakeDeterministic(
MultiPolicySelector(data['u_policy'], args.un))
# WeightedMultiPolicySelector(data['u_policy'], args.un))
else:
# policy = MakeDeterministic(data['u_policies'][args.un])
if isinstance(data['policy'], TanhGaussianPolicy):
policy = MakeDeterministic(data['policy'])
else:
policy = MakeDeterministic(
WeightedMultiPolicySelector(data['policy'], args.un)
)
else:
print('Using the deterministic version of the Intentional policy.')
if isinstance(data['policy'], ExplorationPolicy):
policy = MakeDeterministic(data['policy'])
else:
policy = data['policy']
else:
if args.un > -1:
print('Using the UNintentional stochastic policy %02d' % args.un)
if 'u_policy' in data:
# policy = MultiPolicySelector(data['u_policy'], args.un)
policy = WeightedMultiPolicySelector(data['u_policy'], args.un)
else:
policy = WeightedMultiPolicySelector(data['policy'], args.un)
# policy = data['policy'][args.un]
else:
print('Using the Intentional stochastic policy.')
# policy = data['exploration_policy']
policy = data['policy']
print("Policy loaded!!")
# Load environment
dirname = os.path.dirname(args.file)
with open(os.path.join(dirname, 'variant.json')) as json_data:
log_data = json.load(json_data)
env_params = log_data['env_params']
H = int(log_data['path_length'])
env_params['is_render'] = True
if 'obs_mean' in data.keys():
obs_mean = data['obs_mean']
print('OBS_MEAN')
print(repr(obs_mean))
else:
obs_mean = None
# obs_mean = np.array([ 0.07010766, 0.37585765, 0.21402615, 0.24426296, 0.5789634 ,
# 0.88510203, 1.6878743 , 0.02656335, 0.03794186, -1.0241051 ,
# -0.5226027 , 0.6198239 , 0.49062446, 0.01197532, 0.7888951 ,
# -0.4857273 , 0.69160587, -0.00617676, 0.08966777, -0.14694819,
# 0.9559917 , 1.0450271 , -0.40958315, 0.86435956, 0.00609685,
# -0.01115279, -0.21607827, 0.9762933 , 0.80748135, -0.48661205,
# 0.7473679 , 0.01649722, 0.15451911, -0.17285274, 0.89978695])
if 'obs_var' in data.keys():
obs_var = data['obs_var']
print('OBS_VAR')
print(repr(obs_var))
else:
obs_var = None
# obs_var = np.array([0.10795759, 0.12807205, 0.9586606 , 0.46407 , 0.8994803 ,
# 0.35167143, 0.30286264, 0.34667444, 0.35105848, 1.9919134 ,
# 0.9462659 , 2.245269 , 0.84190637, 1.5407104 , 0.1 ,
# 0.10330457, 0.1 , 0.1 , 0.1 , 0.1528581 ,
# 0.1 , 0.1 , 0.1 , 0.1 , 0.1 ,
# 0.1 , 0.1 , 0.1 , 0.1 , 0.12320185,
# 0.1 , 0.18369523, 0.200373 , 0.11895574, 0.15118493])
print(env_params)
if args.subtask and args.un != -1:
env_params['subtask'] = args.un
# else:
# env_params['subtask'] = None
env = NormalizedBoxEnv(
CentauroTrayEnv(**env_params),
# normalize_obs=True,
normalize_obs=False,
online_normalization=False,
obs_mean=None,
obs_var=None,
obs_alpha=0.001,
)
print("Environment loaded!!")
if args.gpu:
set_gpu_mode(True)
policy.cuda()
if isinstance(policy, MakeDeterministic):
if isinstance(policy.stochastic_policy, PyTorchModule):
policy.stochastic_policy.train(False)
else:
if isinstance(policy, PyTorchModule):
policy.train(False)
while True:
if args.record:
rollout_start_fcn = lambda: \
env.start_recording_video('centauro_video.mp4')
rollout_end_fcn = lambda: \
env.stop_recording_video()
else:
rollout_start_fcn = None
rollout_end_fcn = None
obs_normalizer = data.get('obs_normalizer')
if args.H != -1:
H = args.H
path = rollout(
env,
policy,
max_path_length=H,
animated=True,
obs_normalizer=obs_normalizer,
rollout_start_fcn=rollout_start_fcn,
rollout_end_fcn=rollout_end_fcn,
)
plot_rollout_reward(path)
if hasattr(env, "log_diagnostics"):
env.log_diagnostics([path])
logger.dump_tabular()
if args.record:
break
def plot_rollout_reward(path):
import matplotlib.pyplot as plt
rewards = np.squeeze(path['rewards'])
plt.plot(rewards)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, default='./params.pkl',
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=-1,
help='Max length of rollout')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--deterministic', action="store_true")
parser.add_argument('--record', action="store_true")
parser.add_argument('--env', type=str, default='manipulator')
parser.add_argument('--un', type=int, default=-1,
help='Unintentional id')
parser.add_argument('--subtask', action='store_true')
args = parser.parse_args()
simulate_policy(args)
input('Press a key to finish the script')
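# Example invocation (hypothetical script name and file path; flags match the
# argparse definitions above):
#   python simulate_policy.py /path/to/params.pkl --deterministic --un 0 --H 500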
|
the-stack_106_27344 | import numpy as np
from . import is_scalar_nan
from .fixes import _object_dtype_isnan
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if is_scalar_nan(value_to_mask):
if X.dtype.kind == "f":
return np.isnan(X)
elif X.dtype.kind in ("i", "u"):
# can't have NaNs in integer array.
return np.zeros(X.shape, dtype=bool)
else:
# np.isnan does not work on object dtypes.
return _object_dtype_isnan(X)
else:
        # X == value_to_mask with object dtypes does not always perform an
        # element-wise comparison on old versions of numpy
return np.equal(X, value_to_mask)
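# Minimal usage sketch (illustrative, not part of the module):
#   >>> X = np.array([[1.0, np.nan], [3.0, 4.0]])
#   >>> _get_mask(X, np.nan)
#   array([[False,  True],
#          [False, False]])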
|
the-stack_106_27348 | import pickle
import os
from os.path import join, expanduser
from invoke.util import six
from mock import patch, call, Mock
import pytest
from pytest_relaxed import raises
from invoke.runners import Local
from invoke.config import Config
from invoke.exceptions import (
AmbiguousEnvVar,
UncastableEnvVar,
UnknownFileType,
UnpicklableConfigMember,
)
from _util import skip_if_windows, support
pytestmark = pytest.mark.usefixtures("integration")
CONFIGS_PATH = "configs"
TYPES = ("yaml", "yml", "json", "python")
def _load(kwarg, type_, **kwargs):
path = join(CONFIGS_PATH, type_ + "/")
kwargs[kwarg] = path
return Config(**kwargs)
class Config_:
class class_attrs:
# TODO: move all other non-data-bearing kwargs to this mode
class prefix:
def defaults_to_invoke(self):
assert Config().prefix == "invoke"
@patch.object(Config, "_load_yaml")
def informs_config_filenames(self, load_yaml):
class MyConf(Config):
prefix = "other"
MyConf(system_prefix="dir/")
load_yaml.assert_any_call("dir/other.yaml")
def informs_env_var_prefix(self):
os.environ["OTHER_FOO"] = "bar"
class MyConf(Config):
prefix = "other"
c = MyConf(defaults={"foo": "notbar"})
c.load_shell_env()
assert c.foo == "bar"
class file_prefix:
def defaults_to_None(self):
assert Config().file_prefix is None
@patch.object(Config, "_load_yaml")
def informs_config_filenames(self, load_yaml):
class MyConf(Config):
file_prefix = "other"
MyConf(system_prefix="dir/")
load_yaml.assert_any_call("dir/other.yaml")
class env_prefix:
def defaults_to_None(self):
assert Config().env_prefix is None
def informs_env_vars_loaded(self):
os.environ["OTHER_FOO"] = "bar"
class MyConf(Config):
env_prefix = "other"
c = MyConf(defaults={"foo": "notbar"})
c.load_shell_env()
assert c.foo == "bar"
class global_defaults:
@skip_if_windows
def basic_settings(self):
# Just a catchall for what the baseline config settings should
# be...for some reason we're not actually capturing all of these
# reliably (even if their defaults are often implied by the tests
# which override them, e.g. runner tests around warn=True, etc).
expected = {
"run": {
"asynchronous": False,
"disown": False,
"dry": False,
"echo": False,
"echo_format": "\033[1;37m{command}\033[0m",
"echo_stdin": None,
"encoding": None,
"env": {},
"err_stream": None,
"fallback": True,
"hide": None,
"in_stream": None,
"out_stream": None,
"pty": False,
"replace_env": False,
"shell": "/bin/bash",
"warn": False,
"watchers": [],
},
"runners": {"local": Local},
"sudo": {
"password": None,
"prompt": "[sudo] password: ",
"user": None,
},
"tasks": {
"auto_dash_names": True,
"collection_name": "tasks",
"dedupe": True,
"executor_class": None,
"search_root": None,
},
"timeouts": {"command": None},
}
assert Config.global_defaults() == expected
class init:
"__init__"
def can_be_empty(self):
assert Config().__class__ == Config # derp
@patch.object(Config, "_load_yaml")
def configure_global_location_prefix(self, load_yaml):
# This is a bit funky but more useful than just replicating the
# same test farther down?
Config(system_prefix="meh/")
load_yaml.assert_any_call("meh/invoke.yaml")
@skip_if_windows
@patch.object(Config, "_load_yaml")
def default_system_prefix_is_etc(self, load_yaml):
# TODO: make this work on Windows somehow without being a total
# tautology? heh.
Config()
load_yaml.assert_any_call("/etc/invoke.yaml")
@patch.object(Config, "_load_yaml")
def configure_user_location_prefix(self, load_yaml):
Config(user_prefix="whatever/")
load_yaml.assert_any_call("whatever/invoke.yaml")
@patch.object(Config, "_load_yaml")
def default_user_prefix_is_homedir_plus_dot(self, load_yaml):
Config()
load_yaml.assert_any_call(expanduser("~/.invoke.yaml"))
@patch.object(Config, "_load_yaml")
def configure_project_location(self, load_yaml):
Config(project_location="someproject").load_project()
load_yaml.assert_any_call(join("someproject", "invoke.yaml"))
@patch.object(Config, "_load_yaml")
def configure_runtime_path(self, load_yaml):
Config(runtime_path="some/path.yaml").load_runtime()
load_yaml.assert_any_call("some/path.yaml")
def accepts_defaults_dict_kwarg(self):
c = Config(defaults={"super": "low level"})
assert c.super == "low level"
def overrides_dict_is_first_posarg(self):
c = Config({"new": "data", "run": {"hide": True}})
assert c.run.hide is True # default is False
assert c.run.warn is False # in global defaults, untouched
assert c.new == "data" # data only present at overrides layer
def overrides_dict_is_also_a_kwarg(self):
c = Config(overrides={"run": {"hide": True}})
assert c.run.hide is True
@patch.object(Config, "load_system")
@patch.object(Config, "load_user")
@patch.object(Config, "merge")
def system_and_user_files_loaded_automatically(
self, merge, load_u, load_s
):
Config()
load_s.assert_called_once_with(merge=False)
load_u.assert_called_once_with(merge=False)
merge.assert_called_once_with()
@patch.object(Config, "load_system")
@patch.object(Config, "load_user")
def can_defer_loading_system_and_user_files(self, load_u, load_s):
config = Config(lazy=True)
assert not load_s.called
assert not load_u.called
# Make sure default levels are still in place! (When bug present,
# i.e. merge() never called, config appears effectively empty.)
assert config.run.echo is False
class basic_API:
"Basic API components"
def can_be_used_directly_after_init(self):
# No load() here...
c = Config({"lots of these": "tests look similar"})
assert c["lots of these"] == "tests look similar"
def allows_dict_and_attr_access(self):
# TODO: combine with tests for Context probably
c = Config({"foo": "bar"})
assert c.foo == "bar"
assert c["foo"] == "bar"
def nested_dict_values_also_allow_dual_access(self):
# TODO: ditto
c = Config({"foo": "bar", "biz": {"baz": "boz"}})
# Sanity check - nested doesn't somehow kill simple top level
assert c.foo == "bar"
assert c["foo"] == "bar"
# Actual check
assert c.biz.baz == "boz"
assert c["biz"]["baz"] == "boz"
assert c.biz["baz"] == "boz"
assert c["biz"].baz == "boz"
def attr_access_has_useful_error_msg(self):
c = Config()
try:
c.nope
except AttributeError as e:
expected = """
No attribute or config key found for 'nope'
Valid keys: ['run', 'runners', 'sudo', 'tasks', 'timeouts']
Valid real attributes: ['clear', 'clone', 'env_prefix', 'file_prefix', 'from_data', 'global_defaults', 'load_base_conf_files', 'load_collection', 'load_defaults', 'load_overrides', 'load_project', 'load_runtime', 'load_shell_env', 'load_system', 'load_user', 'merge', 'pop', 'popitem', 'prefix', 'set_project_location', 'set_runtime_path', 'setdefault', 'update']
""".strip() # noqa
assert str(e) == expected
else:
assert False, "Didn't get an AttributeError on bad key!"
def subkeys_get_merged_not_overwritten(self):
# Ensures nested keys merge deeply instead of shallowly.
defaults = {"foo": {"bar": "baz"}}
overrides = {"foo": {"notbar": "notbaz"}}
c = Config(defaults=defaults, overrides=overrides)
assert c.foo.notbar == "notbaz"
assert c.foo.bar == "baz"
def is_iterable_like_dict(self):
c = Config(defaults={"a": 1, "b": 2})
assert set(c.keys()) == {"a", "b"}
assert set(list(c)) == {"a", "b"}
def supports_readonly_dict_protocols(self):
# Use single-keypair dict to avoid sorting problems in tests.
c = Config(defaults={"foo": "bar"})
c2 = Config(defaults={"foo": "bar"})
assert "foo" in c
assert "foo" in c2 # mostly just to trigger loading :x
assert c == c2
assert len(c) == 1
assert c.get("foo") == "bar"
if six.PY2:
assert c.has_key("foo") is True # noqa
assert list(c.iterkeys()) == ["foo"]
assert list(c.itervalues()) == ["bar"]
assert list(c.items()) == [("foo", "bar")]
assert list(six.iteritems(c)) == [("foo", "bar")]
assert list(c.keys()) == ["foo"]
assert list(c.values()) == ["bar"]
class runtime_loading_of_defaults_and_overrides:
def defaults_can_be_given_via_method(self):
c = Config()
assert "foo" not in c
c.load_defaults({"foo": "bar"})
assert c.foo == "bar"
def defaults_can_skip_merging(self):
c = Config()
c.load_defaults({"foo": "bar"}, merge=False)
assert "foo" not in c
c.merge()
assert c.foo == "bar"
def overrides_can_be_given_via_method(self):
c = Config(defaults={"foo": "bar"})
assert c.foo == "bar" # defaults level
c.load_overrides({"foo": "notbar"})
assert c.foo == "notbar" # overrides level
def overrides_can_skip_merging(self):
c = Config()
c.load_overrides({"foo": "bar"}, merge=False)
assert "foo" not in c
c.merge()
assert c.foo == "bar"
class deletion_methods:
def pop(self):
# Root
c = Config(defaults={"foo": "bar"})
assert c.pop("foo") == "bar"
assert c == {}
# With the default arg
assert c.pop("wut", "fine then") == "fine then"
# Leaf (different key to avoid AmbiguousMergeError)
c.nested = {"leafkey": "leafval"}
assert c.nested.pop("leafkey") == "leafval"
assert c == {"nested": {}}
def delitem(self):
"__delitem__"
c = Config(defaults={"foo": "bar"})
del c["foo"]
assert c == {}
c.nested = {"leafkey": "leafval"}
del c.nested["leafkey"]
assert c == {"nested": {}}
def delattr(self):
"__delattr__"
c = Config(defaults={"foo": "bar"})
del c.foo
assert c == {}
c.nested = {"leafkey": "leafval"}
del c.nested.leafkey
assert c == {"nested": {}}
def clear(self):
c = Config(defaults={"foo": "bar"})
c.clear()
assert c == {}
c.nested = {"leafkey": "leafval"}
c.nested.clear()
assert c == {"nested": {}}
def popitem(self):
c = Config(defaults={"foo": "bar"})
assert c.popitem() == ("foo", "bar")
assert c == {}
c.nested = {"leafkey": "leafval"}
assert c.nested.popitem() == ("leafkey", "leafval")
assert c == {"nested": {}}
class modification_methods:
def setitem(self):
c = Config(defaults={"foo": "bar"})
c["foo"] = "notbar"
assert c.foo == "notbar"
del c["foo"]
c["nested"] = {"leafkey": "leafval"}
assert c == {"nested": {"leafkey": "leafval"}}
def setdefault(self):
c = Config({"foo": "bar", "nested": {"leafkey": "leafval"}})
assert c.setdefault("foo") == "bar"
assert c.nested.setdefault("leafkey") == "leafval"
assert c.setdefault("notfoo", "notbar") == "notbar"
assert c.notfoo == "notbar"
nested = c.nested.setdefault("otherleaf", "otherval")
assert nested == "otherval"
assert c.nested.otherleaf == "otherval"
def update(self):
c = Config(
defaults={"foo": "bar", "nested": {"leafkey": "leafval"}}
)
# Regular update(dict)
c.update({"foo": "notbar"})
assert c.foo == "notbar"
c.nested.update({"leafkey": "otherval"})
assert c.nested.leafkey == "otherval"
# Apparently allowed but wholly useless
c.update()
expected = {"foo": "notbar", "nested": {"leafkey": "otherval"}}
assert c == expected
# Kwarg edition
c.update(foo="otherbar")
assert c.foo == "otherbar"
# Iterator of 2-tuples edition
c.nested.update(
[("leafkey", "yetanotherval"), ("newleaf", "turnt")]
)
assert c.nested.leafkey == "yetanotherval"
assert c.nested.newleaf == "turnt"
def reinstatement_of_deleted_values_works_ok(self):
# Sounds like a stupid thing to test, but when we have to track
# deletions and mutations manually...it's an easy thing to overlook
c = Config(defaults={"foo": "bar"})
assert c.foo == "bar"
del c["foo"]
# Sanity checks
assert "foo" not in c
assert len(c) == 0
# Put it back again...as a different value, for funsies
c.foo = "formerly bar"
# And make sure it stuck
assert c.foo == "formerly bar"
def deleting_parent_keys_of_deleted_keys_subsumes_them(self):
c = Config({"foo": {"bar": "biz"}})
del c.foo["bar"]
del c.foo
# Make sure we didn't somehow still end up with {'foo': {'bar':
# None}}
assert c._deletions == {"foo": None}
def supports_mutation_via_attribute_access(self):
c = Config({"foo": "bar"})
assert c.foo == "bar"
c.foo = "notbar"
assert c.foo == "notbar"
assert c["foo"] == "notbar"
def supports_nested_mutation_via_attribute_access(self):
c = Config({"foo": {"bar": "biz"}})
assert c.foo.bar == "biz"
c.foo.bar = "notbiz"
assert c.foo.bar == "notbiz"
assert c["foo"]["bar"] == "notbiz"
def real_attrs_and_methods_win_over_attr_proxying(self):
# Setup
class MyConfig(Config):
myattr = None
def mymethod(self):
return 7
c = MyConfig({"myattr": "foo", "mymethod": "bar"})
# By default, attr and config value separate
assert c.myattr is None
assert c["myattr"] == "foo"
# After a setattr, same holds true
c.myattr = "notfoo"
assert c.myattr == "notfoo"
assert c["myattr"] == "foo"
# Method and config value separate
assert callable(c.mymethod)
assert c.mymethod() == 7
assert c["mymethod"] == "bar"
# And same after setattr
def monkeys():
return 13
c.mymethod = monkeys
assert c.mymethod() == 13
assert c["mymethod"] == "bar"
def config_itself_stored_as_private_name(self):
# I.e. one can refer to a key called 'config', which is relatively
# commonplace (e.g. <Config>.myservice.config -> a config file
# contents or path or etc)
c = Config()
c["foo"] = {"bar": "baz"}
c["whatever"] = {"config": "myconfig"}
assert c.foo.bar == "baz"
assert c.whatever.config == "myconfig"
def inherited_real_attrs_also_win_over_config_keys(self):
class MyConfigParent(Config):
parent_attr = 17
class MyConfig(MyConfigParent):
pass
c = MyConfig()
assert c.parent_attr == 17
c.parent_attr = 33
oops = "Oops! Looks like config won over real attr!"
assert "parent_attr" not in c, oops
assert c.parent_attr == 33
c["parent_attr"] = "fifteen"
assert c.parent_attr == 33
assert c["parent_attr"] == "fifteen"
def nonexistent_attrs_can_be_set_to_create_new_top_level_configs(self):
# I.e. some_config.foo = 'bar' is like some_config['foo'] = 'bar'.
# When this test breaks it usually means some_config.foo = 'bar'
# sets a regular attribute - and the configuration itself is never
# touched!
c = Config()
c.some_setting = "some_value"
assert c["some_setting"] == "some_value"
def nonexistent_attr_setting_works_nested_too(self):
c = Config()
c.a_nest = {}
assert c["a_nest"] == {}
c.a_nest.an_egg = True
assert c["a_nest"]["an_egg"]
def string_display(self):
"__str__ and friends"
config = Config(defaults={"foo": "bar"})
assert repr(config) == "<Config: {'foo': 'bar'}>"
def merging_does_not_wipe_user_modifications_or_deletions(self):
c = Config({"foo": {"bar": "biz"}, "error": True})
c.foo.bar = "notbiz"
del c["error"]
assert c["foo"]["bar"] == "notbiz"
assert "error" not in c
c.merge()
# Will be back to 'biz' if user changes don't get saved on their
# own (previously they are just mutations on the cached central
# config)
assert c["foo"]["bar"] == "notbiz"
# And this would still be here, too
assert "error" not in c
class config_file_loading:
"Configuration file loading"
def system_global(self):
"Systemwide conf files"
# NOTE: using lazy=True to avoid autoloading so we can prove
# load_system() works.
for type_ in TYPES:
config = _load("system_prefix", type_, lazy=True)
assert "outer" not in config
config.load_system()
assert config.outer.inner.hooray == type_
def system_can_skip_merging(self):
config = _load("system_prefix", "yml", lazy=True)
assert "outer" not in config._system
assert "outer" not in config
config.load_system(merge=False)
# Test that we loaded into the per-level dict, but not the
# central/merged config.
assert "outer" in config._system
assert "outer" not in config
def user_specific(self):
"User-specific conf files"
# NOTE: using lazy=True to avoid autoloading so we can prove
# load_user() works.
for type_ in TYPES:
config = _load("user_prefix", type_, lazy=True)
assert "outer" not in config
config.load_user()
assert config.outer.inner.hooray == type_
def user_can_skip_merging(self):
config = _load("user_prefix", "yml", lazy=True)
assert "outer" not in config._user
assert "outer" not in config
config.load_user(merge=False)
# Test that we loaded into the per-level dict, but not the
# central/merged config.
assert "outer" in config._user
assert "outer" not in config
def project_specific(self):
"Local-to-project conf files"
for type_ in TYPES:
c = Config(project_location=join(CONFIGS_PATH, type_))
assert "outer" not in c
c.load_project()
assert c.outer.inner.hooray == type_
def project_can_skip_merging(self):
config = Config(
project_location=join(CONFIGS_PATH, "yml"), lazy=True
)
assert "outer" not in config._project
assert "outer" not in config
config.load_project(merge=False)
# Test that we loaded into the per-level dict, but not the
# central/merged config.
assert "outer" in config._project
assert "outer" not in config
def loads_no_project_specific_file_if_no_project_location_given(self):
c = Config()
assert c._project_path is None
c.load_project()
assert list(c._project.keys()) == []
defaults = ["tasks", "run", "runners", "sudo", "timeouts"]
assert set(c.keys()) == set(defaults)
def project_location_can_be_set_after_init(self):
c = Config()
assert "outer" not in c
c.set_project_location(join(CONFIGS_PATH, "yml"))
c.load_project()
assert c.outer.inner.hooray == "yml"
def runtime_conf_via_cli_flag(self):
c = Config(runtime_path=join(CONFIGS_PATH, "yaml", "invoke.yaml"))
c.load_runtime()
assert c.outer.inner.hooray == "yaml"
def runtime_can_skip_merging(self):
path = join(CONFIGS_PATH, "yaml", "invoke.yaml")
config = Config(runtime_path=path, lazy=True)
assert "outer" not in config._runtime
assert "outer" not in config
config.load_runtime(merge=False)
# Test that we loaded into the per-level dict, but not the
# central/merged config.
assert "outer" in config._runtime
assert "outer" not in config
@raises(UnknownFileType)
def unknown_suffix_in_runtime_path_raises_useful_error(self):
c = Config(runtime_path=join(CONFIGS_PATH, "screw.ini"))
c.load_runtime()
def python_modules_dont_load_special_vars(self):
"Python modules don't load special vars"
# Borrow another test's Python module.
c = _load("system_prefix", "python")
# Sanity test that lowercase works
assert c.outer.inner.hooray == "python"
# Real test that builtins, etc are stripped out
for special in ("builtins", "file", "package", "name", "doc"):
assert "__{}__".format(special) not in c
def python_modules_except_usefully_on_unpicklable_modules(self):
# Re: #556; when bug present, a TypeError pops up instead (granted,
# at merge time, but we want it to raise ASAP, so we're testing the
# intended new behavior: raising at config load time.
c = Config()
c.set_runtime_path(join(support, "has_modules.py"))
expected = r"'os' is a module.*giving a tasks file.*mistake"
with pytest.raises(UnpicklableConfigMember, match=expected):
c.load_runtime(merge=False)
@patch("invoke.config.debug")
def nonexistent_files_are_skipped_and_logged(self, mock_debug):
c = Config()
c._load_yml = Mock(side_effect=IOError(2, "aw nuts"))
c.set_runtime_path("is-a.yml") # Triggers use of _load_yml
c.load_runtime()
mock_debug.assert_any_call("Didn't see any is-a.yml, skipping.")
@raises(IOError)
def non_missing_file_IOErrors_are_raised(self):
c = Config()
c._load_yml = Mock(side_effect=IOError(17, "uh, what?"))
c.set_runtime_path("is-a.yml") # Triggers use of _load_yml
c.load_runtime()
class collection_level_config_loading:
def performed_explicitly_and_directly(self):
# TODO: do we want to update the other levels to allow 'direct'
# loading like this, now that they all have explicit methods?
c = Config()
assert "foo" not in c
c.load_collection({"foo": "bar"})
assert c.foo == "bar"
def merging_can_be_deferred(self):
c = Config()
assert "foo" not in c._collection
assert "foo" not in c
c.load_collection({"foo": "bar"}, merge=False)
assert "foo" in c._collection
assert "foo" not in c
class comparison_and_hashing:
def comparison_looks_at_merged_config(self):
c1 = Config(defaults={"foo": {"bar": "biz"}})
# Empty defaults to suppress global_defaults
c2 = Config(defaults={}, overrides={"foo": {"bar": "biz"}})
assert c1 is not c2
assert c1._defaults != c2._defaults
assert c1 == c2
def allows_comparison_with_real_dicts(self):
c = Config({"foo": {"bar": "biz"}})
assert c["foo"] == {"bar": "biz"}
@raises(TypeError)
def is_explicitly_not_hashable(self):
hash(Config())
class env_vars:
"Environment variables"
def base_case_defaults_to_INVOKE_prefix(self):
os.environ["INVOKE_FOO"] = "bar"
c = Config(defaults={"foo": "notbar"})
c.load_shell_env()
assert c.foo == "bar"
def non_predeclared_settings_do_not_get_consumed(self):
os.environ["INVOKE_HELLO"] = "is it me you're looking for?"
c = Config()
c.load_shell_env()
assert "HELLO" not in c
assert "hello" not in c
def underscores_top_level(self):
os.environ["INVOKE_FOO_BAR"] = "biz"
c = Config(defaults={"foo_bar": "notbiz"})
c.load_shell_env()
assert c.foo_bar == "biz"
def underscores_nested(self):
os.environ["INVOKE_FOO_BAR"] = "biz"
c = Config(defaults={"foo": {"bar": "notbiz"}})
c.load_shell_env()
assert c.foo.bar == "biz"
def both_types_of_underscores_mixed(self):
os.environ["INVOKE_FOO_BAR_BIZ"] = "baz"
c = Config(defaults={"foo_bar": {"biz": "notbaz"}})
c.load_shell_env()
assert c.foo_bar.biz == "baz"
@raises(AmbiguousEnvVar)
def ambiguous_underscores_dont_guess(self):
os.environ["INVOKE_FOO_BAR"] = "biz"
c = Config(defaults={"foo_bar": "wat", "foo": {"bar": "huh"}})
c.load_shell_env()
class type_casting:
def strings_replaced_with_env_value(self):
os.environ["INVOKE_FOO"] = u"myvalue"
c = Config(defaults={"foo": "myoldvalue"})
c.load_shell_env()
assert c.foo == u"myvalue"
assert isinstance(c.foo, six.text_type)
def unicode_replaced_with_env_value(self):
# Python 3 doesn't allow you to put 'bytes' objects into
# os.environ, so the test makes no sense there.
if six.PY3:
return
os.environ["INVOKE_FOO"] = "myunicode"
c = Config(defaults={"foo": u"myoldvalue"})
c.load_shell_env()
assert c.foo == "myunicode"
assert isinstance(c.foo, str)
def None_replaced(self):
os.environ["INVOKE_FOO"] = "something"
c = Config(defaults={"foo": None})
c.load_shell_env()
assert c.foo == "something"
def booleans(self):
for input_, result in (
("0", False),
("1", True),
("", False),
("meh", True),
("false", True),
):
os.environ["INVOKE_FOO"] = input_
c = Config(defaults={"foo": bool()})
c.load_shell_env()
assert c.foo == result
def boolean_type_inputs_with_non_boolean_defaults(self):
for input_ in ("0", "1", "", "meh", "false"):
os.environ["INVOKE_FOO"] = input_
c = Config(defaults={"foo": "bar"})
c.load_shell_env()
assert c.foo == input_
def numeric_types_become_casted(self):
tests = [
(int, "5", 5),
(float, "5.5", 5.5),
# TODO: more?
]
# Can't use '5L' in Python 3, even having it in a branch makes
# it upset.
if not six.PY3:
tests.append((long, "5", long(5))) # noqa
for old, new_, result in tests:
os.environ["INVOKE_FOO"] = new_
c = Config(defaults={"foo": old()})
c.load_shell_env()
assert c.foo == result
def arbitrary_types_work_too(self):
os.environ["INVOKE_FOO"] = "whatever"
class Meh(object):
def __init__(self, thing=None):
pass
old_obj = Meh()
c = Config(defaults={"foo": old_obj})
c.load_shell_env()
assert isinstance(c.foo, Meh)
assert c.foo is not old_obj
class uncastable_types:
@raises(UncastableEnvVar)
def _uncastable_type(self, default):
os.environ["INVOKE_FOO"] = "stuff"
c = Config(defaults={"foo": default})
c.load_shell_env()
def lists(self):
self._uncastable_type(["a", "list"])
def tuples(self):
self._uncastable_type(("a", "tuple"))
class hierarchy:
"Config hierarchy in effect"
#
# NOTE: most of these just leverage existing test fixtures (which live
# in their own directories & have differing values for the 'hooray'
# key), since we normally don't need more than 2-3 different file
# locations for any one test.
#
def collection_overrides_defaults(self):
c = Config(defaults={"nested": {"setting": "default"}})
c.load_collection({"nested": {"setting": "collection"}})
assert c.nested.setting == "collection"
def systemwide_overrides_collection(self):
c = Config(system_prefix=join(CONFIGS_PATH, "yaml/"))
c.load_collection({"outer": {"inner": {"hooray": "defaults"}}})
assert c.outer.inner.hooray == "yaml"
def user_overrides_systemwide(self):
c = Config(
system_prefix=join(CONFIGS_PATH, "yaml/"),
user_prefix=join(CONFIGS_PATH, "json/"),
)
assert c.outer.inner.hooray == "json"
def user_overrides_collection(self):
c = Config(user_prefix=join(CONFIGS_PATH, "json/"))
c.load_collection({"outer": {"inner": {"hooray": "defaults"}}})
assert c.outer.inner.hooray == "json"
def project_overrides_user(self):
c = Config(
user_prefix=join(CONFIGS_PATH, "json/"),
project_location=join(CONFIGS_PATH, "yaml"),
)
c.load_project()
assert c.outer.inner.hooray == "yaml"
def project_overrides_systemwide(self):
c = Config(
system_prefix=join(CONFIGS_PATH, "json/"),
project_location=join(CONFIGS_PATH, "yaml"),
)
c.load_project()
assert c.outer.inner.hooray == "yaml"
def project_overrides_collection(self):
c = Config(project_location=join(CONFIGS_PATH, "yaml"))
c.load_project()
c.load_collection({"outer": {"inner": {"hooray": "defaults"}}})
assert c.outer.inner.hooray == "yaml"
def env_vars_override_project(self):
os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env"
c = Config(project_location=join(CONFIGS_PATH, "yaml"))
c.load_project()
c.load_shell_env()
assert c.outer.inner.hooray == "env"
def env_vars_override_user(self):
os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env"
c = Config(user_prefix=join(CONFIGS_PATH, "yaml/"))
c.load_shell_env()
assert c.outer.inner.hooray == "env"
def env_vars_override_systemwide(self):
os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env"
c = Config(system_prefix=join(CONFIGS_PATH, "yaml/"))
c.load_shell_env()
assert c.outer.inner.hooray == "env"
def env_vars_override_collection(self):
os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env"
c = Config()
c.load_collection({"outer": {"inner": {"hooray": "defaults"}}})
c.load_shell_env()
assert c.outer.inner.hooray == "env"
def runtime_overrides_env_vars(self):
os.environ["INVOKE_OUTER_INNER_HOORAY"] = "env"
c = Config(runtime_path=join(CONFIGS_PATH, "json", "invoke.json"))
c.load_runtime()
c.load_shell_env()
assert c.outer.inner.hooray == "json"
def runtime_overrides_project(self):
c = Config(
runtime_path=join(CONFIGS_PATH, "json", "invoke.json"),
project_location=join(CONFIGS_PATH, "yaml"),
)
c.load_runtime()
c.load_project()
assert c.outer.inner.hooray == "json"
def runtime_overrides_user(self):
c = Config(
runtime_path=join(CONFIGS_PATH, "json", "invoke.json"),
user_prefix=join(CONFIGS_PATH, "yaml/"),
)
c.load_runtime()
assert c.outer.inner.hooray == "json"
def runtime_overrides_systemwide(self):
c = Config(
runtime_path=join(CONFIGS_PATH, "json", "invoke.json"),
system_prefix=join(CONFIGS_PATH, "yaml/"),
)
c.load_runtime()
assert c.outer.inner.hooray == "json"
def runtime_overrides_collection(self):
c = Config(runtime_path=join(CONFIGS_PATH, "json", "invoke.json"))
c.load_collection({"outer": {"inner": {"hooray": "defaults"}}})
c.load_runtime()
assert c.outer.inner.hooray == "json"
def cli_overrides_override_all(self):
"CLI-driven overrides win vs all other layers"
# TODO: expand into more explicit tests like the above? meh
c = Config(
overrides={"outer": {"inner": {"hooray": "overrides"}}},
runtime_path=join(CONFIGS_PATH, "json", "invoke.json"),
)
c.load_runtime()
assert c.outer.inner.hooray == "overrides"
def yaml_prevents_yml_json_or_python(self):
c = Config(system_prefix=join(CONFIGS_PATH, "all-four/"))
assert "json-only" not in c
assert "python_only" not in c
assert "yml-only" not in c
assert "yaml-only" in c
assert c.shared == "yaml-value"
def yml_prevents_json_or_python(self):
c = Config(system_prefix=join(CONFIGS_PATH, "three-of-em/"))
assert "json-only" not in c
assert "python_only" not in c
assert "yml-only" in c
assert c.shared == "yml-value"
def json_prevents_python(self):
c = Config(system_prefix=join(CONFIGS_PATH, "json-and-python/"))
assert "python_only" not in c
assert "json-only" in c
assert c.shared == "json-value"
class clone:
def preserves_basic_members(self):
c1 = Config(
defaults={"key": "default"},
overrides={"key": "override"},
system_prefix="global",
user_prefix="user",
project_location="project",
runtime_path="runtime.yaml",
)
c2 = c1.clone()
# NOTE: expecting identical defaults also implicitly tests that
# clone() passes in defaults= instead of doing an empty init +
# copy. (When that is not the case, we end up with
# global_defaults() being rerun and re-added to _defaults...)
assert c2._defaults == c1._defaults
assert c2._defaults is not c1._defaults
assert c2._overrides == c1._overrides
assert c2._overrides is not c1._overrides
assert c2._system_prefix == c1._system_prefix
assert c2._user_prefix == c1._user_prefix
assert c2._project_prefix == c1._project_prefix
assert c2.prefix == c1.prefix
assert c2.file_prefix == c1.file_prefix
assert c2.env_prefix == c1.env_prefix
assert c2._runtime_path == c1._runtime_path
def preserves_merged_config(self):
c = Config(
defaults={"key": "default"}, overrides={"key": "override"}
)
assert c.key == "override"
assert c._defaults["key"] == "default"
c2 = c.clone()
assert c2.key == "override"
assert c2._defaults["key"] == "default"
assert c2._overrides["key"] == "override"
def preserves_file_data(self):
c = Config(system_prefix=join(CONFIGS_PATH, "yaml/"))
assert c.outer.inner.hooray == "yaml"
c2 = c.clone()
assert c2.outer.inner.hooray == "yaml"
assert c2._system == {"outer": {"inner": {"hooray": "yaml"}}}
@patch.object(
Config,
"_load_yaml",
return_value={"outer": {"inner": {"hooray": "yaml"}}},
)
def does_not_reload_file_data(self, load_yaml):
path = join(CONFIGS_PATH, "yaml/")
c = Config(system_prefix=path)
c2 = c.clone()
assert c2.outer.inner.hooray == "yaml"
# Crummy way to say "only got called with this specific invocation
# one time" (since assert_calls_with gets mad about other
# invocations w/ different args)
calls = load_yaml.call_args_list
my_call = call("{}invoke.yaml".format(path))
try:
calls.remove(my_call)
assert my_call not in calls
except ValueError:
err = "{} not found in {} even once!"
assert False, err.format(my_call, calls)
def preserves_env_data(self):
os.environ["INVOKE_FOO"] = "bar"
c = Config(defaults={"foo": "notbar"})
c.load_shell_env()
c2 = c.clone()
assert c2.foo == "bar"
def works_correctly_when_subclassed(self):
# Because sometimes, implementation #1 is really naive!
class MyConfig(Config):
pass
c = MyConfig()
assert isinstance(c, MyConfig) # sanity
c2 = c.clone()
assert isinstance(c2, MyConfig) # actual test
class into_kwarg:
"'into' kwarg"
def is_not_required(self):
c = Config(defaults={"meh": "okay"})
c2 = c.clone()
assert c2.meh == "okay"
def raises_TypeError_if_value_is_not_Config_subclass(self):
try:
Config().clone(into=17)
except TypeError:
pass
else:
assert False, "Non-class obj did not raise TypeError!"
class Foo(object):
pass
try:
Config().clone(into=Foo)
except TypeError:
pass
else:
assert False, "Non-subclass did not raise TypeError!"
def resulting_clones_are_typed_as_new_class(self):
class MyConfig(Config):
pass
c = Config()
c2 = c.clone(into=MyConfig)
assert type(c2) is MyConfig
def non_conflicting_values_are_merged(self):
# NOTE: this is really just basic clone behavior.
class MyConfig(Config):
@staticmethod
def global_defaults():
orig = Config.global_defaults()
orig["new"] = {"data": "ohai"}
return orig
c = Config(defaults={"other": {"data": "hello"}})
c["runtime"] = {"modification": "sup"}
c2 = c.clone(into=MyConfig)
# New default data from MyConfig present
assert c2.new.data == "ohai"
# As well as old default data from the cloned instance
assert c2.other.data == "hello"
# And runtime user mods from the cloned instance
assert c2.runtime.modification == "sup"
def does_not_deepcopy(self):
c = Config(
defaults={
# Will merge_dicts happily
"oh": {"dear": {"god": object()}},
# And shallow-copy compound values
"shallow": {"objects": ["copy", "okay"]},
                    # Will preserve references to the innermost dict, sadly. Not
# much we can do without incurring deepcopy problems (or
# reimplementing it entirely)
"welp": {"cannot": ["have", {"everything": "we want"}]},
}
)
c2 = c.clone()
# Basic identity
assert c is not c2, "Clone had same identity as original!"
# Dicts get recreated
assert c.oh is not c2.oh, "Top level key had same identity!"
assert (
c.oh.dear is not c2.oh.dear
), "Midlevel key had same identity!" # noqa
# Basic values get copied
err = "Leaf object() had same identity!"
assert c.oh.dear.god is not c2.oh.dear.god, err
assert c.shallow.objects == c2.shallow.objects
err = "Shallow list had same identity!"
assert c.shallow.objects is not c2.shallow.objects, err
            # Deeply nested non-dict objects are still problematic, oh well
err = "Huh, a deeply nested dict-in-a-list had different identity?"
assert c.welp.cannot[1] is c2.welp.cannot[1], err
err = "Huh, a deeply nested dict-in-a-list value had different identity?" # noqa
assert (
c.welp.cannot[1]["everything"]
is c2.welp.cannot[1]["everything"]
), err # noqa
def can_be_pickled(self):
c = Config(overrides={"foo": {"bar": {"biz": ["baz", "buzz"]}}})
c2 = pickle.loads(pickle.dumps(c))
assert c == c2
assert c is not c2
assert c.foo.bar.biz is not c2.foo.bar.biz
# NOTE: merge_dicts has its own very low level unit tests in its own file
|
the-stack_106_27349 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
import sys
from llnl.util.filesystem import fix_darwin_install_name
class Papi(Package):
"""PAPI provides the tool designer and application engineer with a
consistent interface and methodology for use of the performance
counter hardware found in most major microprocessors. PAPI
enables software engineers to see, in near real time, the
relation between software performance and processor events. In
    addition, Component PAPI provides access to a collection of
    components that expose performance measurement opportunities
across the hardware and software stack."""
homepage = "http://icl.cs.utk.edu/papi/index.html"
url = "http://icl.cs.utk.edu/projects/papi/downloads/papi-5.4.1.tar.gz"
version('5.5.1', '86a8a6f3d0f34cd83251da3514aae15d')
version('5.5.0', '5e1244a04ca031d4cc29b46ce3dd05b5')
version('5.4.3', '3211b5a5bb389fe692370f5cf4cc2412')
version('5.4.1', '9134a99219c79767a11463a76b0b01a2')
version('5.3.0', '367961dd0ab426e5ae367c2713924ffb')
def install(self, spec, prefix):
with working_dir("src"):
configure_args = ["--prefix=%s" % prefix]
# PAPI uses MPI if MPI is present; since we don't require
# an MPI package, we ensure that all attempts to use MPI
# fail, so that PAPI does not get confused
configure_args.append('MPICC=:')
configure(*configure_args)
# Don't use <malloc.h>
for level in [".", "*", "*/*"]:
files = glob.iglob(join_path(level, "*.[ch]"))
filter_file(r"\<malloc\.h\>", "<stdlib.h>", *files)
make()
make("install")
# The shared library is not installed correctly on Darwin
if sys.platform == 'darwin':
os.rename(join_path(prefix.lib, 'libpapi.so'),
join_path(prefix.lib, 'libpapi.dylib'))
fix_darwin_install_name(prefix.lib)
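# Typical usage sketch (assumed standard Spack workflow, not part of this
# package file):
#   spack install [email protected]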
|
the-stack_106_27350 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import os
import random
import re
import sys
from abc import ABCMeta, abstractmethod
from threading import Thread
from time import time, sleep
import os.path
from os.path import dirname, exists, isdir, join
import mycroft.util
from mycroft.enclosure.api import EnclosureAPI
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.metrics import report_timing, Stopwatch
from mycroft.util import (
play_wav, play_mp3, check_for_signal, create_signal, resolve_resource_file
)
from mycroft.util.log import LOG
from queue import Queue, Empty
def send_playback_metric(stopwatch, ident):
"""
Send playback metrics in a background thread
"""
def do_send(stopwatch, ident):
report_timing(ident, 'speech_playback', stopwatch)
t = Thread(target=do_send, args=(stopwatch, ident))
t.daemon = True
t.start()
class PlaybackThread(Thread):
"""
Thread class for playing back tts audio and sending
visime data to enclosure.
"""
def __init__(self, queue):
super(PlaybackThread, self).__init__()
self.queue = queue
self._terminated = False
self._processing_queue = False
self._clear_visimes = False
def init(self, tts):
self.tts = tts
def clear_queue(self):
"""
Remove all pending playbacks.
"""
while not self.queue.empty():
self.queue.get()
try:
self.p.terminate()
except Exception:
pass
def run(self):
"""
            Thread main loop. Get audio and visime data from the queue
and play.
"""
while not self._terminated:
try:
snd_type, data, visimes, ident = self.queue.get(timeout=2)
self.blink(0.5)
if not self._processing_queue:
self._processing_queue = True
self.tts.begin_audio()
stopwatch = Stopwatch()
with stopwatch:
if snd_type == 'wav':
self.p = play_wav(data)
elif snd_type == 'mp3':
self.p = play_mp3(data)
if visimes:
if self.show_visimes(visimes):
self.clear_queue()
else:
self.p.communicate()
self.p.wait()
send_playback_metric(stopwatch, ident)
if self.queue.empty():
self.tts.end_audio()
self._processing_queue = False
self._clear_visimes = False
self.blink(0.2)
except Empty:
pass
except Exception as e:
LOG.exception(e)
if self._processing_queue:
self.tts.end_audio()
self._processing_queue = False
def show_visimes(self, pairs):
"""
Send visime data to enclosure
Args:
pairs(list): Visime and timing pair
Returns:
True if button has been pressed.
"""
start = time()
if self.enclosure:
self.enclosure.mouth_viseme_list(start, pairs)
# TODO 19.02 Remove the one by one method below
for code, duration in pairs:
if self._clear_visimes:
self._clear_visimes = False
return True
if self.enclosure:
# Include time stamp to assist with animation timing
self.enclosure.mouth_viseme(code, start + duration)
delta = time() - start
if delta < duration:
sleep(duration - delta)
return False
def clear_visimes(self):
self._clear_visimes = True
def clear(self):
""" Clear all pending actions for the TTS playback thread. """
self.clear_queue()
self.clear_visimes()
def blink(self, rate=1.0):
""" Blink mycroft's eyes """
if self.enclosure and random.random() < rate:
self.enclosure.eyes_blink("b")
def stop(self):
""" Stop thread """
self._terminated = True
self.clear_queue()
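# Illustrative sketch (not part of the original module): the entries that
# TTS.execute() puts on ``self.queue`` for PlaybackThread.run() to consume are
# 4-tuples of (audio type, file path, viseme pairs, ident); the concrete values
# below are made up for illustration only.
#
#     queue.put((
#         'wav',                                # snd_type: 'wav' or 'mp3'
#         '/tmp/mycroft/cache/tts/ab12.wav',    # data: path to synthesized audio
#         [('4', 0.17), ('0', 0.32)],           # visimes: (code, end offset in s)
#         'interaction-1234',                   # ident: id of current interaction
#     ))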
class TTS(metaclass=ABCMeta):
"""
TTS abstract class to be implemented by all TTS engines.
It aggregates the minimum required parameters and exposes
``execute(sentence)`` and ``validate_ssml(sentence)`` functions.
Args:
lang (str):
config (dict): Configuration for this specific tts engine
validator (TTSValidator): Used to verify proper installation
phonetic_spelling (bool): Whether to spell certain words phonetically
ssml_tags (list): Supported ssml properties. Ex. ['speak', 'prosody']
"""
def __init__(self, lang, config, validator, audio_ext='wav',
phonetic_spelling=True, ssml_tags=None):
super(TTS, self).__init__()
        self.bus = None  # initialized in "init" step
self.lang = lang or 'en-us'
self.config = config
self.validator = validator
self.phonetic_spelling = phonetic_spelling
self.audio_ext = audio_ext
self.ssml_tags = ssml_tags or []
self.voice = config.get("voice")
self.filename = '/tmp/tts.wav'
self.enclosure = None
random.seed()
self.queue = Queue()
self.playback = PlaybackThread(self.queue)
self.playback.start()
self.clear_cache()
self.spellings = self.load_spellings()
def load_spellings(self):
"""Load phonetic spellings of words as dictionary"""
path = join('text', self.lang, 'phonetic_spellings.txt')
spellings_file = resolve_resource_file(path)
if not spellings_file:
return {}
try:
with open(spellings_file) as f:
lines = filter(bool, f.read().split('\n'))
lines = [i.split(':') for i in lines]
return {key.strip(): value.strip() for key, value in lines}
except ValueError:
LOG.exception('Failed to load phonetic spellings.')
return {}
def begin_audio(self):
"""Helper function for child classes to call in execute()"""
# Create signals informing start of speech
self.bus.emit(Message("recognizer_loop:audio_output_start"))
def end_audio(self):
"""
Helper function for child classes to call in execute().
Sends the recognizer_loop:audio_output_end message, indicating
that speaking is done for the moment. It also checks if cache
directory needs cleaning to free up disk space.
"""
self.bus.emit(Message("recognizer_loop:audio_output_end"))
# Clean the cache as needed
cache_dir = mycroft.util.get_cache_directory("tts")
mycroft.util.curate_cache(cache_dir, min_free_percent=100)
# This check will clear the "signal"
check_for_signal("isSpeaking")
def init(self, bus):
""" Performs intial setup of TTS object.
Arguments:
bus: Mycroft messagebus connection
"""
self.bus = bus
self.playback.init(self)
self.enclosure = EnclosureAPI(self.bus)
self.playback.enclosure = self.enclosure
def get_tts(self, sentence, wav_file):
"""
Abstract method that a tts implementation needs to implement.
Should get data from tts.
Args:
sentence(str): Sentence to synthesize
wav_file(str): output file
Returns:
tuple: (wav_file, phoneme)
"""
pass
def modify_tag(self, tag):
"""Override to modify each supported ssml tag"""
return tag
@staticmethod
def remove_ssml(text):
        return re.sub('<[^>]*>', '', text).replace('  ', ' ')
def validate_ssml(self, utterance):
"""
Check if engine supports ssml, if not remove all tags
Remove unsupported / invalid tags
Args:
utterance(str): Sentence to validate
Returns: validated_sentence (str)
"""
# if ssml is not supported by TTS engine remove all tags
if not self.ssml_tags:
return self.remove_ssml(utterance)
# find ssml tags in string
tags = re.findall('<[^>]*>', utterance)
for tag in tags:
if any(supported in tag for supported in self.ssml_tags):
utterance = utterance.replace(tag, self.modify_tag(tag))
else:
# remove unsupported tag
utterance = utterance.replace(tag, "")
# return text with supported ssml tags only
        return utterance.replace("  ", " ")
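    # Illustrative example (assumed tag set, not from the original source): with
    # self.ssml_tags = ['speak'], validate_ssml() keeps the supported tags and
    # strips the rest:
    #
    #     '<speak>Hi <prosody rate="slow">there</prosody></speak>'
    #     -> '<speak>Hi there</speak>'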
def execute(self, sentence, ident=None):
"""
Convert sentence to speech, preprocessing out unsupported ssml
The method caches results if possible using the hash of the
sentence.
Args:
sentence: Sentence to be spoken
ident: Id reference to current interaction
"""
sentence = self.validate_ssml(sentence)
create_signal("isSpeaking")
if self.phonetic_spelling:
for word in re.findall(r"[\w']+", sentence):
if word.lower() in self.spellings:
sentence = sentence.replace(word,
self.spellings[word.lower()])
key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
key + '.' + self.audio_ext)
if os.path.exists(wav_file):
LOG.debug("TTS cache hit")
phonemes = self.load_phonemes(key)
else:
wav_file, phonemes = self.get_tts(sentence, wav_file)
if phonemes:
self.save_phonemes(key, phonemes)
vis = self.visime(phonemes)
self.queue.put((self.audio_ext, wav_file, vis, ident))
def visime(self, phonemes):
"""
        Create visimes from phonemes. Needs to be implemented by each
        TTS backend.
Args:
phonemes(str): String with phoneme data
"""
return None
def clear_cache(self):
""" Remove all cached files. """
if not os.path.exists(mycroft.util.get_cache_directory('tts')):
return
for f in os.listdir(mycroft.util.get_cache_directory("tts")):
file_path = os.path.join(mycroft.util.get_cache_directory("tts"),
f)
if os.path.isfile(file_path):
os.unlink(file_path)
def save_phonemes(self, key, phonemes):
"""
Cache phonemes
Args:
key: Hash key for the sentence
phonemes: phoneme string to save
"""
cache_dir = mycroft.util.get_cache_directory("tts")
pho_file = os.path.join(cache_dir, key + ".pho")
try:
with open(pho_file, "w") as cachefile:
cachefile.write(phonemes)
except Exception:
LOG.exception("Failed to write {} to cache".format(pho_file))
pass
def load_phonemes(self, key):
"""
Load phonemes from cache file.
Args:
Key: Key identifying phoneme cache
"""
pho_file = os.path.join(mycroft.util.get_cache_directory("tts"),
key + ".pho")
if os.path.exists(pho_file):
try:
with open(pho_file, "r") as cachefile:
phonemes = cachefile.read().strip()
return phonemes
except Exception:
LOG.debug("Failed to read .PHO from cache")
return None
def __del__(self):
self.playback.stop()
self.playback.join()
class TTSValidator(metaclass=ABCMeta):
"""
TTS Validator abstract class to be implemented by all TTS engines.
It exposes and implements ``validate(tts)`` function as a template to
validate the TTS engines.
"""
def __init__(self, tts):
self.tts = tts
def validate(self):
self.validate_dependencies()
self.validate_instance()
self.validate_filename()
self.validate_lang()
self.validate_connection()
def validate_dependencies(self):
pass
def validate_instance(self):
clazz = self.get_tts_class()
if not isinstance(self.tts, clazz):
raise AttributeError('tts must be instance of ' + clazz.__name__)
def validate_filename(self):
filename = self.tts.filename
if not (filename and filename.endswith('.wav')):
raise AttributeError('file: %s must be in .wav format!' % filename)
dir_path = dirname(filename)
if not (exists(dir_path) and isdir(dir_path)):
raise AttributeError('filename: %s is not valid!' % filename)
@abstractmethod
def validate_lang(self):
pass
@abstractmethod
def validate_connection(self):
pass
@abstractmethod
def get_tts_class(self):
pass
class TTSFactory:
from mycroft.tts.espeak_tts import ESpeak
from mycroft.tts.fa_tts import FATTS
from mycroft.tts.google_tts import GoogleTTS
from mycroft.tts.mary_tts import MaryTTS
from mycroft.tts.mimic_tts import Mimic
# from mycroft.tts.flite_tts import Flite
from mycroft.tts.arabic_tts import ArabicTTS
from mycroft.tts.spdsay_tts import SpdSay
from mycroft.tts.bing_tts import BingTTS
from mycroft.tts.ibm_tts import WatsonTTS
from mycroft.tts.kacst_tts import KACSTTTS
from mycroft.tts.responsive_voice_tts import ResponsiveVoice
from mycroft.tts.mimic2_tts import Mimic2
CLASSES = {
"mimic": Mimic,
# "flite": Flite,
"arabic": ArabicTTS,
"mimic2": Mimic2,
"google": GoogleTTS,
"marytts": MaryTTS,
"fatts": FATTS,
"espeak": ESpeak,
"spdsay": SpdSay,
"watson": WatsonTTS,
"kacst": KACSTTTS,
"bing": BingTTS,
"responsive_voice": ResponsiveVoice
}
@staticmethod
def create():
"""
Factory method to create a TTS engine based on configuration.
The configuration file ``mycroft.conf`` contains a ``tts`` section with
the name of a TTS module to be read by this method.
"tts": {
"module": <engine_name>
}
"""
config = Configuration.get()
lang = config.get("lang", "en-us")
tts_module = config.get('tts', {}).get('module', 'mimic')
tts_config = config.get('tts', {}).get(tts_module, {})
tts_lang = tts_config.get('lang', lang)
clazz = TTSFactory.CLASSES.get(tts_module)
tts = clazz(tts_lang, tts_config)
tts.validator.validate()
return tts
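# Illustrative usage sketch (not part of the original module): with a
# mycroft.conf along the lines of {"tts": {"module": "mimic"}}, the factory
# resolves the engine class, validates it and hands back a ready instance.
#
#     tts = TTSFactory.create()      # reads Configuration.get() internally
#     tts.init(bus)                  # bus: an already-connected messagebus client
#     tts.execute("Hello world")     # synthesize and queue the audio for playback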
|
the-stack_106_27351 | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2 python3
"""Tests for infectious_disease_rl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl.testing import absltest
from experiments import infectious_disease_rl
class InfectiousDiseaseRlTest(absltest.TestCase):
def test_dopamine_train_eval(self):
"""Tests that both train and eval can execute without raising errors."""
tmpdir = tempfile.mkdtemp()
runner = infectious_disease_rl.dopamine_train(
base_dir=tmpdir,
hidden_layer_size=10,
gamma=0.5,
learning_rate=0.1,
num_train_steps=10,
network='chain')
infectious_disease_rl.dopamine_eval(runner, patient0=0)
def test_negative_delta_percent_sick(self):
reward_fn = infectious_disease_rl.NegativeDeltaPercentSick(base=0.25)
observation = {'health_states': [0, 1, 2, 1]}
# 50% are infected. The base is 25%, so the negative delta is -0.25
self.assertEqual(reward_fn(observation), -0.25)
# Using the same observation a second time. Now the percent infected has not
# changed, so negative delta should be 0.
self.assertEqual(reward_fn(observation), 0)
if __name__ == '__main__':
absltest.main()
|
the-stack_106_27353 | """
Plotting imports for PyDSTool, from Matplotlib's pyplot library.
Robert Clewley, March 2006.
"""
from __future__ import absolute_import, print_function
from numpy import Inf, NaN, isfinite, int, int8, int16, int32, int64, float, float32, float64
try:
import matplotlib
ver = matplotlib.__version__.split(".")
if int(ver[0]) == 0 and int(ver[1]) < 65:
import matplotlib.matlab as plt
from matplotlib.matlab import *
else:
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
except RuntimeError as err:
if str(err) == 'could not open display':
failed=True
else:
raise
except ImportError:
failed=True
else:
failed=False
if failed:
# Dummy plot overrides for PyDSTool when matplotlib fails to import
def plot(*args, **kw):
print("Warning: plot does not work!")
def save_fig(fignum, fname, formats=[]):
print("Warning: plot does not work!")
print("Warning: matplotlib failed to import properly and so is not")
print(" providing a graphing interface")
plt = None # will cause an error if someone tries to access in order to plot
gca = None
else:
import os
from .Trajectory import Trajectory
from .common import _num_types
# Convenient shorthand to permit singleton numeric types and Trajectories
# in the plot arguments without first converting them to lists or arrays.
def plot(*args, **kw):
new_args = list(args)
if isinstance(args[0], _num_types):
new_args[0] = [args[0]]
elif isinstance(args[0], Trajectory):
try:
new_args[0] = args[0].sample()
except:
raise RuntimeError("Could not sample trajectory with default "
"options for plotting")
if len(args) > 1:
if isinstance(args[1], _num_types):
new_args[1] = [args[1]]
elif isinstance(args[1], Trajectory):
try:
new_args[1] = args[1].sample()
except:
raise RuntimeError("Could not sample trajectory with "
"default options for plotting")
return plt.plot(*tuple(new_args), **kw)
def save_fig(fignum, fname, formats=['png','svg','eps']):
"""Save figure fignum to multiple files with different formats
and extensions given by the formats argument.
These are platform-dependent and are specific to matplotlib's support.
"""
for f in formats:
plt.figure(fignum).savefig(fname+'.'+f)
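    # Illustrative usage sketch (assumed objects, not from the original source),
    # where ``traj`` stands for any PyDSTool Trajectory built elsewhere:
    #
    #     plot(0.5, 1.2, 'ro')   # singleton numerics are wrapped in lists first
    #     plot(traj)             # a Trajectory is sampled with default options
    #     save_fig(1, 'phase_portrait', formats=['png', 'svg'])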
|
the-stack_106_27354 | """
Following DeepRobust repo
https://github.com/DSE-MSU/DeepRobust
"""
"""
FGA: Fast Gradient Attack on Network Embedding (https://arxiv.org/pdf/1809.02797.pdf)
Another very similar algorithm to mention here is FGSM (for graph data).
It is mentioned in Zugner's paper,
Adversarial Attacks on Neural Networks for Graph Data, KDD'18
"""
import torch
from base_attack import BaseAttack
from torch.nn.parameter import Parameter
from copy import deepcopy
from utils import *
import torch.nn.functional as F
import scipy.sparse as sp
class FGA(BaseAttack):
"""FGA/FGSM.
Parameters
----------
model :
model to attack
nnodes : int
number of nodes in the input graph
feature_shape : tuple
shape of the input node features
attack_structure : bool
whether to attack graph structure
attack_features : bool
whether to attack node features
device: str
'cpu' or 'cuda'
Examples
--------
>>> from deeprobust.graph.data import Dataset
>>> from deeprobust.graph.defense import GCN
>>> from deeprobust.graph.targeted_attack import FGA
>>> data = Dataset(root='/tmp/', name='cora')
>>> adj, features, labels = data.adj, data.features, data.labels
>>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
>>> # Setup Surrogate model
>>> surrogate = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
nhid=16, dropout=0, with_relu=False, with_bias=False, device='cpu').to('cpu')
>>> surrogate.fit(features, adj, labels, idx_train, idx_val, patience=30)
>>> # Setup Attack Model
>>> target_node = 0
>>> model = FGA(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=False, device='cpu').to('cpu')
>>> # Attack
>>> model.attack(features, adj, labels, idx_train, target_node, n_perturbations=5)
>>> modified_adj = model.modified_adj
"""
def __init__(self, model, nnodes, feature_shape=None, attack_structure=True, attack_features=False, device='cpu'):
super(FGA, self).__init__(model, nnodes, attack_structure=attack_structure, attack_features=attack_features, device=device)
assert not self.attack_features, "not support attacking features"
if self.attack_features:
self.feature_changes = Parameter(torch.FloatTensor(feature_shape))
self.feature_changes.data.fill_(0)
def attack(self, ori_features, ori_adj, labels, idx_train, target_node, n_perturbations, verbose=False, **kwargs):
"""Generate perturbations on the input graph.
Parameters
----------
ori_features : scipy.sparse.csr_matrix
            Original (unperturbed) node feature matrix
ori_adj : scipy.sparse.csr_matrix
            Original (unperturbed) adjacency matrix
labels :
node labels
idx_train:
training node indices
target_node : int
target node index to be attacked
n_perturbations : int
Number of perturbations on the input graph. Perturbations could
be edge removals/additions or feature removals/additions.
"""
modified_adj = ori_adj.todense()
modified_features = ori_features.todense()
modified_adj, modified_features, labels = to_tensor(modified_adj, modified_features, labels, device=self.device)
self.surrogate.eval()
        if verbose:
            print('number of perturbations: %s' % n_perturbations)
pseudo_labels = self.surrogate.predict().detach().argmax(1)
pseudo_labels[idx_train] = labels[idx_train]
modified_adj.requires_grad = True
for i in range(n_perturbations):
adj_norm = normalize_adj_tensor(modified_adj)
if self.attack_structure:
output = self.surrogate(modified_features, adj_norm)
loss = F.nll_loss(output[[target_node]], pseudo_labels[[target_node]])
grad = torch.autograd.grad(loss, modified_adj)[0]
# bidirection
grad = (grad[target_node] + grad[:, target_node]) * (-2*modified_adj[target_node] + 1)
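                # The factor (-2*A[target] + 1) is +1 for node pairs that are
                # not yet connected and -1 for existing edges, so a large
                # positive score always means "flipping this entry increases the
                # surrogate loss", whether the flip adds or removes an edge; the
                # -10 below keeps the self-loop (target, target) from being picked.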
grad[target_node] = -10
grad_argmax = torch.argmax(grad)
value = -2*modified_adj[target_node][grad_argmax] + 1
modified_adj.data[target_node][grad_argmax] += value
modified_adj.data[grad_argmax][target_node] += value
if self.attack_features:
pass
modified_adj = modified_adj.detach().cpu().numpy()
modified_adj = sp.csr_matrix(modified_adj)
self.check_adj(modified_adj)
self.modified_adj = modified_adj
# self.modified_features = modified_features
|
the-stack_106_27356 | from __future__ import annotations
import collections
from datetime import timedelta
import functools
import gc
from io import StringIO
import json
import operator
import pickle
import re
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import lib
from pandas._libs.tslibs import Tick, Timestamp, to_offset
from pandas._typing import (
Axis,
CompressionOptions,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
IndexLabel,
JSONSerializable,
Label,
Level,
Renamer,
StorageOptions,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, InvalidIndexError
from pandas.util._decorators import Appender, doc, rewrite_axis_style_signature
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.flags import Flags
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, MultiIndex, RangeIndex, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import align_method_FRAME
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import get_indexer_indexer
from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.indexers import BaseIndexer
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = {**_shared_docs}
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result)
return
return result
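# Illustrative behaviour sketch (assumed data, not from the original source):
#
#     s = pd.Series([0, 1, 2, 0, 3])
#     s.replace(0, method='pad')   # -> [0, 1, 2, 2, 3]; the leading 0 has no
#                                  #    earlier value to propagate, so it stays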
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_mgr",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
"_flags",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values", "tshift"])
_metadata: List[str] = []
_is_copy = None
_mgr: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
copy: bool = False,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
@classmethod
def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager:
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
axe = ensure_index(axe)
bm_axis = cls._get_block_manager_axis(a)
mgr = mgr.reindex_axis(axe, axis=bm_axis, copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
# attrs and flags
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes of this dataset.
.. warning::
attrs is experimental and may change without warning.
See Also
--------
DataFrame.flags
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
@property
def flags(self) -> Flags:
"""
Get the properties associated with this pandas object.
The available flags are
* :attr:`Flags.allows_duplicate_labels`
See Also
--------
Flags
DataFrame.attrs
Notes
-----
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags
<Flags(allows_duplicate_labels=True)>
        Flags can be read or set using the ``.`` accessor
>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False
Or by slicing with a key
>>> df.flags["allows_duplicate_labels"]
False
>>> df.flags["allows_duplicate_labels"] = True
"""
return self._flags
def set_flags(
self: FrameOrSeries,
*,
copy: bool = False,
allows_duplicate_labels: Optional[bool] = None,
) -> FrameOrSeries:
"""
Return a new object with updated flags.
Parameters
----------
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
Returns
-------
Series or DataFrame
The same type as the caller.
See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.
Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.
This method is intended to be used in method chains.
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
"""
df = self.copy(deep=copy)
if allows_duplicate_labels is not None:
df.flags["allows_duplicate_labels"] = allows_duplicate_labels
return df
@classmethod
def _validate_dtype(cls, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {cls.__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""
Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""
        Used when a manipulation result has one lower dimension than the
        original, such as slicing a single DataFrame column.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""
        Used when a manipulation result has one higher dimension than the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Internals
@property
def _data(self):
# GH#33054 retained because some downstream packages uses this,
# e.g. fastparquet
return self._mgr
# ----------------------------------------------------------------------
# Axis
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {0: 0, "index": 0, "rows": 0}
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
warnings.warn("_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3)
return {"index": 0}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
warnings.warn("_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3)
return {0: "index"}
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@classmethod
def _construct_axes_from_arguments(
cls, args, kwargs, require_all: bool = False, sentinel=None
):
"""
Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in cls._AXIS_ORDERS:
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError as err:
if require_all:
raise TypeError(
"not enough/duplicate arguments specified!"
) from err
axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis: Axis) -> int:
try:
return cls._AXIS_TO_AXIS_NUMBER[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
@classmethod
def _get_axis_name(cls, axis: Axis) -> str:
axis_number = cls._get_axis_number(axis)
return cls._AXIS_ORDERS[axis_number]
def _get_axis(self, axis: Axis) -> Index:
axis_number = self._get_axis_number(axis)
assert axis_number in {0, 1}
return self.index if axis_number == 0 else self.columns
@classmethod
def _get_block_manager_axis(cls, axis: Axis) -> int:
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
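    # Illustrative note (not from the original source): for DataFrame,
    # _AXIS_REVERSED is True and _AXIS_LEN == 2, so the user-facing axis 0
    # (index) maps to BlockManager axis 1 and axis 1 (columns) maps to 0;
    # for Series the axis number passes through unchanged.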
def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, Union[Series, MultiIndex]] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
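    # Illustrative sketch (assumed frame, not from the original source): for a
    # frame whose index has a level named 'year', the index-axis resolvers come
    # back roughly as {'year': <Series of level values>, 'index': <Series>},
    # which is what lets DataFrame.query("year > 2000") refer to the index
    # level by name.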
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self) -> Index:
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self) -> Index:
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._mgr.ndim
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
self._check_inplace_and_allows_duplicate_labels(inplace)
return self._set_axis_nocheck(labels, axis, inplace)
def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis: int, labels: Index) -> None:
labels = ensure_index(labels)
self._mgr.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
# ignore needed because of NDFrame constructor is different than
# DataFrame/Series constructors.
return self._constructor(
new_values, *new_axes # type: ignore[arg-type]
).__finalize__(self, method="swapaxes")
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the level(s) is removed:
* 0 or 'index': remove level(s) in column.
* 1 or 'columns': remove level(s) in row.
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self, item: Label) -> Union[Series, Any]:
result = self[item]
del self[item]
if self.ndim == 2:
result._reset_cacher()
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new {klass}. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : {klass} (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
self._check_inplace_and_allows_duplicate_labels(inplace)
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
new_index = ax._transform_index(f, level)
result._set_axis_nocheck(new_index, axis=axis_no, inplace=True)
result._clear_item_cache()
if inplace:
self._update_inplace(result)
return None
else:
return result.__finalize__(self, method="rename")
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Note that the ``columns`` parameter is not allowed if the
object is a Series. This parameter only apply for DataFrame
type objects.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._get_axis_name(axis))
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other: object) -> bool:
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal.
        The row/column index does not need to have the same type, as long
as the values are considered equal. Corresponding columns must be of
the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not (isinstance(other, type(self)) or isinstance(self, type(other))):
return False
other = cast(NDFrame, other)
return self._mgr.equals(other._mgr)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = self._values
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = self._values
if is_bool_dtype(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(
"Unary plus expects bool, numeric, timedelta, "
f"or object dtype, not {values.dtype}"
)
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._mgr.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self, method="__invert__")
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element Series or DataFrame.
This must be a boolean scalar value, either True or False. It will raise a
ValueError if the Series or DataFrame does not have exactly 1 element, or that
element is not boolean (integer values 0 and 1 will also raise an exception).
Returns
-------
bool
The value in the Series or DataFrame.
See Also
--------
Series.astype : Change the data type of a Series, including to boolean.
DataFrame.astype : Change the data type of a DataFrame, including to boolean.
numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.
Examples
--------
The method will only work for single element objects with a boolean value:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False
>>> pd.DataFrame({'col': [True]}).bool()
True
>>> pd.DataFrame({'col': [False]}).bool()
False
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
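    # Illustrative sketch (assumed frame, not from the original source): for
    #     df = pd.DataFrame({'col': [1, 2]},
    #                       index=pd.Index([10, 20], name='lev'))
    # df._is_level_reference('lev') is True, df._is_label_reference('col') is
    # True, and both therefore count as label-or-level references on axis 0;
    # a name matching neither returns False from all three helpers.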
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
        key : str
            Label or level name.
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns).
        Returns
        -------
        values : np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
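        Examples
        --------
        A minimal illustration (hypothetical frame; this is an internal helper):
        >>> df = pd.DataFrame({"a": ["x", "y"]},
        ...                   index=pd.Index(["i", "j"], name="lev"))
        >>> df._get_label_or_level_values("a")
        array(['x', 'y'], dtype=object)
        >>> df._get_label_or_level_values("lev")
        array(['i', 'j'], dtype=object)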
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
f"The {label_axis_name} label '{key}' is not unique.{multi_message}"
)
return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
        keys : str or list of str
            Labels or levels to drop.
        axis : int, default 0
            Axis that levels are associated with (0 for index, 1 for columns).
        Returns
        -------
        dropped : DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
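        Examples
        --------
        A minimal illustration (hypothetical frame; this is an internal helper):
        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]},
        ...                   index=pd.Index(["i", "j"], name="lev"))
        >>> df._drop_labels_or_levels("a").columns.tolist()
        ['b']
        >>> df._drop_labels_or_levels("lev").index.tolist()
        [0, 1]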
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
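        Examples
        --------
        For example (hypothetical frame), iterating a DataFrame yields its
        column labels:
        >>> list(pd.DataFrame({"a": [1], "b": [2]}))
        ['a', 'b']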
"""
return iter(self._info_axis)
    # TODO: can we get a better explanation of the 'info axis' concept here?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
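        Examples
        --------
        For example (hypothetical frame), the info axis of a DataFrame is its
        columns:
        >>> pd.DataFrame({"a": [1], "b": [2]}).keys()
        Index(['a', 'b'], dtype='object')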
"""
return self._info_axis
def items(self):
"""
Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
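        Examples
        --------
        A minimal illustration (hypothetical frame):
        >>> df = pd.DataFrame({"a": [1], "b": [2]})
        >>> [label for label, column in df.items()]
        ['a', 'b']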
"""
for h in self._info_axis:
yield h, self[h]
@doc(items)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return np.asarray(self._values, dtype=dtype)
def __array_wrap__(
self,
result: np.ndarray,
context: Optional[Tuple[Callable, Tuple[Any, ...], int]] = None,
):
"""
Gets called after a ufunc and other functions.
Parameters
----------
        result : np.ndarray
            The result of the ufunc or other function called on the NumPy array
            returned by __array__.
        context : tuple of (func, tuple, int)
            This parameter is returned by ufuncs as a 3-element tuple: (name of the
            ufunc, arguments of the ufunc, domain of the ufunc), but is not set by
            other numpy functions.
        Notes
        -----
        Series implements ``__array_ufunc__``, so this is not called for
        ufuncs on Series.
"""
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(
self, method="__array_wrap__"
)
    # ideally we would define this to avoid the getattr checks, but
    # it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_mgr=self._mgr,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
_flags={k: self.flags[k] for k in self.flags._keys},
**meta,
)
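    # A hedged sketch of how these two hooks are exercised: a standard pickle
    # round-trip calls __getstate__ on dump and __setstate__ on load, e.g.
    #   import pickle
    #   restored = pickle.loads(pickle.dumps(obj))  # obj: any Series/DataFrame
    # which rebuilds the object from the block manager, flags, attrs and
    # metadata captured in the state dict above.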
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._mgr = state
elif isinstance(state, dict):
if "_data" in state and "_mgr" not in state:
# compat for older pickles
state["_mgr"] = state.pop("_data")
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
flags = state.get("_flags", dict(allows_duplicate_labels=True))
object.__setattr__(self, "_flags", Flags(self, **flags))
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _mgr to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state and k != "_flags":
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
@doc(klass="object")
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
"""
Write {klass} to an Excel sheet.
To write a single {klass} to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
        Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: CompressionOptions = "infer",
index: bool_t = True,
indent: Optional[int] = None,
storage_options: StorageOptions = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where data component is like ``orient='records'``.
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json : Convert a JSON string to pandas object.
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> import json
>>> df = pd.DataFrame(
... [["a", "b"], ["c", "d"]],
... index=["row 1", "row 2"],
... columns=["col 1", "col 2"],
... )
>>> result = df.to_json(orient="split")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"columns": [
"col 1",
"col 2"
],
"index": [
"row 1",
"row 2"
],
"data": [
[
"a",
"b"
],
[
"c",
"d"
]
]
}
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> result = df.to_json(orient="records")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
[
{
"col 1": "a",
"col 2": "b"
},
{
"col 1": "c",
"col 2": "d"
}
]
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> result = df.to_json(orient="index")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"row 1": {
"col 1": "a",
"col 2": "b"
},
"row 2": {
"col 1": "c",
"col 2": "d"
}
}
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> result = df.to_json(orient="columns")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"col 1": {
"row 1": "a",
"row 2": "c"
},
"col 2": {
"row 1": "b",
"row 2": "d"
}
}
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> result = df.to_json(orient="values")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
[
[
"a",
"b"
],
[
"c",
"d"
]
]
Encoding with Table Schema:
>>> result = df.to_json(orient="table")
>>> parsed = json.loads(result)
>>> json.dumps(parsed, indent=4) # doctest: +SKIP
{
"schema": {
"fields": [
{
"name": "index",
"type": "string"
},
{
"name": "col 1",
"type": "string"
},
{
"name": "col 2",
"type": "string"
}
],
"primaryKey": [
"index"
],
"pandas_version": "0.20.0"
},
"data": [
{
"index": "row 1",
"col 1": "a",
"col 2": "b"
},
{
"index": "row 2",
"col 1": "c",
"col 2": "d"
}
]
}
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
storage_options=storage_options,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[Union[bool_t, List[str]]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
            connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
        An `sqlalchemy.engine.Connection` can also be passed to `con`:
>>> with engine.begin() as connection:
... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
... df1.to_sql('users', con=connection, if_exists='append')
This is allowed to support operations that require that the same
DBAPI connection is used for the entire operation.
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
>>> df2.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
(1, 'User 7')]
Overwrite the table with just ``df2``.
>>> df2.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 6'), (1, 'User 7')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: CompressionOptions = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
storage_options: StorageOptions = None,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(
self,
path,
compression=compression,
protocol=protocol,
storage_options=storage_options,
)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@doc(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
position=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{{table.tex}}``.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {{str: function}}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{{longtable}} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{{multirow}} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{{}}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{{}}`` in the output.
This is used with ``\ref{{}}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{{}}`` in the output.
{returns}
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
... mask=['red', 'purple'],
... weapon=['sai', 'bo staff']))
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{{tabular}}{{lll}}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{{tabular}}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
position=position,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[IndexLabel] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: CompressionOptions = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
errors: str = "strict",
storage_options: StorageOptions = None,
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a non-binary file object is passed, it should be opened
with `newline=''`, disabling universal newlines. If a binary
file object is passed, `mode` needs to contain a `'b'`.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
.. versionchanged:: 1.2.0
Support for binary file objects was introduced.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
is a non-binary file object.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is one of {'zip', 'gzip', 'bz2'}, or inferred as
one of the above, other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is
supported for compression modes 'gzip' and 'bz2'
as well as 'zip'.
.. versionchanged:: 1.2.0
Compression is supported for binary file objects.
.. versionchanged:: 1.2.0
Previous versions forwarded dict entries for 'gzip' to
`gzip.open` instead of `gzip.GzipFile` which prevented
setting `mtime`.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called (i.e. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
.. versionadded:: 1.1.0
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
errors=errors,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
storage_options=storage_options,
)
formatter.save()
if path_or_buf is None:
assert isinstance(formatter.path_or_buf, StringIO)
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""
Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""
Reset the cacher.
"""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
loc = self._info_axis.get_loc(item)
self._mgr.iset(loc, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
        See if we need to update our parent cacher; if ``clear``, then clear
        our cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
if len(self) == len(ref):
# otherwise, either self or ref has swapped in new arrays
ref._maybe_cache_changed(cacher[0], self)
else:
# GH#33675 we have swapped in a new array, so parent
# reference to self is now invalid
ref._item_cache.pop(cacher[0], None)
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referent")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._mgr.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self, method="take")
def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
        attribute to keep track of the parent dataframe (used in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
if not isinstance(labels, MultiIndex):
raise TypeError("Index must be a MultiIndex")
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
index = self.index
if isinstance(index, MultiIndex):
try:
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
except TypeError as e:
raise TypeError(f"Expected label or tuple of labels, got {key}") from e
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
                # that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._mgr.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
# All places that call _get_item_cache have unique columns,
# pending resolution of GH#33047
loc = self.columns.get_loc(item)
values = self._mgr.iget(loc)
res = self._box_col_values(values, loc)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
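        For example (a minimal sketch; this is internal, the public equivalent
        is positional ``.iloc`` slicing):
        >>> pd.Series([10, 20, 30, 40])._slice(slice(1, 3))
        1    20
        2    30
        dtype: int64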
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._mgr.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _iset_item(self, loc: int, value) -> None:
self._mgr.iset(loc, value)
self._clear_item_cache()
def _set_item(self, key, value) -> None:
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
return
NDFrame._iset_item(self, loc, value)
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
        Will return a boolean if we are a view and are cached, but of a
        single dtype, meaning that the cacher should be updated following
        the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referent", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referent")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
        stacklevel : int, default 4
            The level of the stack to show when the error is output.
        t : str, default "setting"
            The type of setting error.
        force : bool, default False
            If True, then force showing an error.
        Validate if we are doing a setitem on a chained copy.
        If you call this function, be sure to set the stacklevel such that the
        user will see the error *at the level of setting*.
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
        blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
        # This technically need not raise SettingWithCopy if both are views
        # (which is not generally guaranteed but is usually True); however,
        # this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referent":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
"""
Delete item
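        Illustrative usage (hypothetical frame; the column is removed in place):
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        >>> del df['A']
        >>> list(df.columns)
        ['B']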
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
loc = self.axes[-1].get_loc(key)
self._mgr.idelete(loc)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def _check_inplace_and_allows_duplicate_labels(self, inplace):
if inplace and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'inplace=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
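        Examples
        --------
        Illustrative usage with a small, hypothetical frame:
        >>> df = pd.DataFrame({'A': [1, 2]})
        >>> df.get('A')
        0    1
        1    2
        Name: A, dtype: int64
        >>> df.get('B', default='no such column')
        'no such column'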
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self) -> bool_t:
"""Return boolean indicating if self is view of another array """
return self._mgr.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
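        """
        Drop labels from rows or columns.
        Minimal sketch of a docstring for the shared implementation; the
        DataFrame/Series overrides carry the full parameter documentation.
        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        >>> df.drop(columns=['B'])
           A
        0  1
        1  2
        """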
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
result : same type as self
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._mgr = result._mgr
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
# error: Incompatible return value type (got "Optional[FrameOrSeries]",
# expected "FrameOrSeries")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
return self.rename(**mapper) # type: ignore[return-value, arg-type]
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
# error: Incompatible return value type (got "Optional[FrameOrSeries]",
# expected "FrameOrSeries")
# error: Argument 1 to "rename" of "NDFrame" has incompatible type
# "**Dict[str, partial[str]]"; expected "Union[str, int, None]"
return self.rename(**mapper) # type: ignore[return-value, arg-type]
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
key: ValueKeyFunc = None,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
Apply the key function to the values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect a
``Series`` and return a Series with the same shape as the input.
It will be applied to each column in `by` independently.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
See Also
--------
DataFrame.sort_index : Sort a DataFrame by the index.
Series.sort_values : Similar method for a Series.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... 'col4': ['a', 'B', 'c', 'D', 'e', 'F']
... })
>>> df
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
5 C 4 3 F
4 D 7 2 e
3 NaN 8 4 D
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3 col4
1 A 1 1 B
0 A 2 0 a
2 B 9 9 c
5 C 4 3 F
4 D 7 2 e
3 NaN 8 4 D
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3 col4
4 D 7 2 e
5 C 4 3 F
2 B 9 9 c
0 A 2 0 a
1 A 1 1 B
3 NaN 8 4 D
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3 col4
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
2 B 9 9 c
0 A 2 0 a
1 A 1 1 B
Sorting with a key function
>>> df.sort_values(by='col4', key=lambda col: col.str.lower())
col1 col2 col3 col4
0 A 2 0 a
1 A 1 1 B
2 B 9 9 c
3 NaN 8 4 D
4 D 7 2 e
5 C 4 3 F
Natural sort with the key argument,
using the `natsort <https://github.com/SethMMorton/natsort>` package.
>>> df = pd.DataFrame({
... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
... "value": [10, 20, 30, 40, 50]
... })
>>> df
time value
0 0hr 10
1 128hr 20
2 72hr 30
3 48hr 40
4 96hr 50
>>> from natsort import index_natsorted
>>> df.sort_values(
... by="time",
... key=lambda x: np.argsort(index_natsorted(df["time"]))
... )
time value
0 0hr 10
3 48hr 40
2 72hr 30
4 96hr 50
1 128hr 20
"""
raise AbstractMethodError(self)
def sort_index(
self,
axis=0,
level=None,
ascending: bool_t = True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool_t = True,
ignore_index: bool_t = False,
key: IndexKeyFunc = None,
):
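        """
        Sort object by labels along the given axis.
        Minimal sketch of a docstring for the shared implementation; the
        DataFrame/Series overrides carry the full parameter documentation.
        >>> df = pd.DataFrame({'a': [1, 2, 3]}, index=[3, 1, 2])
        >>> df.sort_index()
           a
        1  2
        2  3
        3  1
        """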
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
target = self._get_axis(axis)
indexer = get_indexer_indexer(
target, level, ascending, kind, na_position, sort_remaining, key
)
if indexer is None:
if inplace:
return
else:
return self.copy()
baxis = self._get_block_manager_axis(axis)
new_data = self._mgr.take(indexer, axis=baxis, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if ignore_index:
axis = 1 if isinstance(self, ABCDataFrame) else 0
new_data.axes[axis] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_index")
@doc(
klass=_shared_doc_kwargs["klass"],
axes=_shared_doc_kwargs["axes"],
optional_labels="",
optional_axis="",
)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform {klass} to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
{optional_labels}
{axes} : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
{optional_axis}
method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
{klass} with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={{'index', 'columns'}}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self, method="reindex")
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._mgr
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
# If we've made a copy once, no need to make another one
copy = False
if copy and new_data is self._mgr:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
        axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x) -> bool:
assert like is not None # needed for mypy
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x) -> bool:
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
        This function returns the last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
            Unless weights are a Series, weights must be the same length as
            the axis being sampled.
            If weights do not sum to 1, they will be normalized to sum to 1.
            Missing values in the weights column will be treated as zero.
            Infinite values are not allowed.
random_state : int, array-like, BitGenerator, np.random.RandomState, optional
            If int, array-like, or BitGenerator (NumPy>=1.17), seed for the
            random number generator.
            If np.random.RandomState, use as the numpy RandomState object.
.. versionchanged:: 1.1.0
array-like and BitGenerator (for NumPy>=1.17) object now passed to
np.random.RandomState() as seed
        axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
DataFrameGroupBy.sample: Generates random samples from each group of a
DataFrame object.
SeriesGroupBy.sample: Generates random samples from each group of a
Series object.
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
        If `frac` > 1, `replace` should be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsample sample of the ``DataFrame`` with replacement:
        Note that the `replace` parameter has to be `True` for `frac` > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights._values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
@doc(klass=_shared_doc_kwargs["klass"])
def pipe(self, func, *args, **kwargs):
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the {klass}.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the {klass}.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(func, arg2=b, arg3=c)
... ) # doctest: +SKIP
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... ) # doctest: +SKIP
"""
return com.pipe(self, func, *args, **kwargs)
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method: Optional[str] = None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : str, optional
A passed method name providing context on where ``__finalize__``
was called.
            .. warning::
                The value passed as `method` is not currently considered
                stable across pandas releases.
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
if method == "concat":
allows_duplicate_labels = all(
x.flags.allows_duplicate_labels for x in other.objs
)
self.flags.allows_duplicate_labels = allows_duplicate_labels
return self
def __getattr__(self, name: str):
"""
After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
"""
After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
"""
add the string-like attributes from the info_axis.
        If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""
Consolidate _mgr -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._mgr.blocks)
result = f()
if len(self._mgr.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._mgr = self._mgr.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._mgr.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self) -> bool_t:
f = lambda: self._mgr.is_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._mgr.is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._mgr.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
        A DataFrame with mixed-type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._mgr.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
data = self._mgr.get_dtypes()
return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._mgr.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
Create a series of dates:
>>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
>>> ser_date
0 2020-01-01
1 2020-01-02
2 2020-01-03
dtype: datetime64[ns]
Datetimes are localized to UTC first before
converting to the specified timezone:
>>> ser_date.astype('datetime64[ns, US/Eastern]')
0 2019-12-31 19:00:00-05:00
1 2020-01-01 19:00:00-05:00
2 2020-01-02 19:00:00-05:00
dtype: datetime64[ns, US/Eastern]
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self, method="astype")
# GH 33113: handle empty frame or series
if not results:
return self.copy()
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
        Updates to the data shared by shallow copy and original are reflected
        in both; the deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._mgr.copy(deep=deep)
self._clear_item_cache()
return self._constructor(data).__finalize__(self, method="copy")
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
return self._constructor(
self._mgr.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=True,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._mgr.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self, method="infer_objects")
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
        convert_boolean : bool, default True
            Whether object dtypes should be converted to ``BooleanDtype()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
            Method to use for filling holes in reindexed Series:
            pad / ffill: propagate last valid observation forward to next valid.
            backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
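            # no fill value given: fall back to pad/backfill interpolation
            # driven by ``method`` (validated above by validate_fillna_kwargs)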
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._mgr = result._mgr.downcast()
return result
new_data = self._mgr.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
value = value.reindex(self.index, copy=False)
value = value._values
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
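            # DataFrame with a dict/Series of per-column fill values:
            # fill each listed column separately, in place on the result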
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._mgr.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)._data
else:
raise ValueError(f"invalid fill value with a {type(value)}")
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="fillna")
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
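        Examples
        --------
        A minimal illustrative sketch (behaviour follows ``fillna(method='ffill')``):
        >>> s = pd.Series([1, np.nan, np.nan, 4])
        >>> s.ffill()
        0    1.0
        1    1.0
        2    1.0
        3    4.0
        dtype: float64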
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
pad = ffill
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
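        Examples
        --------
        A minimal illustrative sketch (behaviour follows ``fillna(method='bfill')``):
        >>> s = pd.Series([np.nan, 2, np.nan, 4])
        >>> s.bfill()
        0    2.0
        1    2.0
        2    4.0
        3    4.0
        dtype: float64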
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
backfill = bfill
@doc(klass=_shared_doc_kwargs["klass"])
def replace(
self,
to_replace=None,
value=None,
inplace: bool_t = False,
limit: Optional[int] = None,
regex=False,
method="pad",
):
"""
Replace values given in `to_replace` with `value`.
Values of the {klass} are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
            If True, performs the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int or None, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {{'pad', 'ffill', 'bfill', `None`}}
            The method to use for replacement when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
Returns
-------
{klass}
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is not a scalar, array-like, ``dict``, or ``None``
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
{klass}.fillna : Fill NA values.
{klass}.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']}})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({{0: 10, 1: 100}})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({{'A': 0, 'B': 5}}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({{'A': {{0: 100, 4: 400}}}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']}})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Compare the behavior of ``s.replace({{'a': None}})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({{'a': None}})`` is equivalent to
``s.replace(to_replace={{'a': None}}, value=None, method=None)``:
>>> s.replace({{'a': None}})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
if not (
is_scalar(to_replace)
or is_re_compilable(to_replace)
or is_list_like(to_replace)
):
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"{repr(type(to_replace).__name__)}"
)
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
if items:
keys, values = zip(*items)
else:
keys, values = ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
if inplace:
return
return self.copy()
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
# Note: Checking below for `in foo.keys()` instead of
# `in foo` is needed for when we have a Series and not dict
mapping = {
col: (to_replace[col], value[col])
for col in to_replace.keys()
if col in value.keys() and col in self
}
return self._replace_columnwise(mapping, inplace, regex)
# {'A': NA} -> 0
elif not is_list_like(value):
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-like to_replace "
"and non-None value"
)
mapping = {
col: (to_rep, value) for col, to_rep in to_replace.items()
}
return self._replace_columnwise(mapping, inplace, regex)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
self._consolidate_inplace()
new_data = self._mgr.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._mgr.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
# Operate column-wise
if self.ndim == 1:
raise ValueError(
"Series.replace cannot use dict-value and "
"non-None to_replace"
)
mapping = {col: (to_replace, val) for col, val in value.items()}
return self._replace_columnwise(mapping, inplace, regex)
elif not is_list_like(value): # NA -> 0
new_data = self._mgr.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="replace")
def interpolate(
self: FrameOrSeries,
method: str = "linear",
axis: Axis = 0,
limit: Optional[int] = None,
inplace: bool_t = False,
limit_direction: Optional[str] = None,
limit_area: Optional[str] = None,
downcast: Optional[str] = None,
**kwargs,
) -> Optional[FrameOrSeries]:
"""
        Fill NaN values using an interpolation method.
        Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
'cubicspline': Wrappers around the SciPy interpolation methods of
similar names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {{'forward', 'backward', 'both'}}, Optional
Consecutive NaNs will be filled in this direction.
If limit is specified:
* If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
              'backward'.
If 'limit' is not specified:
* If 'method' is 'backfill' or 'bfill', the default is 'backward'
* else the default is 'forward'
.. versionchanged:: 1.1.0
raises ValueError if `limit_direction` is 'forward' or 'both' and
method is 'backfill' or 'bfill'.
raises ValueError if `limit_direction` is 'backward' or 'both' and
method is 'pad' or 'ffill'.
limit_area : {{`None`, 'inside', 'outside'}}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
``**kwargs`` : optional
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
fillna_methods = ["ffill", "bfill", "pad", "backfill"]
should_transpose = axis == 1 and method not in fillna_methods
obj = self.T if should_transpose else self
if obj.empty:
return self.copy()
if method not in fillna_methods:
axis = self._info_axis_number
if isinstance(obj.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
# Set `limit_direction` depending on `method`
if limit_direction is None:
limit_direction = (
"backward" if method in ("backfill", "bfill") else "forward"
)
else:
if method in ("pad", "ffill") and limit_direction != "forward":
raise ValueError(
f"`limit_direction` must be 'forward' for method `{method}`"
)
if method in ("backfill", "bfill") and limit_direction != "backward":
raise ValueError(
f"`limit_direction` must be 'backward' for method `{method}`"
)
if obj.ndim == 2 and np.all(obj.dtypes == np.dtype(object)):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(obj.index))
else:
index = obj.index
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index.dtype)
or is_datetime64_any_dtype(index.dtype)
or is_timedelta64_dtype(index.dtype)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
new_data = obj._mgr.interpolate(
method=method,
axis=axis,
index=index,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
result = self._constructor(new_data)
if should_transpose:
result = result.T
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="interpolate")
# ----------------------------------------------------------------------
    # Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
        In the case of a :class:`~pandas.DataFrame`, the last row without NaN
        is taken, considering only the subset of columns (if not `None`).
        If there is no good value, NaN is returned for a Series, or
        a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
return self._constructor_sliced(
index=self.columns, name=where, dtype=np.float64
)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
return self._constructor(np.nan, index=where, columns=self.columns)
else:
return self._constructor_sliced(
np.nan, index=self.columns, name=where[0]
)
locs = self.index.asof_locs(where, ~(nulls._values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
@doc(klass=_shared_doc_kwargs["klass"])
def isna(self: FrameOrSeries) -> FrameOrSeries:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is not an NA value.
See Also
--------
{klass}.isnull : Alias of isna.
{klass}.notna : Boolean inverse of isna.
{klass}.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
return isna(self).__finalize__(self, method="isna")
@doc(isna, klass=_shared_doc_kwargs["klass"])
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self, method="isnull")
@doc(klass=_shared_doc_kwargs["klass"])
def notna(self: FrameOrSeries) -> FrameOrSeries:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
{klass}
Mask of bool values for each element in {klass} that
indicates whether an element is not an NA value.
See Also
--------
{klass}.notnull : Alias of notna.
{klass}.isna : Boolean inverse of notna.
{klass}.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
... born=[pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... name=['Alfred', 'Batman', ''],
... toy=[None, 'Batmobile', 'Joker']))
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
return notna(self).__finalize__(self, method="notna")
@doc(notna, klass=_shared_doc_kwargs["klass"])
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self, method="notnull")
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
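        # Helper used by ``clip`` for scalar bounds: values above ``upper`` are
        # replaced by ``upper``, values below ``lower`` by ``lower``, and
        # positions that were already NA are kept as NaN.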
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self._values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
return self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
See Also
--------
Series.clip : Trim values at input threshold in series.
DataFrame.clip : Trim values at input threshold in dataframe.
numpy.clip : Clip (limit) the values in an array.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
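            # when clipping in place, the lower-bound pass above has already
            # modified ``self`` (and returned None), so re-anchor on ``self``
            # before applying the upper bound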
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
Frequency DateOffset or string.
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default end
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
Same type as caller
Object converted to the specified frequency.
See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_at_time(time, asof=asof)
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_between_time(
start_time, end_time, include_start=include_start, include_end=include_end
)
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: Optional[int] = None,
on=None,
level=None,
origin: Union[str, TimestampConvertibleTypes] = "start_day",
offset: Optional[TimedeltaConvertibleTypes] = None,
) -> Resampler:
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
.. deprecated:: 1.1.0
You should add the loffset to the `df.index` after the resample.
See below.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
.. deprecated:: 1.1.0
The new arguments that you should use are 'offset' or 'origin'.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin
must match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
.. versionadded:: 1.1.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
.. versionadded:: 1.1.0
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7T, dtype: int64
>>> ts.resample('17min').sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', origin='epoch').sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', origin='2000-01-01').sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17T, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.resample('17min', origin='start').sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
>>> ts.resample('17min', offset='23h30min').sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.resample('17min', offset='2min').sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17T, dtype: int64
To replace the use of the deprecated `loffset` argument:
>>> from pandas.tseries.frequencies import to_offset
>>> loffset = '19min'
>>> ts_out = ts.resample('17min').sum()
>>> ts_out.index = ts_out.index + to_offset(loffset)
>>> ts_out
2000-10-01 23:33:00 0
2000-10-01 23:50:00 9
2000-10-02 00:07:00 21
2000-10-02 00:24:00 54
2000-10-02 00:41:00 24
Freq: 17T, dtype: int64
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
origin=origin,
offset=offset,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select initial periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str, DateOffset or dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'1M' will display all the rows having their index within the first month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
        Notice that the data for the first 3 calendar days was returned, not the
        first 3 days observed in the dataset, and therefore data for 2018-04-13
        was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if isinstance(offset, Tick):
if end_date in self.index:
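                # for fixed (Tick) frequencies the endpoint is exclusive: the
                # row exactly at ``end_date`` is dropped, unlike the inclusive
                # label-based ``.loc`` slice used below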
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select final periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the last few rows based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self, method="rank")
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
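# Usage sketch (illustrative): how the tie-breaking methods differ on a small
# Series with duplicated values (the lists in the comments are the expected
# float ranks).
#
#     s = pd.Series([7, 3, 7, 1])
#     s.rank(method="average")   # ties share the mean rank: [3.5, 2.0, 3.5, 1.0]
#     s.rank(method="dense")     # rank rises by 1 between groups: [3.0, 2.0, 3.0, 1.0]
#     s.rank(method="first")     # ties broken by appearance order: [3.0, 2.0, 4.0, 1.0]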
@Appender(_shared_docs["compare"] % _shared_doc_kwargs)
def compare(
self,
other,
align_axis: Axis = 1,
keep_shape: bool_t = False,
keep_equal: bool_t = False,
):
from pandas.core.reshape.concat import concat
if type(self) is not type(other):
cls_self, cls_other = type(self).__name__, type(other).__name__
raise TypeError(
f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'"
)
mask = ~((self == other) | (self.isna() & other.isna()))
keys = ["self", "other"]
if not keep_equal:
self = self.where(mask)
other = other.where(mask)
if not keep_shape:
if isinstance(self, ABCDataFrame):
cmask = mask.any()
rmask = mask.any(axis=1)
self = self.loc[rmask, cmask]
other = other.loc[rmask, cmask]
else:
self = self[mask]
other = other[mask]
if align_axis in (1, "columns"): # This is needed for Series
axis = 1
else:
axis = self._get_axis_number(align_axis)
diff = concat([self, other], axis=axis, keys=keys)
if axis >= self.ndim:
# No need to reorganize data if stacking on new axis
# This currently applies for stacking two Series on columns
return diff
ax = diff._get_axis(axis)
ax_names = np.array(ax.names)
# set index names to positions to avoid confusion
ax.names = np.arange(len(ax_names))
# bring self-other to inner level
order = list(range(1, ax.nlevels)) + [0]
if isinstance(diff, ABCDataFrame):
diff = diff.reorder_levels(order, axis=axis)
else:
diff = diff.reorder_levels(order)
# restore the index names in order
diff._get_axis(axis=axis).names = ax_names[order]
# reorder axis to keep things organized
indices = (
np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten()
)
diff = diff.take(indices, axis=axis)
return diff
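# Usage sketch (illustrative): comparing two identically labelled DataFrames,
# assuming `df1` and `df2` share the same index and columns.
#
#     diff = df1.compare(df2)                   # only differing cells, 'self'/'other' columns
#     diff = df1.compare(df2, keep_shape=True)  # keep the full shape; equal cells become NaN
#     diff = df1.compare(df2, align_axis=0)     # stack 'self'/'other' along the index instead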
@doc(**_shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : {axes_single_arg}, default 0
Filling axis for `method` and `limit`.
broadcast_axis : {axes_single_arg}, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : ({klass}, type of other)
Aligned objects.
"""
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
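# Usage sketch (illustrative): aligning a DataFrame with a Series on the
# columns axis before an element-wise operation, assuming `df` and `s` are
# caller-provided with overlapping column labels.
#
#     left, right = df.align(s, join="left", axis=1, fill_value=0)
#     # `left` keeps df's columns; `right` is the Series reindexed to those
#     # columns, with missing labels filled with 0.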
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
_left = left.fillna(method=method, axis=fill_axis, limit=limit)
assert _left is not None # needed for mypy
left = _left
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
# GH#33671 ensure we don't change the index on
# our original Series (NB: by default deep=False)
left = left.copy()
right = right.copy()
left.index = join_index
right.index = join_index
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._mgr
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._mgr:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index.dtype):
if left.index.tz != right.index.tz:
if join_index is not None:
# GH#33671 ensure we don't change the index on
# our original Series (NB: by default deep=False)
left = left.copy()
right = right.copy()
left.index = join_index
right.index = join_index
return (
left.__finalize__(self),
right.__finalize__(other),
)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not cond.empty:
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
else:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
else:
# GH#21947 we have an empty DataFrame/Series, could be object-dtype
cond = cond.astype(bool)
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if isinstance(other, NDFrame):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond._values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = other[0]
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = np.asarray(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
if align and isinstance(other, NDFrame):
other = other.reindex(self._info_axis, axis=self._info_axis_number)
if isinstance(cond, NDFrame):
cond = cond.reindex(self._info_axis, axis=self._info_axis_number)
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._mgr.putmask(
mask=cond, new=other, align=align, axis=block_axis
)
result = self._constructor(new_data)
return self._update_inplace(result)
else:
new_data = self._mgr.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
result = self._constructor(new_data)
return result.__finalize__(self)
@doc(
klass=_shared_doc_kwargs["klass"],
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Replace values where the condition is {cond_rev}.
Parameters
----------
cond : bool {klass}, array-like, or callable
Where `cond` is {cond}, keep the original value. Where
{cond_rev}, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the {klass} and
should return boolean {klass} or array. The callable must
not change input {klass} (though pandas doesn't check it).
other : scalar, {klass}, or callable
Entries where `cond` is {cond_rev} are replaced with
corresponding value from `other`.
If other is callable, it is computed on the {klass} and
should return scalar or {klass}. The callable must not
change input {klass} (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {{'raise', 'ignore'}}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.{name_other}` : Return an object of same shape as
self.
Notes
-----
The {name} method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``{cond}`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``{name}`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df % 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@doc(
where,
klass=_shared_doc_kwargs["klass"],
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
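# Usage sketch (illustrative): both `cond` and `other` accept callables, which
# is convenient in method chains where the intermediate object has no name;
# `df` is assumed to be a numeric DataFrame.
#
#     df.where(lambda x: x > 0, other=lambda x: -x)  # flip the sign of non-positive entries
#     df.mask(lambda x: x.isna(), other=0)           # same effect as fillna(0) here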
@doc(klass=_shared_doc_kwargs["klass"])
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
"""
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`. `freq` can be inferred
when specified as "infer" as long as either freq or inferred_freq
attribute is set in the index.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
If `freq` is specified as "infer" then it will be inferred from
the freq or inferred_freq attributes of the index. If neither of
those attributes exists, a ValueError is thrown.
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
the default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 1.1.0
Returns
-------
{klass}
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
... "Col2": [13, 23, 18, 33, 48],
... "Col3": [17, 27, 22, 37, 52]}},
... index=pd.date_range("2020-01-01", "2020-01-05"))
>>> df
Col1 Col2 Col3
2020-01-01 10 13 17
2020-01-02 20 23 27
2020-01-03 15 18 22
2020-01-04 30 33 37
2020-01-05 45 48 52
>>> df.shift(periods=3)
Col1 Col2 Col3
2020-01-01 NaN NaN NaN
2020-01-02 NaN NaN NaN
2020-01-03 NaN NaN NaN
2020-01-04 10.0 13.0 17.0
2020-01-05 20.0 23.0 27.0
>>> df.shift(periods=1, axis="columns")
Col1 Col2 Col3
2020-01-01 NaN 10.0 13.0
2020-01-02 NaN 20.0 23.0
2020-01-03 NaN 15.0 18.0
2020-01-04 NaN 30.0 33.0
2020-01-05 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
2020-01-01 0 0 0
2020-01-02 0 0 0
2020-01-03 0 0 0
2020-01-04 10 13 17
2020-01-05 20 23 27
>>> df.shift(periods=3, freq="D")
Col1 Col2 Col3
2020-01-04 10 13 17
2020-01-05 20 23 27
2020-01-06 15 18 22
2020-01-07 30 33 37
2020-01-08 45 48 52
>>> df.shift(periods=3, freq="infer")
Col1 Col2 Col3
2020-01-04 10 13 17
2020-01-05 20 23 27
2020-01-06 15 18 22
2020-01-07 30 33 37
2020-01-08 45 48 52
"""
if periods == 0:
return self.copy()
if freq is None:
# when freq is None, data is shifted, index is not
block_axis = self._get_block_manager_axis(axis)
new_data = self._mgr.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
return self._constructor(new_data).__finalize__(self, method="shift")
# when freq is given, index is shifted, data is not
index = self._get_axis(axis)
if freq == "infer":
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not set in the index hence cannot be inferred"
raise ValueError(msg)
elif isinstance(freq, str):
freq = to_offset(freq)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq != orig_freq:
assert orig_freq is not None # for mypy
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
new_ax = index.shift(periods)
else:
new_ax = index.shift(periods, freq)
result = self.set_axis(new_ax, axis)
return result.__finalize__(self, method="shift")
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self, method="slice_shift")
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
.. deprecated:: 1.1.0
Use `shift` instead.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then this method tries to use the freq or
inferred_freq attributes of the index. If neither of those attributes
exists, a ValueError is thrown.
"""
warnings.warn(
(
"tshift is deprecated and will be removed in a future version. "
"Please use shift instead."
),
FutureWarning,
stacklevel=2,
)
if freq is None:
freq = "infer"
return self.shift(periods, freq, axis)
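# Migration sketch (illustrative): `tshift` above is deprecated in favour of
# `shift` with an explicit or inferred `freq`, assuming `ts` has a
# DatetimeIndex whose frequency is set or inferable.
#
#     ts.tshift(2)               # deprecated
#     ts.shift(2, freq="infer")  # equivalent replacement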
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax._is_all_dates:
if is_object_dtype(ax.dtype):
warnings.warn(
"Treating object-dtype Index of date objects as DatetimeIndex "
"is deprecated, will be removed in a future version.",
FutureWarning,
)
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
if len(ax) > 1 and ax.is_monotonic_decreasing:
before, after = after, before
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
{klass}
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_convert")
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self.copy(deep=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self, method="tz_localize")
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries,
percentiles=None,
include=None,
exclude=None,
datetime_is_numeric=False,
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
datetime_is_numeric : bool, default False
Whether to treat datetime dtypes as numeric. This affects statistics
calculated for the column. For DataFrame input, this also
controls whether datetime columns are included by default.
.. versionadded:: 1.1.0
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe(datetime_is_numeric=True)
count 3
mean 2006-09-01 08:00:00
min 2000-01-01 00:00:00
25% 2004-12-31 12:00:00
50% 2010-01-01 00:00:00
75% 2010-01-01 00:00:00
max 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all') # doctest: +SKIP
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN a
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[object]) # doctest: +SKIP
object
count 3
unique 3
top a
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number]) # doctest: +SKIP
categorical object
count 3 3
unique 3 3
top f a
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[object]) # doctest: +SKIP
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_any_dtype(data.dtype):
if self.ndim == 1:
stacklevel = 4
else:
stacklevel = 5
warnings.warn(
"Treating datetime data as categorical rather than numeric in "
"`.describe` is deprecated and will be removed in a future "
"version of pandas. Specify `datetime_is_numeric=True` to "
"silence this warning and adopt the future behavior now.",
FutureWarning,
stacklevel=stacklevel,
)
tz = data.dt.tz
asint = data.dropna().values.view("i8")
top = Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
names += ["top", "freq", "first", "last"]
result += [
top,
freq,
Timestamp(asint.min(), tz=tz),
Timestamp(asint.max(), tz=tz),
]
else:
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data.dtype):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data.dtype) and datetime_is_numeric:
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data.dtype):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
default_include = [np.number]
if datetime_is_numeric:
default_include.append("datetime")
data = self.select_dtypes(include=default_include)
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Label] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
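# Usage sketch (illustrative): narrowing the `describe` output, assuming `df`
# mixes numeric, object and datetime columns.
#
#     df.describe(percentiles=[0.05, 0.95])   # custom tails; the median is always added
#     df.describe(include=[np.number])        # numeric columns only (the default behaviour)
#     df.describe(datetime_is_numeric=True)   # treat datetime columns like numbers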
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
"""
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame(dict(
... FR=[4.0405, 4.0963, 4.3149],
... GR=[1.7246, 1.7482, 1.8519],
... IT=[804.74, 810.01, 860.13]),
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame(dict([
... ('2016', [1769950, 30586265]),
... ('2015', [1500923, 40912316]),
... ('2014', [1371819, 41403351])]),
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
_data = self.fillna(method=fill_method, axis=axis, limit=limit)
assert _data is not None # needed for mypy
data = _data
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name1, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_any_desc,
func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc=_all_desc,
func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
@doc(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also="",
examples="",
)
def mad(self, axis=None, skipna=None, level=None):
"""
{desc}
Parameters
----------
axis : {axis_descr}
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
Returns
-------
{name1} or {name2} (if level specified)\
{see_also}\
{examples}
"""
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
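# Sketch of the computation above (illustrative): `mad` is the mean absolute
# deviation around the mean, i.e. mean(|x - mean(x)|), not the median absolute
# deviation.
#
#     s = pd.Series([1.0, 2.0, 6.0])  # mean = 3.0
#     s.mad()                         # (|1 - 3| + |2 - 3| + |6 - 3|) / 3 == 2.0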
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
func=nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
func=nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
func=nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="minimum",
accum_func=np.minimum.accumulate,
accum_func_name="min",
examples=_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="sum",
accum_func=np.cumsum,
accum_func_name="sum",
examples=_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="product",
accum_func=np.cumprod,
accum_func_name="prod",
examples=_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="maximum",
accum_func=np.maximum.accumulate,
accum_func_name="max",
examples=_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the sum of the values for the requested axis.\n\n"
"This is equivalent to the method ``numpy.sum``.",
func=nanops.nansum,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the mean of the values for the requested axis.",
func=nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
func=nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
func=nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the product of the values for the requested axis.",
func=nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the median of the values for the requested axis.",
func=nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is "
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
func=nanops.nanmax,
see_also=_stat_func_see_also,
examples=_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is "
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
func=nanops.nanmin,
see_also=_stat_func_see_also,
examples=_min_examples,
)
@doc(Rolling)
def rolling(
self,
window: Union[int, timedelta, BaseOffset, BaseIndexer],
min_periods: Optional[int] = None,
center: bool_t = False,
win_type: Optional[str] = None,
on: Optional[str] = None,
axis: Axis = 0,
closed: Optional[str] = None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
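# Usage sketch (illustrative): fixed-size versus time-based rolling windows,
# assuming `ts` is a Series with a monotonic DatetimeIndex.
#
#     ts.rolling(window=3).mean()                # last 3 observations
#     ts.rolling(window="3D").mean()             # observations within the trailing 3 days
#     ts.rolling(window=3, min_periods=1).sum()  # emit results even for partial windows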
@doc(Expanding)
def expanding(
self, min_periods: int = 1, center: Optional[bool_t] = None, axis: Axis = 0
) -> Expanding:
axis = self._get_axis_number(axis)
if center is not None:
warnings.warn(
"The `center` argument on `expanding` will be removed in the future",
FutureWarning,
stacklevel=2,
)
else:
center = False
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
@doc(ExponentialMovingWindow)
def ewm(
self,
com: Optional[float] = None,
span: Optional[float] = None,
halflife: Optional[Union[float, TimedeltaConvertibleTypes]] = None,
alpha: Optional[float] = None,
min_periods: int = 0,
adjust: bool_t = True,
ignore_na: bool_t = False,
axis: Axis = 0,
times: Optional[Union[str, np.ndarray, FrameOrSeries]] = None,
) -> ExponentialMovingWindow:
axis = self._get_axis_number(axis)
return ExponentialMovingWindow(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
)
# ----------------------------------------------------------------------
# Misc methods
def _find_valid_index(self, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@doc(position="first", klass=_shared_doc_kwargs["klass"])
def first_valid_index(self):
"""
Return index for {position} non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty {klass}.
"""
return self._find_valid_index("first")
@doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"])
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
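# Illustrative note (not in the original source): for DataFrame this returns roughly
# ("{index (0), columns (1)}", "Series", "DataFrame"); for Series it returns
# ("{index (0)}", "scalar", "Series"). These values fill the shared docstring
# templates defined below.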
_num_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
{min_count}\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
{name1} or {name2} (if level specified)\
{see_also}\
{examples}
"""
_num_ddof_doc = """
{desc}
Parameters
----------
axis : {axis_descr}
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
{name1} or {name2} (if level specified)
Notes
-----
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)\n"""
_bool_doc = """
{desc}
Parameters
----------
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be {empty_value}, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a {name1}.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
    If level is specified, then {name2} is returned; otherwise, {name1}
    is returned.
{see_also}
{examples}"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative {desc} over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
{desc}.
Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
{name1} or {name2}
Return cumulative {desc} of {name1} or {name2}.
See Also
--------
core.window.Expanding.{accum_func_name} : Similar functionality
but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
{name2} axis.
{name2}.cummax : Return cumulative maximum over {name2} axis.
{name2}.cummin : Return cumulative minimum over {name2} axis.
{name2}.cumsum : Return cumulative sum over {name2} axis.
{name2}.cumprod : Return cumulative product over {name2} axis.
{examples}"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
def _make_min_count_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@doc(
_num_doc,
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str = "",
examples: str = "",
) -> Callable:
@doc(
_num_doc,
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(
cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
) -> Callable:
@doc(_num_ddof_doc, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
accum_func: Callable,
accum_func_name: str,
examples: str,
) -> Callable:
@doc(
_cnum_doc,
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def block_accum_func(blk_values):
values = blk_values.T if hasattr(blk_values, "T") else blk_values
result = nanops.na_accum_func(values, accum_func, skipna=skipna)
result = result.T if hasattr(result, "T") else result
return result
result = self._mgr.apply(block_accum_func)
return self._constructor(result).__finalize__(self, method=name)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls,
name: str,
name1: str,
name2: str,
axis_descr: str,
desc: str,
func: Callable,
see_also: str,
examples: str,
empty_value: bool,
) -> Callable:
@doc(
_bool_doc,
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
if self.ndim > 1 and axis is None:
# Reduce along one dimension then the other, to simplify DataFrame._reduce
res = logical_func(
self, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
)
return logical_func(res, skipna=skipna, **kwargs)
return self._reduce(
func,
name=name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
|
the-stack_106_27357 | import win32api
from lib.pywin32_keys import VK
# Check which key is currently pressed.
def check_key_pressed():
    try:
        while True:
            for i in VK:
                if win32api.GetAsyncKeyState(VK[i]) != 0:
                    return i
    except Exception as e:
        print("Something went wrong while checking which key is pressed:", e)
# Take the id of a key from the library of virtual keys.
def give_vk_id(name):
    return VK[name]
if __name__ == "__main__":
    print("Please press a button:")
    name_of_key = check_key_pressed()
    print("You pressed: " + name_of_key + ", its virtual-key id is: " + str(give_vk_id(name_of_key)))
# Note: if you need to check again which key is pressed, wait a short time first -> use time.sleep.
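# Illustrative sketch (not part of the original script): one way to poll for keys
# repeatedly with a pause between checks, as the note above suggests. The function
# name and delay value are assumptions for the example; it reuses only the helpers
# defined in this file.
def poll_keys_example(delay=0.2):
    import time
    while True:
        key = check_key_pressed()
        print("Pressed:", key, "id:", give_vk_id(key))
        time.sleep(delay)  # pause so a single keystroke is not reported repeatedly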
|
the-stack_106_27360 | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import socket
import sys
import errno
import traceback
import click
from enum import Enum
import tornado.concurrent
import tornado.gen
import tornado.ioloop
import tornado.web
import tornado.websocket
from streamlit import config
from streamlit import util
from streamlit.ForwardMsgCache import ForwardMsgCache
from streamlit.ForwardMsgCache import create_reference_msg
from streamlit.ForwardMsgCache import populate_hash_if_needed
from streamlit.ReportSession import ReportSession
from streamlit.logger import get_logger
from streamlit.proto.BackMsg_pb2 import BackMsg
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.server.routes import AddSlashHandler
from streamlit.server.routes import DebugHandler
from streamlit.server.routes import HealthHandler
from streamlit.server.routes import MessageCacheHandler
from streamlit.server.routes import MetricsHandler
from streamlit.server.routes import StaticFileHandler
from streamlit.server.server_util import MESSAGE_SIZE_LIMIT
from streamlit.server.server_util import is_cacheable_msg
from streamlit.server.server_util import is_url_from_allowed_origins
from streamlit.server.server_util import make_url_path_regex
from streamlit.server.server_util import serialize_forward_msg
LOGGER = get_logger(__name__)
TORNADO_SETTINGS = {
"compress_response": True, # Gzip HTTP responses.
"websocket_ping_interval": 20, # Ping every 20s to keep WS alive.
"websocket_ping_timeout": 30, # Pings should be responded to within 30s.
"websocket_max_message_size": MESSAGE_SIZE_LIMIT, # Up the WS size limit.
}
# Dictionary key used to mark the script execution context that starts
# up before the first browser connects.
PREHEATED_REPORT_SESSION = "PREHEATED_REPORT_SESSION"
# When server.port is not available it will look for the next available port
# up to MAX_PORT_SEARCH_RETRIES.
MAX_PORT_SEARCH_RETRIES = 100
class SessionInfo(object):
"""Type stored in our _report_sessions dict.
For each ReportSession, the server tracks that session's
report_run_count. This is used to track the age of messages in
the ForwardMsgCache.
"""
def __init__(self, session):
"""Initialize a SessionInfo instance.
Parameters
----------
session : ReportSession
"""
self.session = session
self.report_run_count = 0
class State(Enum):
INITIAL = "INITIAL"
WAITING_FOR_FIRST_BROWSER = "WAITING_FOR_FIRST_BROWSER"
ONE_OR_MORE_BROWSERS_CONNECTED = "ONE_OR_MORE_BROWSERS_CONNECTED"
NO_BROWSERS_CONNECTED = "NO_BROWSERS_CONNECTED"
STOPPING = "STOPPING"
STOPPED = "STOPPED"
class RetriesExceeded(Exception):
pass
def server_port_is_manually_set():
return config.is_manually_set("server.port")
def start_listening(app):
"""Takes the server start listening at the configured port.
In case the port is already taken it tries listening to the next available
port. It will error after MAX_PORT_SEARCH_RETRIES attempts.
"""
call_count = 0
while call_count < MAX_PORT_SEARCH_RETRIES:
port = config.get_option("server.port")
try:
app.listen(port)
break # It worked! So let's break out of the loop.
except (OSError, socket.error) as e:
if e.errno == errno.EADDRINUSE:
if server_port_is_manually_set():
LOGGER.error("Port %s is already in use", port)
sys.exit(1)
else:
LOGGER.debug(
"Port %s already in use, trying to use the next one.", port
)
port += 1
# Save port 3000 because it is used for the development
# server in the front end.
if port == 3000:
port += 1
config._set_option(
"server.port", port, config.ConfigOption.STREAMLIT_DEFINITION
)
call_count += 1
else:
raise
if call_count >= MAX_PORT_SEARCH_RETRIES:
        raise RetriesExceeded(
            f"Cannot start Streamlit server. Port {port} is already in use, and "
            f"Streamlit was unable to find a free port after "
            f"{MAX_PORT_SEARCH_RETRIES} attempts."
        )
class Server(object):
_singleton = None
@classmethod
def get_current(cls):
"""Return the singleton instance."""
if cls._singleton is None:
raise RuntimeError("Server has not been initialized yet")
return Server._singleton
def __init__(self, ioloop, script_path, command_line):
"""Create the server. It won't be started yet.
Parameters
----------
ioloop : tornado.ioloop.IOLoop
script_path : str
command_line : str
"""
if Server._singleton is not None:
raise RuntimeError("Server already initialized. Use .get_current() instead")
Server._singleton = self
_set_tornado_log_levels()
self._ioloop = ioloop
self._script_path = script_path
self._command_line = command_line
# Mapping of WebSocket->SessionInfo.
self._session_infos = {}
self._must_stop = threading.Event()
self._state = None
self._set_state(State.INITIAL)
self._message_cache = ForwardMsgCache()
def start(self, on_started):
"""Start the server.
Parameters
----------
on_started : callable
A callback that will be called when the server's run-loop
has started, and the server is ready to begin receiving clients.
"""
if self._state != State.INITIAL:
raise RuntimeError("Server has already been started")
LOGGER.debug("Starting server...")
app = self._create_app()
start_listening(app)
port = config.get_option("server.port")
LOGGER.debug("Server started on port %s", port)
self._ioloop.spawn_callback(self._loop_coroutine, on_started)
def get_debug(self):
return {"report": self._report.get_debug()}
def _create_app(self):
"""Create our tornado web app.
Returns
-------
tornado.web.Application
"""
base = config.get_option("server.baseUrlPath")
routes = [
(
make_url_path_regex(base, "stream"),
_BrowserWebSocketHandler,
dict(server=self),
),
(
make_url_path_regex(base, "healthz"),
HealthHandler,
dict(callback=lambda: self.is_ready_for_browser_connection),
),
(make_url_path_regex(base, "debugz"), DebugHandler, dict(server=self)),
(make_url_path_regex(base, "metrics"), MetricsHandler),
(
make_url_path_regex(base, "message"),
MessageCacheHandler,
dict(cache=self._message_cache),
),
]
if config.get_option("global.developmentMode") and config.get_option(
"global.useNode"
):
LOGGER.debug("Serving static content from the Node dev server")
else:
static_path = util.get_static_dir()
LOGGER.debug("Serving static content from %s", static_path)
routes.extend(
[
(
make_url_path_regex(base, "(.*)"),
StaticFileHandler,
{"path": "%s/" % static_path, "default_filename": "index.html"},
),
(
make_url_path_regex(base, trailing_slash=False),
AddSlashHandler
)
]
)
return tornado.web.Application(routes, **TORNADO_SETTINGS)
def _set_state(self, new_state):
LOGGER.debug("Server state: %s -> %s" % (self._state, new_state))
self._state = new_state
@property
def is_ready_for_browser_connection(self):
return self._state not in (State.INITIAL, State.STOPPING, State.STOPPED)
@property
def browser_is_connected(self):
return self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED
@tornado.gen.coroutine
def _loop_coroutine(self, on_started=None):
try:
if self._state == State.INITIAL:
self._set_state(State.WAITING_FOR_FIRST_BROWSER)
elif self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED:
pass
else:
raise RuntimeError("Bad server state at start: %s" % self._state)
if on_started is not None:
on_started(self)
while not self._must_stop.is_set():
if self._state == State.WAITING_FOR_FIRST_BROWSER:
pass
elif self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED:
# Shallow-clone our sessions into a list, so we can iterate
# over it and not worry about whether it's being changed
# outside this coroutine.
session_pairs = list(self._session_infos.items())
for ws, session_info in session_pairs:
if ws is PREHEATED_REPORT_SESSION:
continue
if ws is None:
continue
msg_list = session_info.session.flush_browser_queue()
for msg in msg_list:
try:
self._send_message(ws, session_info, msg)
except tornado.websocket.WebSocketClosedError:
self._remove_browser_connection(ws)
yield
yield
elif self._state == State.NO_BROWSERS_CONNECTED:
pass
else:
# Break out of the thread loop if we encounter any other state.
break
yield tornado.gen.sleep(0.01)
# Shut down all ReportSessions
for session_info in list(self._session_infos.values()):
session_info.session.shutdown()
self._set_state(State.STOPPED)
except Exception as e:
print("EXCEPTION!", e)
traceback.print_stack(file=sys.stdout)
LOGGER.info(
"""
Please report this bug at https://github.com/streamlit/streamlit/issues.
"""
)
finally:
self._on_stopped()
def _send_message(self, ws, session_info, msg):
"""Send a message to a client.
If the client is likely to have already cached the message, we may
instead send a "reference" message that contains only the hash of the
message.
Parameters
----------
ws : _BrowserWebSocketHandler
The socket connected to the client
session_info : SessionInfo
The SessionInfo associated with websocket
msg : ForwardMsg
The message to send to the client
"""
msg.metadata.cacheable = is_cacheable_msg(msg)
msg_to_send = msg
if msg.metadata.cacheable:
populate_hash_if_needed(msg)
if self._message_cache.has_message_reference(
msg, session_info.session, session_info.report_run_count
):
# This session has probably cached this message. Send
# a reference instead.
LOGGER.debug("Sending cached message ref (hash=%s)" % msg.hash)
msg_to_send = create_reference_msg(msg)
# Cache the message so it can be referenced in the future.
# If the message is already cached, this will reset its
# age.
LOGGER.debug("Caching message (hash=%s)" % msg.hash)
self._message_cache.add_message(
msg, session_info.session, session_info.report_run_count
)
# If this was a `report_finished` message, we increment the
# report_run_count for this session, and update the cache
if (
msg.WhichOneof("type") == "report_finished"
and msg.report_finished == ForwardMsg.FINISHED_SUCCESSFULLY
):
LOGGER.debug(
"Report finished successfully; "
"removing expired entries from MessageCache "
"(max_age=%s)",
config.get_option("global.maxCachedMessageAge"),
)
session_info.report_run_count += 1
self._message_cache.remove_expired_session_entries(
session_info.session, session_info.report_run_count
)
# Ship it off!
ws.write_message(serialize_forward_msg(msg_to_send), binary=True)
def stop(self):
click.secho(" Stopping...", fg="blue")
self._set_state(State.STOPPING)
self._must_stop.set()
def _on_stopped(self):
"""Called when our runloop is exiting, to shut down the ioloop.
This will end our process.
(Tests can patch this method out, to prevent the test's ioloop
from being shutdown.)
"""
self._ioloop.stop()
def add_preheated_report_session(self):
"""Register a fake browser with the server and run the script.
This is used to start running the user's script even before the first
browser connects.
"""
session = self._add_browser_connection(PREHEATED_REPORT_SESSION)
session.handle_rerun_script_request(is_preheat=True)
def _add_browser_connection(self, ws):
"""Register a connected browser with the server
Parameters
----------
ws : _BrowserWebSocketHandler or PREHEATED_REPORT_CONTEXT
The newly-connected websocket handler
Returns
-------
ReportSession
The ReportSession associated with this browser connection
"""
if ws not in self._session_infos:
if PREHEATED_REPORT_SESSION in self._session_infos:
assert len(self._session_infos) == 1
LOGGER.debug("Reusing preheated context for ws %s", ws)
session = self._session_infos[PREHEATED_REPORT_SESSION].session
del self._session_infos[PREHEATED_REPORT_SESSION]
else:
LOGGER.debug("Creating new context for ws %s", ws)
session = ReportSession(
ioloop=self._ioloop,
script_path=self._script_path,
command_line=self._command_line,
)
self._session_infos[ws] = SessionInfo(session)
if ws is not PREHEATED_REPORT_SESSION:
self._set_state(State.ONE_OR_MORE_BROWSERS_CONNECTED)
return self._session_infos[ws].session
def _remove_browser_connection(self, ws):
if ws in self._session_infos:
session_info = self._session_infos[ws]
del self._session_infos[ws]
session_info.session.shutdown()
if len(self._session_infos) == 0:
self._set_state(State.NO_BROWSERS_CONNECTED)
class _BrowserWebSocketHandler(tornado.websocket.WebSocketHandler):
"""Handles a WebSocket connection from the browser"""
def initialize(self, server):
self._server = server
def check_origin(self, origin):
"""Set up CORS."""
return is_url_from_allowed_origins(origin)
def open(self):
self._session = self._server._add_browser_connection(self)
def on_close(self):
self._server._remove_browser_connection(self)
@tornado.gen.coroutine
def on_message(self, payload):
msg = BackMsg()
try:
msg.ParseFromString(payload)
LOGGER.debug("Received the following back message:\n%s", msg)
msg_type = msg.WhichOneof("type")
if msg_type == "cloud_upload":
yield self._session.handle_save_request(self)
elif msg_type == "rerun_script":
self._session.handle_rerun_script_request()
elif msg_type == "clear_cache":
self._session.handle_clear_cache_request()
elif msg_type == "set_run_on_save":
self._session.handle_set_run_on_save_request(msg.set_run_on_save)
elif msg_type == "stop_report":
self._session.handle_stop_script_request()
elif msg_type == "update_widgets":
self._session.handle_rerun_script_request(
widget_state=msg.update_widgets
)
elif msg_type == "close_connection":
if config.get_option("global.developmentMode"):
Server.get_current().stop()
else:
LOGGER.warning(
"Client tried to close connection when "
"not in development mode"
)
else:
LOGGER.warning('No handler for "%s"', msg_type)
except BaseException as e:
LOGGER.error(e)
self._session.enqueue_exception(e)
def _set_tornado_log_levels():
if not config.get_option("global.developmentMode"):
# Hide logs unless they're super important.
# Example of stuff we don't care about: 404 about .js.map files.
logging.getLogger("tornado.access").setLevel(logging.ERROR)
logging.getLogger("tornado.application").setLevel(logging.ERROR)
logging.getLogger("tornado.general").setLevel(logging.ERROR)
|
the-stack_106_27362 | import math
import tensorflow as tf
import numpy as np
import dnnlib.tflib as tflib
from functools import partial
def create_stub(name, batch_size):
return tf.constant(0, dtype='float32', shape=(batch_size, 0))
def create_variable_for_generator(name, batch_size, tiled_dlatent, model_scale=18):
if tiled_dlatent:
low_dim_dlatent = tf.get_variable('learnable_dlatents',
shape=(batch_size, 512),
dtype='float32',
initializer=tf.initializers.random_normal())
return tf.tile(tf.expand_dims(low_dim_dlatent, axis=1), [1, model_scale, 1])
else:
return tf.get_variable('learnable_dlatents',
shape=(batch_size, model_scale, 512),
dtype='float32',
initializer=tf.initializers.random_normal())
class Generator:
def __init__(self, model, batch_size, clipping_threshold=2, tiled_dlatent=False, model_res=1024, randomize_noise=False):
self.batch_size = batch_size
self.tiled_dlatent=tiled_dlatent
self.model_scale = int(2*(math.log(model_res,2)-1)) # For example, 1024 -> 18
if tiled_dlatent:
self.initial_dlatents = np.zeros((self.batch_size, 512))
model.components.synthesis.run(np.zeros((self.batch_size, self.model_scale, 512)),
randomize_noise=randomize_noise, minibatch_size=self.batch_size,
custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size, tiled_dlatent=True),
partial(create_stub, batch_size=batch_size)],
structure='fixed')
else:
self.initial_dlatents = np.zeros((self.batch_size, self.model_scale, 512))
model.components.synthesis.run(self.initial_dlatents,
randomize_noise=randomize_noise, minibatch_size=self.batch_size,
custom_inputs=[partial(create_variable_for_generator, batch_size=batch_size, tiled_dlatent=False, model_scale=self.model_scale),
partial(create_stub, batch_size=batch_size)],
structure='fixed')
self.dlatent_avg_def = model.get_var('dlatent_avg')
self.reset_dlatent_avg()
self.sess = tf.get_default_session()
self.graph = tf.get_default_graph()
self.dlatent_variable = next(v for v in tf.global_variables() if 'learnable_dlatents' in v.name)
self.set_dlatents(self.initial_dlatents)
try:
self.generator_output = self.graph.get_tensor_by_name('G_synthesis_1/_Run/concat:0')
except KeyError:
# If we loaded only Gs and didn't load G or D, then scope "G_synthesis_1" won't exist in the graph.
self.generator_output = self.graph.get_tensor_by_name('G_synthesis/_Run/concat:0')
self.generated_image = tflib.convert_images_to_uint8(self.generator_output, nchw_to_nhwc=True, uint8_cast=False)
self.generated_image_uint8 = tf.saturate_cast(self.generated_image, tf.uint8)
# Implement stochastic clipping similar to what is described in https://arxiv.org/abs/1702.04782
# (Slightly different in that the latent space is normal gaussian here and was uniform in [-1, 1] in that paper,
# so we clip any vector components outside of [-2, 2]. It seems fine, but I haven't done an ablation check.)
clipping_mask = tf.math.logical_or(self.dlatent_variable > clipping_threshold, self.dlatent_variable < -clipping_threshold)
clipped_values = tf.where(clipping_mask, tf.random_normal(shape=self.dlatent_variable.shape), self.dlatent_variable)
self.stochastic_clip_op = tf.assign(self.dlatent_variable, clipped_values)
def reset_dlatents(self):
self.set_dlatents(self.initial_dlatents)
def set_dlatents(self, dlatents):
if self.tiled_dlatent:
if (dlatents.shape != (self.batch_size, 512)) and (dlatents.shape[1] != 512):
dlatents = np.mean(dlatents, axis=1)
if (dlatents.shape != (self.batch_size, 512)):
dlatents = np.vstack([dlatents, np.zeros((self.batch_size-dlatents.shape[0], 512))])
assert (dlatents.shape == (self.batch_size, 512))
else:
if (dlatents.shape[1] > self.model_scale):
dlatents = dlatents[:,:self.model_scale,:]
if (dlatents.shape != (self.batch_size, self.model_scale, 512)):
dlatents = np.vstack([dlatents, np.zeros((self.batch_size-dlatents.shape[0], self.model_scale, 512))])
assert (dlatents.shape == (self.batch_size, self.model_scale, 512))
self.sess.run(tf.assign(self.dlatent_variable, dlatents))
def stochastic_clip_dlatents(self):
self.sess.run(self.stochastic_clip_op)
def get_dlatents(self):
return self.sess.run(self.dlatent_variable)
def get_dlatent_avg(self):
return self.dlatent_avg
def set_dlatent_avg(self, dlatent_avg):
self.dlatent_avg = dlatent_avg
def reset_dlatent_avg(self):
self.dlatent_avg = self.dlatent_avg_def
def generate_images(self, dlatents=None):
        if dlatents is not None:  # avoid ambiguous truth value for numpy arrays
self.set_dlatents(dlatents)
return self.sess.run(self.generated_image_uint8)
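def example_generate(Gs):
    """Illustrative sketch only, not part of the original module: generate one image
    from random dlatents. Assumes `Gs` is a loaded 1024px StyleGAN network and that a
    TensorFlow session is already active (e.g. via dnnlib.tflib.init_tf())."""
    generator = Generator(Gs, batch_size=1, randomize_noise=False)
    dlatents = np.random.randn(1, generator.model_scale, 512).astype(np.float32)
    generator.set_dlatents(dlatents)
    return generator.generate_images()  # uint8 array of shape (1, H, W, 3)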
|
the-stack_106_27363 | # -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20171123135539.1: * @file ../commands/commanderEditCommands.py
#@@first
"""Edit commands that used to be defined in leoCommands.py"""
import re
from typing import List
from leo.core import leoGlobals as g
#@+others
#@+node:ekr.20171123135625.34: ** c_ec.addComments
@g.commander_command('add-comments')
def addComments(self, event=None):
#@+<< addComments docstring >>
#@+node:ekr.20171123135625.35: *3* << addComments docstring >>
#@@pagewidth 50
"""
Converts all selected lines to comment lines using
the comment delimiters given by the applicable @language directive.
Inserts single-line comments if possible; inserts
block comments for languages like html that lack
single-line comments.
@bool indent_added_comments
If True (the default), inserts opening comment
delimiters just before the first non-whitespace
character of each line. Otherwise, inserts opening
comment delimiters at the start of each line.
*See also*: delete-comments.
"""
#@-<< addComments docstring >>
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Make sure there is a selection.
head, lines, tail, oldSel, oldYview = self.getBodyLines()
if not lines:
g.warning('no text selected')
return
#
# The default language in effect at p.
language = c.frame.body.colorizer.scanLanguageDirectives(p)
if c.hasAmbiguousLanguage(p):
language = c.getLanguageAtCursor(p, language)
d1, d2, d3 = g.set_delims_from_language(language)
d2 = d2 or ''; d3 = d3 or ''
if d1:
openDelim, closeDelim = d1 + ' ', ''
else:
openDelim, closeDelim = d2 + ' ', ' ' + d3
#
# Calculate the result.
indent = c.config.getBool('indent-added-comments', default=True)
result = []
for line in lines:
if line.strip():
i = g.skip_ws(line, 0)
if indent:
s = line[i:].replace('\n', '')
result.append(line[0:i] + openDelim + s + closeDelim + '\n')
else:
s = line.replace('\n', '')
result.append(openDelim + s + closeDelim + '\n')
else:
result.append(line)
#
# Set p.b and w's text first.
middle = ''.join(result)
p.b = head + middle + tail # Sets dirty and changed bits.
w.setAllText(head + middle + tail)
#
# Calculate the proper selection range (i, j, ins).
i = len(head)
j = max(i, len(head) + len(middle) - 1)
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
u.afterChangeBody(p, 'Add Comments', bunch)
#@+node:ekr.20171123135625.3: ** c_ec.colorPanel
@g.commander_command('set-colors')
def colorPanel(self, event=None):
"""Open the color dialog."""
c = self; frame = c.frame
if not frame.colorPanel:
frame.colorPanel = g.app.gui.createColorPanel(c)
frame.colorPanel.bringToFront()
#@+node:ekr.20171123135625.16: ** c_ec.convertAllBlanks
@g.commander_command('convert-all-blanks')
def convertAllBlanks(self, event=None):
"""Convert all blanks to tabs in the selected outline."""
c = self; u = c.undoer; undoType = 'Convert All Blanks'
current = c.p
if g.app.batchMode:
c.notValidInBatchMode(undoType)
return
d = c.scanAllDirectives(c.p)
tabWidth = d.get("tabwidth")
count = 0
u.beforeChangeGroup(current, undoType)
for p in current.self_and_subtree():
innerUndoData = u.beforeChangeNodeContents(p)
if p == current:
changed = c.convertBlanks(event)
if changed:
count += 1
else:
changed = False; result = []
text = p.v.b
lines = text.split('\n')
for line in lines:
i, w = g.skip_leading_ws_with_indent(line, 0, tabWidth)
s = g.computeLeadingWhitespace(
w, abs(tabWidth)) + line[i:] # use positive width.
if s != line: changed = True
result.append(s)
if changed:
count += 1
p.setDirty()
p.setBodyString('\n'.join(result))
u.afterChangeNodeContents(p, undoType, innerUndoData)
u.afterChangeGroup(current, undoType)
if not g.unitTesting:
g.es("blanks converted to tabs in", count, "nodes")
# Must come before c.redraw().
if count > 0:
c.redraw_after_icons_changed()
#@+node:ekr.20171123135625.17: ** c_ec.convertAllTabs
@g.commander_command('convert-all-tabs')
def convertAllTabs(self, event=None):
"""Convert all tabs to blanks in the selected outline."""
c = self; u = c.undoer; undoType = 'Convert All Tabs'
current = c.p
if g.app.batchMode:
c.notValidInBatchMode(undoType)
return
theDict = c.scanAllDirectives(c.p)
tabWidth = theDict.get("tabwidth")
count = 0
u.beforeChangeGroup(current, undoType)
for p in current.self_and_subtree():
undoData = u.beforeChangeNodeContents(p)
if p == current:
changed = self.convertTabs(event)
if changed:
count += 1
else:
result = []
changed = False
text = p.v.b
lines = text.split('\n')
for line in lines:
i, w = g.skip_leading_ws_with_indent(line, 0, tabWidth)
s = g.computeLeadingWhitespace(
w, -abs(tabWidth)) + line[i:] # use negative width.
if s != line: changed = True
result.append(s)
if changed:
count += 1
p.setDirty()
p.setBodyString('\n'.join(result))
u.afterChangeNodeContents(p, undoType, undoData)
u.afterChangeGroup(current, undoType)
if not g.unitTesting:
g.es("tabs converted to blanks in", count, "nodes")
if count > 0:
c.redraw_after_icons_changed()
#@+node:ekr.20171123135625.18: ** c_ec.convertBlanks
@g.commander_command('convert-blanks')
def convertBlanks(self, event=None):
"""
Convert *all* blanks to tabs in the selected node.
    Return True if p.b was changed.
"""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
oldYview = w.getYScrollPosition()
w.selectAllText()
head, lines, tail, oldSel, oldYview = c.getBodyLines()
#
# Use the relative @tabwidth, not the global one.
d = c.scanAllDirectives(p)
tabWidth = d.get("tabwidth")
if not tabWidth:
return False
#
# Calculate the result.
changed, result = False, []
for line in lines:
s = g.optimizeLeadingWhitespace(line, abs(tabWidth)) # Use positive width.
if s != line:
changed = True
result.append(s)
if not changed:
return False
#
# Set p.b and w's text first.
middle = ''.join(result)
p.b = head + middle + tail # Sets dirty and changed bits.
w.setAllText(head + middle + tail)
#
# Select all text and set scroll position.
w.selectAllText()
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
    u.afterChangeBody(p, 'Convert Blanks', bunch)
return True
#@+node:ekr.20171123135625.19: ** c_ec.convertTabs
@g.commander_command('convert-tabs')
def convertTabs(self, event=None):
"""Convert all tabs to blanks in the selected node."""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Data...
w.selectAllText()
head, lines, tail, oldSel, oldYview = self.getBodyLines()
# Use the relative @tabwidth, not the global one.
theDict = c.scanAllDirectives(p)
tabWidth = theDict.get("tabwidth")
if not tabWidth:
return False
#
# Calculate the result.
changed, result = False, []
for line in lines:
i, width = g.skip_leading_ws_with_indent(line, 0, tabWidth)
s = g.computeLeadingWhitespace(width, -abs(tabWidth)) + line[i:]
# use negative width.
if s != line: changed = True
result.append(s)
if not changed:
return False
#
# Set p.b and w's text first.
middle = ''.join(result)
p.b = head + middle + tail # Sets dirty and changed bits.
w.setAllText(head + middle + tail)
#
# Calculate the proper selection range (i, j, ins).
i = len(head)
j = max(i, len(head) + len(middle) - 1)
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
    u.afterChangeBody(p, 'Convert Tabs', bunch)
return True
#@+node:ekr.20171123135625.21: ** c_ec.dedentBody (unindent-region)
@g.commander_command('unindent-region')
def dedentBody(self, event=None):
"""Remove one tab's worth of indentation from all presently selected lines."""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# Initial data.
sel_1, sel_2 = w.getSelectionRange()
tab_width = c.getTabWidth(c.p)
head, lines, tail, oldSel, oldYview = self.getBodyLines()
bunch = u.beforeChangeBody(p)
#
# Calculate the result.
changed, result = False, []
for line in lines:
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
s = g.computeLeadingWhitespace(width - abs(tab_width), tab_width) + line[i:]
if s != line:
changed = True
result.append(s)
if not changed:
return
#
# Set p.b and w's text first.
middle = ''.join(result)
all = head + middle + tail
p.b = all # Sets dirty and changed bits.
w.setAllText(all)
#
# Calculate the proper selection range (i, j, ins).
if sel_1 == sel_2:
line = result[0]
ins, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
i = j = len(head) + ins
else:
i = len(head)
j = len(head) + len(middle)
if middle.endswith('\n'): # #1742.
j -= 1
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
u.afterChangeBody(p, 'Unindent Region', bunch)
#@+node:ekr.20171123135625.36: ** c_ec.deleteComments
@g.commander_command('delete-comments')
def deleteComments(self, event=None):
#@+<< deleteComments docstring >>
#@+node:ekr.20171123135625.37: *3* << deleteComments docstring >>
#@@pagewidth 50
"""
Removes one level of comment delimiters from all
selected lines. The applicable @language directive
determines the comment delimiters to be removed.
Removes single-line comments if possible; removes
block comments for languages like html that lack
single-line comments.
*See also*: add-comments.
"""
#@-<< deleteComments docstring >>
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Initial data.
head, lines, tail, oldSel, oldYview = self.getBodyLines()
if not lines:
g.warning('no text selected')
return
# The default language in effect at p.
language = c.frame.body.colorizer.scanLanguageDirectives(p)
if c.hasAmbiguousLanguage(p):
language = c.getLanguageAtCursor(p, language)
d1, d2, d3 = g.set_delims_from_language(language)
#
# Calculate the result.
changed, result = False, []
if d1:
# Remove the single-line comment delim in front of each line
d1b = d1 + ' '
n1, n1b = len(d1), len(d1b)
for s in lines:
i = g.skip_ws(s, 0)
if g.match(s, i, d1b):
result.append(s[:i] + s[i + n1b :])
changed = True
elif g.match(s, i, d1):
result.append(s[:i] + s[i + n1 :])
changed = True
else:
result.append(s)
else:
# Remove the block comment delimiters from each line.
n2, n3 = len(d2), len(d3)
for s in lines:
i = g.skip_ws(s, 0)
j = s.find(d3, i + n2)
if g.match(s, i, d2) and j > -1:
first = i + n2
if g.match(s, first, ' '):
first += 1
last = j
if g.match(s, last - 1, ' '):
last -= 1
result.append(s[:i] + s[first:last] + s[j + n3 :])
changed = True
else:
result.append(s)
if not changed:
return
#
# Set p.b and w's text first.
middle = ''.join(result)
p.b = head + middle + tail # Sets dirty and changed bits.
w.setAllText(head + middle + tail)
#
# Set the selection range and scroll position.
i = len(head)
j = ins = max(i, len(head) + len(middle) - 1)
w.setSelectionRange(i, j, insert=ins)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
    u.afterChangeBody(p, 'Delete Comments', bunch)
#@+node:ekr.20171123135625.54: ** c_ec.editHeadline (edit-headline)
@g.commander_command('edit-headline')
def editHeadline(self, event=None):
"""
Begin editing the headline of the selected node.
This is just a wrapper around tree.editLabel.
"""
c = self
k, tree = c.k, c.frame.tree
if g.app.batchMode:
c.notValidInBatchMode("Edit Headline")
return None, None
e, wrapper = tree.editLabel(c.p)
if k:
# k.setDefaultInputState()
k.setEditingState()
k.showStateAndMode(w=wrapper)
return e, wrapper
# Neither of these is used by any caller.
#@+node:ekr.20171123135625.23: ** c_ec.extract & helpers
@g.commander_command('extract')
def extract(self, event=None):
#@+<< docstring for extract command >>
#@+node:ekr.20201113130021.1: *3* << docstring for extract command >>
r"""
Create child node from the selected body text.
1. If the selection starts with a section reference, the section
name becomes the child's headline. All following lines become
the child's body text. The section reference line remains in
the original body text.
2. If the selection looks like a definition line (for the Python,
JavaScript, CoffeeScript or Clojure languages) the
class/function/method name becomes the child's headline and all
selected lines become the child's body text.
You may add additional regex patterns for definition lines using
    @data extract-patterns nodes. Each line of the body text should be a
valid regex pattern. Lines starting with # are comment lines. Use \#
for patterns starting with #.
3. Otherwise, the first line becomes the child's headline, and all
selected lines become the child's body text.
"""
#@-<< docstring for extract command >>
c, u, w = self, self.undoer, self.frame.body.wrapper
undoType = 'Extract'
# Set data.
head, lines, tail, oldSel, oldYview = c.getBodyLines()
if not lines:
return # Nothing selected.
#
# Remove leading whitespace.
junk, ws = g.skip_leading_ws_with_indent(lines[0], 0, c.tab_width)
lines = [g.removeLeadingWhitespace(s, ws, c.tab_width) for s in lines]
h = lines[0].strip()
ref_h = extractRef(c, h).strip()
def_h = extractDef_find(c, lines)
if ref_h:
h, b, middle = ref_h, lines[1:], ' ' * ws + lines[0] # By vitalije.
elif def_h:
h, b, middle = def_h, lines, ''
else:
h, b, middle = lines[0].strip(), lines[1:], ''
#
# Start the outer undo group.
u.beforeChangeGroup(c.p, undoType)
undoData = u.beforeInsertNode(c.p)
p = createLastChildNode(c, c.p, h, ''.join(b))
u.afterInsertNode(p, undoType, undoData)
#
# Start inner undo.
if oldSel:
i, j = oldSel
w.setSelectionRange(i, j, insert=j)
bunch = u.beforeChangeBody(c.p) # Not p.
#
# Update the text and selection
c.p.v.b = head + middle + tail # Don't redraw.
w.setAllText(head + middle + tail)
i = len(head)
j = max(i, len(head) + len(middle) - 1)
w.setSelectionRange(i, j, insert=j)
#
# End the inner undo.
u.afterChangeBody(c.p, undoType, bunch)
#
# Scroll as necessary.
if oldYview:
w.setYScrollPosition(oldYview)
else:
w.seeInsertPoint()
#
# Add the changes to the outer undo group.
u.afterChangeGroup(c.p, undoType=undoType)
p.parent().expand()
c.redraw(p.parent()) # A bit more convenient than p.
c.bodyWantsFocus()
# Compatibility
g.command_alias('extractSection', extract)
g.command_alias('extractPythonMethod', extract)
#@+node:ekr.20171123135625.20: *3* def createLastChildNode
def createLastChildNode(c, parent, headline, body):
"""A helper function for the three extract commands."""
# #1955: don't strip trailing lines.
if not body:
body = ""
p = parent.insertAsLastChild()
p.initHeadString(headline)
p.setBodyString(body)
p.setDirty()
c.validateOutline()
return p
#@+node:ekr.20171123135625.24: *3* def extractDef
extractDef_patterns = (
re.compile(
r'\((?:def|defn|defui|deftype|defrecord|defonce)\s+(\S+)'), # clojure definition
re.compile(r'^\s*(?:def|class)\s+(\w+)'), # python definitions
re.compile(r'^\bvar\s+(\w+)\s*=\s*function\b'), # js function
re.compile(r'^(?:export\s)?\s*function\s+(\w+)\s*\('), # js function
re.compile(r'\b(\w+)\s*:\s*function\s'), # js function
re.compile(r'\.(\w+)\s*=\s*function\b'), # js function
re.compile(r'(?:export\s)?\b(\w+)\s*=\s(?:=>|->)'), # coffeescript function
re.compile(
r'(?:export\s)?\b(\w+)\s*=\s(?:\([^)]*\))\s*(?:=>|->)'), # coffeescript function
re.compile(r'\b(\w+)\s*:\s(?:=>|->)'), # coffeescript function
re.compile(r'\b(\w+)\s*:\s(?:\([^)]*\))\s*(?:=>|->)'), # coffeescript function
)
def extractDef(c, s):
"""
Return the defined function/method/class name if s
looks like definition. Tries several different languages.
"""
for pat in c.config.getData('extract-patterns') or []:
try:
pat = re.compile(pat)
m = pat.search(s)
if m: return m.group(1)
except Exception:
g.es_print('bad regex in @data extract-patterns', color='blue')
g.es_print(pat)
for pat in extractDef_patterns:
m = pat.search(s)
if m: return m.group(1)
return ''
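# Illustrative examples (not in the original source), assuming no @data
# extract-patterns overrides:
#   extractDef(c, "def spam(x):")  -> "spam"
#   extractDef(c, "class Eggs:")   -> "Eggs"
#   extractDef(c, "print(spam)")   -> ""   (no definition pattern matches)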
#@+node:ekr.20171123135625.26: *3* def extractDef_find
def extractDef_find(c, lines):
for line in lines:
def_h = extractDef(c, line.strip())
if def_h:
return def_h
return None
#@+node:ekr.20171123135625.25: *3* def extractRef
def extractRef(c, s):
"""Return s if it starts with a section name."""
i = s.find('<<')
j = s.find('>>')
if -1 < i < j:
return s
i = s.find('@<')
j = s.find('@>')
if -1 < i < j:
return s
return ''
#@+node:ekr.20171123135625.27: ** c_ec.extractSectionNames & helper
@g.commander_command('extract-names')
def extractSectionNames(self, event=None):
"""
Create child nodes for every section reference in the selected text.
- The headline of each new child node is the section reference.
- The body of each child node is empty.
"""
c = self
current = c.p
u = c.undoer
undoType = 'Extract Section Names'
body = c.frame.body
head, lines, tail, oldSel, oldYview = c.getBodyLines()
if not lines:
g.warning('No lines selected')
return
u.beforeChangeGroup(current, undoType)
found = False
for s in lines:
name = findSectionName(c, s)
if name:
undoData = u.beforeInsertNode(current)
p = createLastChildNode(c, current, name, None)
u.afterInsertNode(p, undoType, undoData)
found = True
c.validateOutline()
if found:
u.afterChangeGroup(current, undoType)
c.redraw(p)
else:
g.warning("selected text should contain section names")
# Restore the selection.
i, j = oldSel
w = body.wrapper
if w:
w.setSelectionRange(i, j)
w.setFocus()
#@+node:ekr.20171123135625.28: *3* def findSectionName
def findSectionName(self, s):
head1 = s.find("<<")
if head1 > -1:
head2 = s.find(">>", head1)
else:
head1 = s.find("@<")
if head1 > -1:
head2 = s.find("@>", head1)
if head1 == -1 or head2 == -1 or head1 > head2:
name = None
else:
name = s[head1 : head2 + 2]
return name
#@+node:ekr.20171123135625.15: ** c_ec.findMatchingBracket
@g.commander_command('match-brackets')
@g.commander_command('select-to-matching-bracket')
def findMatchingBracket(self, event=None):
"""Select the text between matching brackets."""
c, p = self, self.p
if g.app.batchMode:
c.notValidInBatchMode("Match Brackets")
return
language = g.getLanguageAtPosition(c, p)
if language == 'perl':
g.es('match-brackets not supported for', language)
else:
g.MatchBrackets(c, p, language).run()
#@+node:ekr.20171123135625.9: ** c_ec.fontPanel
@g.commander_command('set-font')
def fontPanel(self, event=None):
"""Open the font dialog."""
c = self; frame = c.frame
if not frame.fontPanel:
frame.fontPanel = g.app.gui.createFontPanel(c)
frame.fontPanel.bringToFront()
#@+node:ekr.20110402084740.14490: ** c_ec.goToNext/PrevHistory
@g.commander_command('goto-next-history-node')
def goToNextHistory(self, event=None):
"""Go to the next node in the history list."""
c = self
c.nodeHistory.goNext()
@g.commander_command('goto-prev-history-node')
def goToPrevHistory(self, event=None):
"""Go to the previous node in the history list."""
c = self
c.nodeHistory.goPrev()
#@+node:ekr.20171123135625.30: ** c_ec.alwaysIndentBody (always-indent-region)
@g.commander_command('always-indent-region')
def alwaysIndentBody(self, event=None):
"""
The always-indent-region command indents each line of the selected body
text. The @tabwidth directive in effect determines amount of
indentation.
"""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# #1801: Don't rely on bindings to ensure that we are editing the body.
event_w = event and event.w
if event_w != w:
c.insertCharFromEvent(event)
return
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Initial data.
sel_1, sel_2 = w.getSelectionRange()
tab_width = c.getTabWidth(p)
head, lines, tail, oldSel, oldYview = self.getBodyLines()
#
# Calculate the result.
changed, result = False, []
for line in lines:
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
s = g.computeLeadingWhitespace(width + abs(tab_width), tab_width) + line[i:]
if s != line:
changed = True
result.append(s)
if not changed:
return
#
# Set p.b and w's text first.
middle = ''.join(result)
all = head + middle + tail
p.b = all # Sets dirty and changed bits.
w.setAllText(all)
#
# Calculate the proper selection range (i, j, ins).
if sel_1 == sel_2:
line = result[0]
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
i = j = len(head) + i
else:
i = len(head)
j = len(head) + len(middle)
if middle.endswith('\n'): # #1742.
j -= 1
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
u.afterChangeBody(p, 'Indent Region', bunch)
#@+node:ekr.20210104123442.1: ** c_ec.indentBody (indent-region)
@g.commander_command('indent-region')
def indentBody(self, event=None):
"""
The indent-region command indents each line of the selected body text.
Unlike the always-indent-region command, this command inserts a tab
(soft or hard) when there is no selected text.
The @tabwidth directive in effect determines amount of indentation.
"""
c, event_w, w = self, event and event.w, self.frame.body.wrapper
# #1801: Don't rely on bindings to ensure that we are editing the body.
if event_w != w:
c.insertCharFromEvent(event)
return
# # 1739. Special case for a *plain* tab bound to indent-region.
sel_1, sel_2 = w.getSelectionRange()
if sel_1 == sel_2:
char = getattr(event, 'char', None)
stroke = getattr(event, 'stroke', None)
if char == '\t' and stroke and stroke.isPlainKey():
c.editCommands.selfInsertCommand(event) # Handles undo.
return
c.alwaysIndentBody(event)
#@+node:ekr.20171123135625.38: ** c_ec.insertBodyTime
@g.commander_command('insert-body-time')
def insertBodyTime(self, event=None):
"""Insert a time/date stamp at the cursor."""
c = self; undoType = 'Insert Body Time'
w = c.frame.body.wrapper
if g.app.batchMode:
c.notValidInBatchMode(undoType)
return
oldSel = w.getSelectionRange()
w.deleteTextSelection()
s = self.getTime(body=True)
i = w.getInsertPoint()
w.insert(i, s)
c.frame.body.onBodyChanged(undoType, oldSel=oldSel)
#@+node:ekr.20171123135625.52: ** c_ec.justify-toggle-auto
@g.commander_command("justify-toggle-auto")
def justify_toggle_auto(self, event=None):
c = self
if c.editCommands.autojustify == 0:
c.editCommands.autojustify = abs(c.config.getInt("autojustify") or 0)
if c.editCommands.autojustify:
g.es(f"Autojustify on, @int autojustify == {c.editCommands.autojustify}")
else:
g.es("Set @int autojustify in @settings")
else:
c.editCommands.autojustify = 0
g.es("Autojustify off")
#@+node:ekr.20190210095609.1: ** c_ec.line_to_headline
@g.commander_command('line-to-headline')
def line_to_headline(self, event=None):
"""
    Create a child node from the selected line.
    Cut the selected line and make it the new node's headline.
"""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
undoType = 'line-to-headline'
ins, s = w.getInsertPoint(), p.b
i = g.find_line_start(s, ins)
j = g.skip_line(s, i)
line = s[i:j].strip()
if not line:
return
u.beforeChangeGroup(p, undoType)
#
# Start outer undo.
undoData = u.beforeInsertNode(p)
p2 = p.insertAsLastChild()
p2.h = line
u.afterInsertNode(p2, undoType, undoData)
#
# "before" snapshot.
bunch = u.beforeChangeBody(p)
p.b = s[:i] + s[j:]
w.setInsertPoint(i)
p2.setDirty()
c.setChanged()
#
# "after" snapshot.
u.afterChangeBody(p, undoType, bunch)
#
# Finish outer undo.
u.afterChangeGroup(p, undoType=undoType)
c.redraw_after_icons_changed()
p.expand()
c.redraw(p)
c.bodyWantsFocus()
#@+node:ekr.20171123135625.11: ** c_ec.preferences
@g.commander_command('settings')
def preferences(self, event=None):
"""Handle the preferences command."""
c = self
c.openLeoSettings()
#@+node:ekr.20171123135625.40: ** c_ec.reformatBody
@g.commander_command('reformat-body')
def reformatBody(self, event=None):
"""Reformat all paragraphs in the body."""
c, p = self, self.p
undoType = 'reformat-body'
w = c.frame.body.wrapper
c.undoer.beforeChangeGroup(p, undoType)
w.setInsertPoint(0)
while 1:
progress = w.getInsertPoint()
c.reformatParagraph(event, undoType=undoType)
ins = w.getInsertPoint()
s = w.getAllText()
w.setInsertPoint(ins)
if ins <= progress or ins >= len(s):
break
c.undoer.afterChangeGroup(p, undoType)
#@+node:ekr.20171123135625.41: ** c_ec.reformatParagraph & helpers
@g.commander_command('reformat-paragraph')
def reformatParagraph(self, event=None, undoType='Reformat Paragraph'):
"""
    Reformat a text paragraph.
    Wraps the concatenated text to the current page-width setting. Leading tabs
    are sized to the current tab-width setting. The first and second lines of
    the original text determine the leading whitespace of the reformatted text.
    Hanging indentation is honored.
    The paragraph is bounded by the start of the body, the end of the body, and
    blank lines. The paragraph containing the insertion cursor is selected.
"""
c, w = self, self.frame.body.wrapper
if g.app.batchMode:
c.notValidInBatchMode("reformat-paragraph")
return
# Set the insertion point for find_bound_paragraph.
if w.hasSelection():
i, j = w.getSelectionRange()
w.setInsertPoint(i)
head, lines, tail = find_bound_paragraph(c)
if not lines:
return
oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)
indents, leading_ws = rp_get_leading_ws(c, lines, tabWidth)
result = rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth)
rp_reformat(c, head, oldSel, oldYview, original, result, tail, undoType)
#@+node:ekr.20171123135625.43: *3* function: ends_paragraph & single_line_paragraph
def ends_paragraph(s):
"""Return True if s is a blank line."""
return not s.strip()
def single_line_paragraph(s):
"""Return True if s is a single-line paragraph."""
return s.startswith('@') or s.strip() in ('"""', "'''")
#@+node:ekr.20171123135625.42: *3* function: find_bound_paragraph
def find_bound_paragraph(c):
"""
Return the lines of a paragraph to be reformatted.
This is a convenience method for the reformat-paragraph command.
"""
head, ins, tail = c.frame.body.getInsertLines()
head_lines = g.splitLines(head)
tail_lines = g.splitLines(tail)
result = []
insert_lines = g.splitLines(ins)
para_lines = insert_lines + tail_lines
# If the present line doesn't start a paragraph,
# scan backward, adding trailing lines of head to ins.
if insert_lines and not startsParagraph(insert_lines[0]):
n = 0 # number of moved lines.
for i, s in enumerate(reversed(head_lines)):
if ends_paragraph(s) or single_line_paragraph(s):
break
elif startsParagraph(s):
n += 1
break
else: n += 1
if n > 0:
para_lines = head_lines[-n :] + para_lines
head_lines = head_lines[: -n]
ended, started = False, False
for i, s in enumerate(para_lines):
if started:
if ends_paragraph(s) or startsParagraph(s):
ended = True
break
else:
result.append(s)
elif s.strip():
result.append(s)
started = True
if ends_paragraph(s) or single_line_paragraph(s):
i += 1
ended = True
break
else:
head_lines.append(s)
if started:
head = g.joinLines(head_lines)
tail_lines = para_lines[i:] if ended else []
tail = g.joinLines(tail_lines)
return head, result, tail # string, list, string
return None, None, None
#@+node:ekr.20171123135625.45: *3* function: rp_get_args
def rp_get_args(c):
"""Compute and return oldSel,oldYview,original,pageWidth,tabWidth."""
body = c.frame.body
w = body.wrapper
d = c.scanAllDirectives(c.p)
if c.editCommands.fillColumn > 0:
pageWidth = c.editCommands.fillColumn
else:
pageWidth = d.get("pagewidth")
tabWidth = d.get("tabwidth")
original = w.getAllText()
oldSel = w.getSelectionRange()
oldYview = w.getYScrollPosition()
return oldSel, oldYview, original, pageWidth, tabWidth
#@+node:ekr.20171123135625.46: *3* function: rp_get_leading_ws
def rp_get_leading_ws(c, lines, tabWidth):
"""Compute and return indents and leading_ws."""
# c = self
indents = [0, 0]
leading_ws = ["", ""]
for i in (0, 1):
if i < len(lines):
# Use the original, non-optimized leading whitespace.
leading_ws[i] = ws = g.get_leading_ws(lines[i])
indents[i] = g.computeWidth(ws, tabWidth)
indents[1] = max(indents)
if len(lines) == 1:
leading_ws[1] = leading_ws[0]
return indents, leading_ws
#@+node:ekr.20171123135625.47: *3* function: rp_reformat
def rp_reformat(c, head, oldSel, oldYview, original, result, tail, undoType):
"""Reformat the body and update the selection."""
p, u, w = c.p, c.undoer, c.frame.body.wrapper
s = head + result + tail
changed = original != s
bunch = u.beforeChangeBody(p)
if changed:
w.setAllText(s) # Destroys coloring.
#
# #1748: Always advance to the next paragraph.
i = len(head)
j = max(i, len(head) + len(result) - 1)
ins = j + 1
while ins < len(s):
i, j = g.getLine(s, ins)
line = s[i:j]
# It's annoying, imo, to treat @ lines differently.
if line.isspace():
ins = j + 1
else:
ins = i
break
ins = min(ins, len(s))
w.setSelectionRange(ins, ins, insert=ins)
#
# Show more lines, if they exist.
k = g.see_more_lines(s, ins, 4)
p.v.insertSpot = ins
w.see(k) # New in 6.4. w.see works!
if not changed:
return
#
# Finish.
p.v.b = s # p.b would cause a redraw.
u.afterChangeBody(p, undoType, bunch)
w.setXScrollPosition(0) # Never scroll horizontally.
#@+node:ekr.20171123135625.48: *3* function: rp_wrap_all_lines
def rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth):
"""Compute the result of wrapping all lines."""
trailingNL = lines and lines[-1].endswith('\n')
lines = [z[:-1] if z.endswith('\n') else z for z in lines]
if lines: # Bug fix: 2013/12/22.
s = lines[0]
if startsParagraph(s):
# Adjust indents[1]
# Similar to code in startsParagraph(s)
i = 0
if s[0].isdigit():
while i < len(s) and s[i].isdigit():
i += 1
if g.match(s, i, ')') or g.match(s, i, '.'):
i += 1
elif s[0].isalpha():
if g.match(s, 1, ')') or g.match(s, 1, '.'):
i = 2
elif s[0] == '-':
i = 1
# Never decrease indentation.
i = g.skip_ws(s, i + 1)
if i > indents[1]:
indents[1] = i
leading_ws[1] = ' ' * i
# Wrap the lines, decreasing the page width by indent.
result_list = g.wrap_lines(lines,
pageWidth - indents[1],
pageWidth - indents[0])
# prefix with the leading whitespace, if any
paddedResult = []
paddedResult.append(leading_ws[0] + result_list[0])
for line in result_list[1:]:
paddedResult.append(leading_ws[1] + line)
# Convert the result to a string.
result = '\n'.join(paddedResult)
if trailingNL:
result = result + '\n'
return result
#@+node:ekr.20171123135625.44: *3* function: startsParagraph
def startsParagraph(s):
"""Return True if line s starts a paragraph."""
if not s.strip():
val = False
elif s.strip() in ('"""', "'''"):
val = True
elif s[0].isdigit():
i = 0
while i < len(s) and s[i].isdigit():
i += 1
val = g.match(s, i, ')') or g.match(s, i, '.')
elif s[0].isalpha():
# Careful: single characters only.
# This could cause problems in some situations.
val = (
(g.match(s, 1, ')') or g.match(s, 1, '.')) and
(len(s) < 2 or s[2] in ' \t\n'))
else:
val = s.startswith('@') or s.startswith('-')
return val
#@+node:ekr.20201124191844.1: ** c_ec.reformatSelection
@g.commander_command('reformat-selection')
def reformatSelection(self, event=None, undoType='Reformat Paragraph'):
"""
Reformat the selected text, as in reformat-paragraph, but without
expanding the selection past the selected lines.
"""
c, undoType = self, 'reformat-selection'
p, u, w = c.p, c.undoer, c.frame.body.wrapper
if g.app.batchMode:
c.notValidInBatchMode(undoType)
return
bunch = u.beforeChangeBody(p)
oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)
head, middle, tail = c.frame.body.getSelectionLines()
lines = g.splitLines(middle)
if not lines:
return
indents, leading_ws = rp_get_leading_ws(c, lines, tabWidth)
result = rp_wrap_all_lines(c, indents, leading_ws, lines, pageWidth)
s = head + result + tail
if s == original:
return
#
# Update the text and the selection.
w.setAllText(s) # Destroys coloring.
i = len(head)
j = max(i, len(head) + len(result) - 1)
j = min(j, len(s))
w.setSelectionRange(i, j, insert=j)
#
# Finish.
p.v.b = s # p.b would cause a redraw.
u.afterChangeBody(p, undoType, bunch)
w.setXScrollPosition(0) # Never scroll horizontally.
#@+node:ekr.20171123135625.12: ** c_ec.show/hide/toggleInvisibles
@g.commander_command('hide-invisibles')
def hideInvisibles(self, event=None):
"""Hide invisible (whitespace) characters."""
c = self
showInvisiblesHelper(c, False)
@g.commander_command('show-invisibles')
def showInvisibles(self, event=None):
"""Show invisible (whitespace) characters."""
c = self
showInvisiblesHelper(c, True)
@g.commander_command('toggle-invisibles')
def toggleShowInvisibles(self, event=None):
"""Toggle showing of invisible (whitespace) characters."""
c = self
colorizer = c.frame.body.getColorizer()
showInvisiblesHelper(c, not colorizer.showInvisibles)
def showInvisiblesHelper(c, val):
frame = c.frame
colorizer = frame.body.getColorizer()
colorizer.showInvisibles = val
colorizer.highlighter.showInvisibles = val
# It is much easier to change the menu name here than in the menu updater.
menu = frame.menu.getMenu("Edit")
index = frame.menu.getMenuLabel(menu,
'Hide Invisibles' if val else 'Show Invisibles')
if index is None:
if val: frame.menu.setMenuLabel(menu, "Show Invisibles", "Hide Invisibles")
else: frame.menu.setMenuLabel(menu, "Hide Invisibles", "Show Invisibles")
# #240: Set the status bits here.
if hasattr(frame.body, 'set_invisibles'):
frame.body.set_invisibles(c)
c.frame.body.recolor(c.p)
#@+node:ekr.20171123135625.55: ** c_ec.toggleAngleBrackets
@g.commander_command('toggle-angle-brackets')
def toggleAngleBrackets(self, event=None):
"""Add or remove double angle brackets from the headline of the selected node."""
c = self; p = c.p
if g.app.batchMode:
c.notValidInBatchMode("Toggle Angle Brackets")
return
c.endEditing()
s = p.h.strip()
# 2019/09/12: Guard against black.
lt = "<<"
rt = ">>"
if s[0:2] == lt or s[-2:] == rt:
if s[0:2] == "<<": s = s[2:]
if s[-2:] == ">>": s = s[:-2]
s = s.strip()
else:
s = g.angleBrackets(' ' + s + ' ')
p.setHeadString(s)
p.setDirty() # #1449.
c.setChanged() # #1449.
c.redrawAndEdit(p, selectAll=True)
#@+node:ekr.20171123135625.49: ** c_ec.unformatParagraph & helper
@g.commander_command('unformat-paragraph')
def unformatParagraph(self, event=None, undoType='Unformat Paragraph'):
"""
Unformat a text paragraph. Removes all extra whitespace in a paragraph.
    The paragraph is bounded by the start of the body, the end of the body, and
    blank lines. The paragraph containing the insertion cursor is selected.
"""
c = self
body = c.frame.body
w = body.wrapper
if g.app.batchMode:
c.notValidInBatchMode("unformat-paragraph")
return
if w.hasSelection():
i, j = w.getSelectionRange()
w.setInsertPoint(i)
oldSel, oldYview, original, pageWidth, tabWidth = rp_get_args(c)
head, lines, tail = find_bound_paragraph(c)
if lines:
result = ' '.join([z.strip() for z in lines]) + '\n'
unreformat(c, head, oldSel, oldYview, original, result, tail, undoType)
#@+node:ekr.20171123135625.50: *3* function: unreformat
def unreformat(c, head, oldSel, oldYview, original, result, tail, undoType):
"""unformat the body and update the selection."""
body, w = c.frame.body, c.frame.body.wrapper
s = head + result + tail
ins = max(len(head), len(head) + len(result) - 1)
w.setAllText(s) # Destroys coloring.
changed = original != s
if changed:
body.onBodyChanged(undoType, oldSel=oldSel)
# Advance to the next paragraph.
ins += 1 # Move past the selection.
while ins < len(s):
i, j = g.getLine(s, ins)
line = s[i:j]
if line.isspace():
ins = j + 1
else:
ins = i
break
c.recolor() # Required.
w.setSelectionRange(ins, ins, insert=ins)
# More useful than for reformat-paragraph.
w.see(ins)
# Make sure we never scroll horizontally.
w.setXScrollPosition(0)
#@+node:ekr.20180410054716.1: ** c_ec: insert-jupyter-toc & insert-markdown-toc
@g.commander_command('insert-jupyter-toc')
def insertJupyterTOC(self, event=None):
"""
Insert a Jupyter table of contents at the cursor,
replacing any selected text.
"""
insert_toc(c=self, kind='jupyter')
@g.commander_command('insert-markdown-toc')
def insertMarkdownTOC(self, event=None):
"""
Insert a Markdown table of contents at the cursor,
replacing any selected text.
"""
insert_toc(c=self, kind='markdown')
#@+node:ekr.20180410074238.1: *3* insert_toc
def insert_toc(c, kind):
"""Insert a table of contents at the cursor."""
undoType = f"Insert {kind.capitalize()} TOC"
w = c.frame.body.wrapper
if g.app.batchMode:
c.notValidInBatchMode(undoType)
return
oldSel = w.getSelectionRange()
w.deleteTextSelection()
s = make_toc(c, kind=kind, root=c.p)
i = w.getInsertPoint()
w.insert(i, s)
c.frame.body.onBodyChanged(undoType, oldSel=oldSel)
#@+node:ekr.20180410054926.1: *3* make_toc
def make_toc(c, kind, root):
"""Return the toc for root.b as a list of lines."""
def cell_type(p):
language = g.getLanguageAtPosition(c, p)
return 'markdown' if language in ('jupyter', 'markdown') else 'python'
def clean_headline(s):
# Surprisingly tricky. This could remove too much, but better to be safe.
aList = [ch for ch in s if ch in '-: ' or ch.isalnum()]
return ''.join(aList).rstrip('-').strip()
result: List[str] = []
stack: List[int] = []
for p in root.subtree():
if cell_type(p) == 'markdown':
level = p.level() - root.level()
if len(stack) < level:
stack.append(1)
else:
stack = stack[:level]
n = stack[-1]
stack[-1] = n + 1
# Use bullets
title = clean_headline(p.h)
url = clean_headline(p.h.replace(' ', '-'))
if kind == 'markdown':
url = url.lower()
line = f"{' ' * 4 * (level - 1)}- [{title}](#{url})\n"
result.append(line)
if result:
result.append('\n')
return ''.join(result)
#@-others
#@-leo
|
the-stack_106_27364 | """OpenAQ Air Quality Dashboard with Flask."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import openaq
import aq_functions
APP = Flask(__name__)
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB = SQLAlchemy(APP)
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
return f"<br>Time {self.datetime} --- Value {self.value}"
@APP.route('/refresh')
def refresh():
"""Pull fresh data from Open AQ and replace existing data."""
DB.drop_all()
DB.create_all()
# Get data from OpenAQ
response = aq_functions.get_latest_observations()
# Make Record objects with it and add to db
for record in response:
db_record = Record(datetime=record[0], value=record[1])
DB.session.add(db_record)
DB.session.commit()
return 'Data refreshed!'
@APP.route('/')
def root():
"""Base view."""
danger = Record.query.filter(Record.value >= 10).all()
return str(danger)
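# Hedged usage note (not part of the original app): when run directly, the
# Flask development server is assumed; the debug flag below is an illustrative
# choice only.
if __name__ == '__main__':
    APP.run(debug=True)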
|
the-stack_106_27365 | import tensorflow as tf
import experience as xp
import numpy as np
import dqn_model
import os
class Agent:
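    """
    DQN agent supporting several Rainbow-style extensions, each toggled via the
    constructor arguments: double DQN (``use_double``), dueling heads
    (``dueling``), n-step returns (``n_steps``), a categorical/C51 value
    distribution (``use_categorical`` with ``n_atoms``, ``v_min``, ``v_max``)
    and prioritized experience replay (``use_priority`` with ``alpha``/``beta``).
    """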
def __init__(self, env, replay_size, optimizer, batch_size, n_steps, gamma, use_double=True, use_dense=None,
dueling=False, use_categorical=False, n_atoms=None, v_min=None, v_max=None, use_priority=False,
alpha=0.6, beta=0.4, train_steps=5000000):
net = dqn_model.DQN if len(env.observation_space.shape) != 1 else dqn_model.DQNNoConvolution
self.env = env
self.state = None
self.update_count = 0
self.total_reward = 0.0
self.n_steps = n_steps
self.use_double = use_double
self.n_atoms = n_atoms
self.v_min = v_min
self.v_max = v_max
self.beta = beta
self.use_priority = use_priority
if use_priority:
self.exp_buffer = xp.PriorityBuffer(replay_size, gamma, n_steps, alpha)
else:
self.exp_buffer = xp.ExperienceBuffer(replay_size, gamma, n_steps)
self.net = net(env.observation_space.shape, env.action_space.n, use_dense=use_dense, dueling=dueling,
use_distributional=use_categorical, n_atoms=n_atoms, v_min=v_min, v_max=v_max)
self.tgt_net = net(env.observation_space.shape, env.action_space.n, use_dense=use_dense, dueling=dueling,
use_distributional=use_categorical, n_atoms=n_atoms, v_min=v_min, v_max=v_max)
self.params = self.net.trainable_variables
self.optimizer = optimizer
self.batch_size = batch_size
self.use_categorical = use_categorical
self.train_steps = train_steps
self._reset()
def _reset(self):
self.state = self.env.reset()
self.total_reward = 0.0
def play_step(self, epsilon=0.0):
done_reward = None
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
state_a = np.expand_dims(np.array(self.state, copy=False, dtype=np.float32), 0) / 255.0
state_v = tf.convert_to_tensor(state_a)
if self.use_categorical:
q_vals_v = self.net.q_values(state_v)
else:
q_vals_v = self.net(state_v)
act_v = tf.math.argmax(q_vals_v, axis=1)
action = int(act_v.numpy()[0])
new_state, reward, is_done, _ = self.env.step(action)
self.total_reward += reward
exp = xp.Experience(self.state, action, reward, is_done)
self.exp_buffer.append(exp)
self.state = new_state
if is_done:
done_reward = self.total_reward
self._reset()
return done_reward
def sync_weights(self):
self.tgt_net.model.set_weights(self.net.model.get_weights())
def load_checkpoint(self, path):
if os.path.exists(path):
print('Loading checkpoint')
self.net.model.load_weights(path)
self.tgt_net.model.set_weights(self.net.model.get_weights())
def save_checkpoint(self, path):
self.net.model.save_weights(path)
@tf.function
def ll(self, gamma, states_t, next_states_t, actions_t, rewards_t, done_mask, weights):
if self.use_double:
states_t = tf.concat([states_t, next_states_t], 0)
net_output = self.net(states_t)
# Calculate the current state action values
        state_action_values = tf.squeeze(
            tf.gather(net_output[:self.batch_size], tf.expand_dims(actions_t, 1), batch_dims=1), -1)
# Calculate the next state action values
if self.use_double:
next_state_actions = tf.argmax(net_output[self.batch_size:], axis=1)
next_state_values = tf.squeeze(
tf.gather(self.tgt_net(next_states_t), tf.expand_dims(next_state_actions, 1), batch_dims=1), -1)
else:
            next_state_values = tf.reduce_max(self.tgt_net(next_states_t), axis=1)
        # Zero the bootstrapped values for terminal transitions so the target
        # reduces to the (n-step) reward.
        next_state_values = tf.where(done_mask, tf.zeros_like(next_state_values), next_state_values)
        next_state_values = tf.stop_gradient(next_state_values)
# Bellman equation
expected_state_action_values = next_state_values * (gamma ** self.n_steps) + rewards_t
# Calculate loss
losses = tf.math.squared_difference(expected_state_action_values, state_action_values)
losses = tf.math.multiply(weights, losses)
return tf.reduce_mean(losses, axis=-1), tf.add(1.0e-5, losses)
@tf.function
def ll_dist(self, gamma, states_t, next_states_t, actions_t, rewards_t, done_mask, weights):
if self.use_double:
states_t = tf.concat([states_t, next_states_t], 0)
# Calculate current state probabilities
net_output = self.net(states_t)
state_action_dist = tf.nn.log_softmax(net_output[:self.batch_size], axis=-1)
state_action_dist = tf.squeeze(tf.gather(state_action_dist, tf.reshape(actions_t, [-1, 1, 1]), batch_dims=1))
# Calculate next state probabilities
target_net_output = tf.nn.softmax(self.tgt_net(next_states_t), axis=-1)
        # Pick greedy actions from the expected value of each distribution
        # (sum of probability * atom support); summing the probabilities alone
        # is always 1 and cannot discriminate between actions.
        support = tf.linspace(self.v_min, self.v_max, self.n_atoms)
        if self.use_double:
            next_state_probs = tf.nn.softmax(net_output[self.batch_size:], axis=-1)
            next_best_actions = tf.argmax(tf.reduce_sum(next_state_probs * support, -1), -1)
        else:
            next_best_actions = tf.argmax(tf.reduce_sum(target_net_output * support, -1), -1)
next_state_dist = tf.squeeze(tf.gather(target_net_output, tf.reshape(next_best_actions, [-1, 1, 1]),
batch_dims=1))
# Calculate the Bellman operator T to produce Tz
delta_z = (self.v_max - self.v_min) / (self.n_atoms - 1)
support = tf.linspace(self.v_min, self.v_max, self.n_atoms)
Tz = tf.expand_dims(rewards_t, -1) + tf.expand_dims(tf.cast(tf.logical_not(done_mask), tf.float32), -1) * (
gamma ** self.n_steps) * tf.expand_dims(support, 0)
Tz = tf.clip_by_value(Tz, self.v_min, self.v_max)
b = (Tz - self.v_min) / delta_z
l = tf.math.floor(b)
        u = tf.math.ceil(b)
# Fix disappearing probability mass
eq_mask = tf.equal(l, u)
u_greater = tf.greater(u, 0)
l_less = tf.less(l, self.n_atoms - 1.0)
l = tf.where(tf.logical_and(eq_mask, u_greater), x=l - 1, y=l)
u = tf.where(tf.logical_and(eq_mask, l_less), x=u + 1, y=u)
m = tf.zeros(self.batch_size * self.n_atoms)
offset = tf.linspace(0.0, ((self.batch_size - 1.0) * self.n_atoms), self.batch_size)
offset = tf.reshape(tf.tile(tf.expand_dims(offset, -1), [1, self.n_atoms]), [-1, 1])
m = tf.tensor_scatter_nd_add(
m,
tf.cast(tf.reshape(l, [-1, 1]) + offset, tf.int32),
tf.reshape(next_state_dist * (u - b), [-1])
)
m = tf.tensor_scatter_nd_add(
m,
tf.cast(tf.reshape(u, [-1, 1]) + offset, tf.int32),
tf.reshape(next_state_dist * (b - l), [-1])
)
m = tf.reshape(m, [self.batch_size, self.n_atoms])
m = tf.stop_gradient(m)
# Calculate loss
losses = -tf.reduce_sum(m * state_action_dist, -1)
losses = tf.multiply(weights, losses)
return tf.reduce_mean(losses, -1), losses
def calc_loss(self, batch, gamma):
if self.use_priority:
states, actions, rewards, dones, next_states, weights = batch[:6]
weights_t = tf.convert_to_tensor(weights)
else:
states, actions, rewards, dones, next_states = batch[:5]
weights_t = tf.ones_like(rewards)
states_t = tf.convert_to_tensor(states)
next_states_t = tf.convert_to_tensor(next_states)
actions_t = tf.convert_to_tensor(actions)
rewards_t = tf.convert_to_tensor(rewards)
done_mask = tf.convert_to_tensor(dones, dtype=bool)
if self.use_categorical:
return self.ll_dist(gamma, states_t, next_states_t, actions_t, rewards_t, done_mask, weights_t)
return self.ll(gamma, states_t, next_states_t, actions_t, rewards_t, done_mask, weights_t)
def step(self, gamma, update_exp_weights=True):
indices = None
beta = min(1.0, self.update_count / (self.train_steps * (1.0 - self.beta)) + self.beta)
batch = self.exp_buffer.sample(self.batch_size, beta)
if self.use_priority:
indices = batch[6]
with tf.GradientTape() as tape:
loss_t, losses = self.calc_loss(batch, gamma)
gradient = tape.gradient(loss_t, self.params)
self.optimizer.apply_gradients(zip(gradient, self.params))
if self.use_priority and update_exp_weights:
self.exp_buffer.update_weights(indices, losses.numpy())
self.update_count += 1
def buffer_size(self):
return len(self.exp_buffer)
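# A hedged usage sketch (kept as comments on purpose: the environment id,
# optimizer and hyperparameters are illustrative, and the exact constructor
# behaviour depends on the local `dqn_model` and `experience` modules above):
#
#   env = gym.make("PongNoFrameskip-v4")          # wrapped Atari env assumed
#   agent = Agent(env, replay_size=100_000,
#                 optimizer=tf.keras.optimizers.Adam(1e-4),
#                 batch_size=32, n_steps=3, gamma=0.99)
#   for frame in itertools.count():
#       agent.play_step(epsilon=max(0.02, 1.0 - frame / 100_000))
#       if agent.buffer_size() >= 10_000:
#           agent.step(gamma=0.99)
#       if frame % 1_000 == 0:
#           agent.sync_weights()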
|
the-stack_106_27367 | #!/usr/bin/env python3
# Copyright (c) the JPEG XL Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""build_cleaner.py: Update build files.
This tool keeps certain parts of the build files up to date.
"""
import argparse
import collections
import locale
import os
import re
import subprocess
import sys
import tempfile
def RepoFiles(src_dir):
"""Return the list of files from the source git repository"""
git_bin = os.environ.get('GIT_BIN', 'git')
files = subprocess.check_output([git_bin, '-C', src_dir, 'ls-files'])
ret = files.decode(locale.getpreferredencoding()).splitlines()
ret.sort()
return ret
def GetPrefixLibFiles(repo_files, prefix, suffixes=('.h', '.cc', '.ui')):
"""Gets the library files that start with the prefix and end with source
code suffix."""
prefix_files = [
fn for fn in repo_files
if fn.startswith(prefix) and any(fn.endswith(suf) for suf in suffixes)]
return prefix_files
# Type holding the different types of sources in libjxl:
# * decoder and common sources,
# * encoder-only sources,
# * tests-only sources,
# * google benchmark sources,
# * threads library sources,
# * extras library sources,
# * libjxl (encoder+decoder) public include/ headers and
# * threads public include/ headers.
JxlSources = collections.namedtuple(
'JxlSources', ['dec', 'enc', 'test', 'gbench', 'threads',
'extras', 'jxl_public_hdrs', 'threads_public_hdrs'])
def SplitLibFiles(repo_files):
"""Splits the library files into the different groups.
"""
testonly = (
'testdata.h', 'test_utils.h', '_test.h', '_test.cc',
# _testonly.* files are library code used in tests only.
'_testonly.h', '_testonly.cc'
)
main_srcs = GetPrefixLibFiles(repo_files, 'lib/jxl/')
extras_srcs = GetPrefixLibFiles(repo_files, 'lib/extras/')
test_srcs = [fn for fn in main_srcs
if any(patt in fn for patt in testonly)]
lib_srcs = [fn for fn in main_srcs
if not any(patt in fn for patt in testonly)]
# Google benchmark sources.
gbench_srcs = sorted(fn for fn in lib_srcs + extras_srcs
if fn.endswith('_gbench.cc'))
lib_srcs = [fn for fn in lib_srcs if fn not in gbench_srcs]
# Exclude optional codecs from extras.
exclude_extras = ['/codec_gif', '/codec_apng', '/codec_exr']
extras_srcs = [fn for fn in extras_srcs if fn not in gbench_srcs and
not any(patt in fn for patt in testonly) and
not any(patt in fn for patt in exclude_extras)]
enc_srcs = [fn for fn in lib_srcs
if os.path.basename(fn).startswith('enc_') or
os.path.basename(fn).startswith('butteraugli')]
enc_srcs.extend([
"lib/jxl/encode.cc",
"lib/jxl/encode_internal.h",
"lib/jxl/gaborish.cc",
"lib/jxl/gaborish.h",
"lib/jxl/huffman_tree.cc",
"lib/jxl/huffman_tree.h",
# Only the inlines in linalg.h header are used in the decoder.
# TODO(deymo): split out encoder only linalg.h functions.
"lib/jxl/linalg.cc",
"lib/jxl/optimize.cc",
"lib/jxl/optimize.h",
"lib/jxl/progressive_split.cc",
"lib/jxl/progressive_split.h",
# TODO(deymo): Add luminance.cc and luminance.h here too. Currently used
# by aux_out.h.
# dec_file is not intended to be part of the decoder library, so move it
# to the encoder source set
"lib/jxl/dec_file.cc",
"lib/jxl/dec_file.h",
])
# Temporarily remove enc_bit_writer from the encoder sources: a lot of
# decoder source code still needs to be split up into encoder and decoder.
# Including the enc_bit_writer in the decoder allows to build a working
# libjxl_dec library.
# TODO(lode): remove the dependencies of the decoder on enc_bit_writer and
# remove enc_bit_writer from the dec_srcs again.
enc_srcs.remove("lib/jxl/enc_bit_writer.cc")
enc_srcs.remove("lib/jxl/enc_bit_writer.h")
enc_srcs.sort()
enc_srcs_set = set(enc_srcs)
lib_srcs = [fn for fn in lib_srcs if fn not in enc_srcs_set]
# The remaining of the files are in the dec_library.
dec_srcs = lib_srcs
thread_srcs = GetPrefixLibFiles(repo_files, 'lib/threads/')
thread_srcs = [fn for fn in thread_srcs
if not any(patt in fn for patt in testonly)]
public_hdrs = GetPrefixLibFiles(repo_files, 'lib/include/jxl/')
threads_public_hdrs = [fn for fn in public_hdrs if '_parallel_runner' in fn]
jxl_public_hdrs = list(sorted(set(public_hdrs) - set(threads_public_hdrs)))
return JxlSources(dec_srcs, enc_srcs, test_srcs, gbench_srcs, thread_srcs,
extras_srcs, jxl_public_hdrs, threads_public_hdrs)
def CleanFile(args, filename, pattern_data_list):
"""Replace a pattern match with new data in the passed file.
Given a regular expression pattern with a single () match, it runs the regex
over the passed filename and replaces the match () with the new data. If
args.update is set, it will update the file with the new contents, otherwise
it will return True when no changes were needed.
Multiple pairs of (regular expression, new data) can be passed to the
pattern_data_list parameter and will be applied in order.
The regular expression must match at least once in the file.
"""
filepath = os.path.join(args.src_dir, filename)
with open(filepath, 'r') as f:
src_text = f.read()
if not pattern_data_list:
return True
new_text = src_text
for pattern, data in pattern_data_list:
offset = 0
chunks = []
for match in re.finditer(pattern, new_text):
chunks.append(new_text[offset:match.start(1)])
offset = match.end(1)
chunks.append(data)
if not chunks:
raise Exception('Pattern not found for %s: %r' % (filename, pattern))
chunks.append(new_text[offset:])
new_text = ''.join(chunks)
if new_text == src_text:
return True
if args.update:
print('Updating %s' % filename)
with open(filepath, 'w') as f:
f.write(new_text)
return True
else:
with tempfile.NamedTemporaryFile(
mode='w', prefix=os.path.basename(filename)) as new_file:
new_file.write(new_text)
new_file.flush()
subprocess.call(
['diff', '-u', filepath, '--label', 'a/' + filename, new_file.name,
'--label', 'b/' + filename])
return False
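# Worked example of the replacement mechanism above (purely illustrative): with
# pattern r'set\(FOO\n([^\)]+)\)' and data '  a.cc\n  b.cc\n', the text
# 'set(FOO\n  old.cc\n)' becomes 'set(FOO\n  a.cc\n  b.cc\n)'; only the single
# capture group is replaced, everything around it is preserved.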
def BuildCleaner(args):
repo_files = RepoFiles(args.src_dir)
ok = True
# jxl version
with open(os.path.join(args.src_dir, 'lib/CMakeLists.txt'), 'r') as f:
cmake_text = f.read()
gni_patterns = []
for varname in ('JPEGXL_MAJOR_VERSION', 'JPEGXL_MINOR_VERSION',
'JPEGXL_PATCH_VERSION'):
# Defined in CMakeLists.txt as "set(varname 1234)"
match = re.search(r'set\(' + varname + r' ([0-9]+)\)', cmake_text)
version_value = match.group(1)
gni_patterns.append((r'"' + varname + r'=([0-9]+)"', version_value))
jxl_src = SplitLibFiles(repo_files)
# libjxl
jxl_cmake_patterns = []
jxl_cmake_patterns.append(
(r'set\(JPEGXL_INTERNAL_SOURCES_DEC\n([^\)]+)\)',
''.join(' %s\n' % fn[len('lib/'):] for fn in jxl_src.dec)))
jxl_cmake_patterns.append(
(r'set\(JPEGXL_INTERNAL_SOURCES_ENC\n([^\)]+)\)',
''.join(' %s\n' % fn[len('lib/'):] for fn in jxl_src.enc)))
ok = CleanFile(
args, 'lib/jxl.cmake',
jxl_cmake_patterns) and ok
ok = CleanFile(
args, 'lib/jxl_benchmark.cmake',
[(r'set\(JPEGXL_INTERNAL_SOURCES_GBENCH\n([^\)]+)\)',
''.join(' %s\n' % fn[len('lib/'):] for fn in jxl_src.gbench))]) and ok
gni_patterns.append((
r'libjxl_dec_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):] for fn in jxl_src.dec)))
gni_patterns.append((
r'libjxl_enc_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):] for fn in jxl_src.enc)))
gni_patterns.append((
r'libjxl_gbench_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):] for fn in jxl_src.gbench)))
tests = [fn[len('lib/'):] for fn in jxl_src.test if fn.endswith('_test.cc')]
testlib = [fn[len('lib/'):] for fn in jxl_src.test
if not fn.endswith('_test.cc')]
gni_patterns.append((
r'libjxl_tests_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn for fn in tests)))
gni_patterns.append((
r'libjxl_testlib_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn for fn in testlib)))
# libjxl_threads
ok = CleanFile(
args, 'lib/jxl_threads.cmake',
[(r'set\(JPEGXL_THREADS_SOURCES\n([^\)]+)\)',
''.join(' %s\n' % fn[len('lib/'):] for fn in jxl_src.threads))]) and ok
gni_patterns.append((
r'libjxl_threads_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):] for fn in jxl_src.threads)))
# libjxl_extras
ok = CleanFile(
args, 'lib/jxl_extras.cmake',
[(r'set\(JPEGXL_EXTRAS_SOURCES\n([^\)]+)\)',
''.join(' %s\n' % fn[len('lib/'):] for fn in jxl_src.extras))]) and ok
gni_patterns.append((
r'libjxl_extras_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):] for fn in jxl_src.extras)))
# libjxl_profiler
profiler_srcs = [fn[len('lib/'):] for fn in repo_files
if fn.startswith('lib/profiler')]
ok = CleanFile(
args, 'lib/jxl_profiler.cmake',
[(r'set\(JPEGXL_PROFILER_SOURCES\n([^\)]+)\)',
''.join(' %s\n' % fn for fn in profiler_srcs))]) and ok
gni_patterns.append((
r'libjxl_profiler_sources = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn for fn in profiler_srcs)))
# Public headers.
gni_patterns.append((
r'libjxl_public_headers = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):]
for fn in jxl_src.jxl_public_hdrs)))
gni_patterns.append((
r'libjxl_threads_public_headers = \[\n([^\]]+)\]',
''.join(' "%s",\n' % fn[len('lib/'):]
for fn in jxl_src.threads_public_hdrs)))
# Update the list of tests. CMake version include test files in other libs,
# not just in libjxl.
tests = [fn[len('lib/'):] for fn in repo_files
if fn.endswith('_test.cc') and fn.startswith('lib/')]
ok = CleanFile(
args, 'lib/jxl_tests.cmake',
[(r'set\(TEST_FILES\n([^\)]+) ### Files before this line',
''.join(' %s\n' % fn for fn in tests))]) and ok
ok = CleanFile(
args, 'lib/jxl_tests.cmake',
[(r'set\(TESTLIB_FILES\n([^\)]+)\)',
''.join(' %s\n' % fn for fn in testlib))]) and ok
# Update lib.gni
ok = CleanFile(args, 'lib/lib.gni', gni_patterns) and ok
return ok
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--src-dir',
default=os.path.realpath(os.path.join(
os.path.dirname(__file__), '..')),
help='path to the build directory')
parser.add_argument('--update', default=False, action='store_true',
help='update the build files instead of only checking')
args = parser.parse_args()
if not BuildCleaner(args):
print('Build files need update.')
sys.exit(2)
if __name__ == '__main__':
main()
|
the-stack_106_27368 | import torch
def unitwise_norm(x, norm_type=2.0):
if x.ndim <= 1:
return x.norm(norm_type)
else:
# works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor
# might need special cases for other weights (possibly MHA) where this may not be true
return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True)
def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0):
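    """Adaptive gradient clipping (AGC): scale each parameter's gradient so that
    its unit-wise norm does not exceed ``clip_factor`` times the unit-wise norm
    of the parameter itself (clamped below by ``eps`` for near-zero weights), as
    proposed for NFNets (Brock et al., 2021).
    """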
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
for p in parameters:
if p.grad is None:
continue
p_data = p.detach()
g_data = p.grad.detach()
max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
grad_norm = unitwise_norm(g_data, norm_type=norm_type)
clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6))
new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad)
p.grad.detach().copy_(new_grads) |
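if __name__ == "__main__":
    # Minimal sketch of where adaptive_clip_grad fits in a training step; the
    # model, data and clip_factor below are illustrative only.
    model = torch.nn.Linear(8, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(4, 8), torch.randn(4, 2)
    loss = ((model(x) - y) ** 2).mean()
    loss.backward()
    adaptive_clip_grad(model.parameters(), clip_factor=0.01)  # clip before stepping
    opt.step()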
the-stack_106_27369 | import json
from django.contrib import messages
class AjaxMessagesMiddleware(object):
"""
Middleware to handle messages for AJAX requests.
If the AJAX response is already JSON, add a "messages" key to it (or
append to an existing "messages" key) a list of messages (each
message is an object with "level", "message", and "tags" keys).
If the AJAX response is currently html, turn it into JSON and stuff
the HTML content into the "html" key, adding a "messages" key as
well.
If the AJAX response is neither json nor html, return it as-is (with
no messages attached, and without iterating over messages).
If the AJAX response has a status code other than 200, or has an attribute
``no_messages`` that is ``True``, it will not be modified (and messages
will not be read).
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
handle_response = (
request.is_ajax() and
response.status_code == 200 and
not getattr(response, 'no_messages', False)
)
if handle_response:
content_type = response.get('content-type', 'None').split(";")[0]
content = response.content.decode('utf-8')
if content_type == "application/json":
data = json.loads(content)
elif content_type == "text/html":
data = {"html": content}
else:
return response
messagelist = data.setdefault("messages", [])
for message in messages.get_messages(request):
messagelist.append({
"level": message.level,
"message": str(message.message),
"tags": message.tags,
})
response.content = json.dumps(data)
response["content-type"] = "application/json"
response["content-length"] = len(response.content)
return response
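# Illustrative example of the transformation above (values are made up): an
# HTML AJAX response with body "<p>Saved</p>" and one queued success message
# (level 25 in Django) is rewritten to application/json as
#   {"html": "<p>Saved</p>",
#    "messages": [{"level": 25, "message": "Saved.", "tags": "success"}]}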
|
the-stack_106_27373 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average.
The new value of 'variable' can be set with the 'AssignSub' op as:
variable -= (1 - decay) * (variable - value)
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'
decay: A float Tensor or float value. The moving average decay.
name: Optional name of the returned operation.
Returns:
An Operation that updates 'variable' with the newly computed
moving average.
"""
with ops.op_scope([variable, value, decay], name, "AssignMovingAvg") as scope:
with ops.device(variable.device):
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
return state_ops.assign_sub(variable, (variable - value) * decay,
name=scope)
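# Numeric illustration of the equivalence noted in the docstring above: with
# variable = 10.0, value = 2.0 and decay = 0.9,
#   10.0 * 0.9 + 2.0 * (1 - 0.9)    = 9.2
#   10.0 - (1 - 0.9) * (10.0 - 2.0) = 9.2
# so the assign_sub form computes the same moving average.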
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
The `apply()` method adds shadow copies of trained variables and add ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1.
maintain_averages_op = ema.apply([var0, var1])
# Create an op that will update the moving averages after each training
  # step. This is what we will use in place of the usual training op.
with tf.control_dependencies([opt_op]):
training_op = tf.group(maintain_averages_op)
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the [Saver class](../../api_docs/python/train.md#Saver) for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
@@__init__
@@apply
@@average_name
@@average
@@variables_to_restore
"""
def __init__(self, decay, num_updates=None,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `Apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
    dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
name: String. Optional prefix name to use for the name of ops added in
`Apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._name = name
self._averages = {}
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
For `Tensor` objects, the shadow variables are initialized to 0.
    Shadow variables are created with `trainable=False` and added to the
`GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
`tf.all_variables()`.
Returns an op that updates all shadow variables as described above.
Note that `apply()` can be called multiple times with different lists of
variables.
Args:
var_list: A list of Variable or Tensor objects. The variables
and Tensors must be of types float32 or float64.
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not all float32 or float64.
ValueError: If the moving average of one of the variables is already
being computed.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
for var in var_list:
if var.dtype.base_dtype not in [dtypes.float32, dtypes.float64]:
raise TypeError("The variables must be float or double: %s" % var)
if var in self._averages:
raise ValueError("Moving average already computed for: %s" % var)
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
with ops.control_dependencies(None):
if isinstance(var, variables.Variable):
avg = slot_creator.create_slot(
var, var.initialized_value(), self._name,
colocate_with_primary=True)
else:
avg = slot_creator.create_zeros_slot(
var, self._name,
colocate_with_primary=(var.op.type == "Variable"))
self._averages[var] = avg
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
with ops.name_scope(self._name) as scope:
decay = ops.convert_to_tensor(self._decay, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(self._num_updates, dtypes.float32,
name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
updates.append(assign_moving_average(self._averages[var], var, decay))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
      is not maintained.
"""
return self._averages.get(var, None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: the name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
return var.op.name + "/" + self._name
def variables_to_restore(self):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Returns:
A map from restore_names to variables. The restore_name can be the
moving_average version of the variable name if it exist, or the original
variable name.
"""
name_map = {}
# Collect all the variables with moving average, including all
# the trainable variables and variables which have been explicitly
# added to the collection.
moving_avg_variables = list(set(variables.moving_average_variables() +
variables.trainable_variables()))
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving average as well.
for v in list(set(variables.all_variables()) - set(moving_avg_variables)):
if v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
|
the-stack_106_27374 | import pytest
from unittest import mock
import json
from django.core.exceptions import ValidationError
from awx.main.models import (
UnifiedJob,
InventoryUpdate,
Inventory,
Credential,
CredentialType,
InventorySource,
)
def test_cancel(mocker):
with mock.patch.object(UnifiedJob, 'cancel', return_value=True) as parent_cancel:
iu = InventoryUpdate()
iu.save = mocker.MagicMock()
build_job_explanation_mock = mocker.MagicMock()
iu._build_job_explanation = mocker.MagicMock(return_value=build_job_explanation_mock)
iu.cancel()
parent_cancel.assert_called_with(is_chain=False, job_explanation=None)
def test__build_job_explanation():
iu = InventoryUpdate(id=3, name='I_am_an_Inventory_Update')
job_explanation = iu._build_job_explanation()
assert job_explanation == 'Previous Task Canceled: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
'inventory_update',
'I_am_an_Inventory_Update',
3,
)
def test_valid_clean_insights_credential():
cred_type = CredentialType.defaults['insights']()
insights_cred = Credential(credential_type=cred_type)
inv = Inventory(insights_credential=insights_cred)
inv.clean_insights_credential()
def test_invalid_clean_insights_credential():
cred_type = CredentialType.defaults['scm']()
cred = Credential(credential_type=cred_type)
inv = Inventory(insights_credential=cred)
with pytest.raises(ValidationError) as e:
inv.clean_insights_credential()
assert json.dumps(str(e.value)) == json.dumps(str([u"Credential kind must be 'insights'."]))
def test_valid_kind_clean_insights_credential():
inv = Inventory(kind='smart')
inv.clean_insights_credential()
def test_invalid_kind_clean_insights_credential():
cred_type = CredentialType.defaults['insights']()
insights_cred = Credential(credential_type=cred_type)
inv = Inventory(kind='smart', insights_credential=insights_cred)
with pytest.raises(ValidationError) as e:
inv.clean_insights_credential()
assert json.dumps(str(e.value)) == json.dumps(str([u'Assignment not allowed for Smart Inventory']))
class TestControlledBySCM:
def test_clean_source_path_valid(self):
inv_src = InventorySource(source_path='/not_real/', source='scm')
inv_src.clean_source_path()
@pytest.mark.parametrize(
'source',
[
'ec2',
'manual',
],
)
def test_clean_source_path_invalid(self, source):
inv_src = InventorySource(source_path='/not_real/', source=source)
with pytest.raises(ValidationError):
inv_src.clean_source_path()
def test_clean_update_on_launch_update_on_project_update(self):
inv_src = InventorySource(update_on_project_update=True, update_on_launch=True, source='scm')
with pytest.raises(ValidationError):
inv_src.clean_update_on_launch()
|
the-stack_106_27377 | from typing import List, Tuple, Type, Union, Callable, Optional, Dict, Any
import torch as th
import torch.nn.functional as F
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.sac.policies import SACPolicy
class SAC(OffPolicyAlgorithm):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
(https://github.com/rail-berkeley/softlearning/)
and from Stable Baselines (https://github.com/hill-a/stable-baselines)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
Note: we use double q target and not value target as discussed
in https://github.com/hill-a/stable-baselines/issues/270
:param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: (GymEnv or str) The environment to learn from (if registered in Gym, can be str)
:param learning_rate: (float or callable) learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param batch_size: (int) Minibatch size for each gradient update
:param tau: (float) the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: (float) the discount factor
:param train_freq: (int) Update the model every ``train_freq`` steps.
    :param gradient_steps: (int) How many gradient updates after each step
:param n_episodes_rollout: (int) Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``
:param action_noise: (ActionNoise) the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param optimize_memory_usage: (bool) Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param target_update_interval: (int) update the target network every ``target_network_update_freq``
gradient steps.
:param target_entropy: (str or float) target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: (bool) Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: (int) Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: (bool) Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: (bool) Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param verbose: (int) the verbosity level: 0 no output, 1 info, 2 debug
:param seed: (int) Seed for the pseudo random generators
:param device: (str or th.device) Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
"""
def __init__(self, policy: Union[str, Type[SACPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Callable] = 3e-4,
buffer_size: int = int(1e6),
learning_starts: int = 100,
batch_size: int = 256,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: int = 1,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
action_noise: Optional[ActionNoise] = None,
optimize_memory_usage: bool = False,
ent_coef: Union[str, float] = 'auto',
target_update_interval: int = 1,
target_entropy: Union[str, float] = 'auto',
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Dict[str, Any] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = 'auto',
_init_setup_model: bool = True):
super(SAC, self).__init__(policy, env, SACPolicy, learning_rate,
buffer_size, learning_starts, batch_size,
tau, gamma, train_freq, gradient_steps,
n_episodes_rollout, action_noise,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose, device=device,
create_eval_env=create_eval_env, seed=seed,
use_sde=use_sde, sde_sample_freq=sde_sample_freq,
use_sde_at_warmup=use_sde_at_warmup,
optimize_memory_usage=optimize_memory_usage)
self.target_entropy = target_entropy
self.log_ent_coef = None # type: Optional[th.Tensor]
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.ent_coef_optimizer = None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(SAC, self)._setup_model()
self._create_aliases()
assert self.critic.n_critics == 2, "SAC only supports `n_critics=2` for now"
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == 'auto':
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'):
# Default initial value of ent_coef when learned
init_value = 1.0
if '_' in self.ent_coef:
init_value = float(self.ent_coef.split('_')[1])
assert init_value > 0., "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 64) -> None:
# Update optimizers learning rate
optimizers = [self.actor.optimizer, self.critic.optimizer]
if self.ent_coef_optimizer is not None:
optimizers += [self.ent_coef_optimizer]
# Update learning rate according to lr schedule
self._update_learning_rate(optimizers)
ent_coef_losses, ent_coefs = [], []
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# We need to sample because `log_std` may have changed between two gradient steps
if self.use_sde:
self.actor.reset_noise()
# Action by the current actor for the sampled state
actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)
log_prob = log_prob.reshape(-1, 1)
ent_coef_loss = None
if self.ent_coef_optimizer is not None:
# Important: detach the variable from the graph
# so we don't change it with other losses
# see https://github.com/rail-berkeley/softlearning/issues/60
ent_coef = th.exp(self.log_ent_coef.detach())
ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
ent_coef_losses.append(ent_coef_loss.item())
else:
ent_coef = self.ent_coef_tensor
ent_coefs.append(ent_coef.item())
# Optimize entropy coefficient, also called
# entropy temperature or alpha in the paper
if ent_coef_loss is not None:
self.ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
self.ent_coef_optimizer.step()
with th.no_grad():
# Select action according to policy
next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
# Compute the target Q value
target_q1, target_q2 = self.critic_target(replay_data.next_observations, next_actions)
target_q = th.min(target_q1, target_q2) - ent_coef * next_log_prob.reshape(-1, 1)
# td error + entropy term
q_backup = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
# Get current Q estimates
# using action from the replay buffer
current_q1, current_q2 = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = 0.5 * (F.mse_loss(current_q1, q_backup) + F.mse_loss(current_q2, q_backup))
critic_losses.append(critic_loss.item())
# Optimize the critic
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Compute actor loss
# Alternative: actor_loss = th.mean(log_prob - qf1_pi)
qf1_pi, qf2_pi = self.critic.forward(replay_data.observations, actions_pi)
min_qf_pi = th.min(qf1_pi, qf2_pi)
actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
# Update target networks
if gradient_step % self.target_update_interval == 0:
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude='tensorboard')
logger.record("train/ent_coef", np.mean(ent_coefs))
logger.record("train/actor_loss", np.mean(actor_losses))
logger.record("train/critic_loss", np.mean(critic_losses))
if len(ent_coef_losses) > 0:
logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
def learn(self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "SAC",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True) -> OffPolicyAlgorithm:
return super(SAC, self).learn(total_timesteps=total_timesteps, callback=callback, log_interval=log_interval,
eval_env=eval_env, eval_freq=eval_freq, n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name, eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps)
def excluded_save_params(self) -> List[str]:
"""
Returns the names of the parameters that should be excluded by default
when saving the model.
:return: (List[str]) List of parameters that should be excluded from save
"""
# Exclude aliases
return super(SAC, self).excluded_save_params() + ["actor", "critic", "critic_target"]
def get_torch_variables(self) -> Tuple[List[str], List[str]]:
"""
cf base class
"""
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
saved_tensors = ['log_ent_coef']
if self.ent_coef_optimizer is not None:
state_dicts.append('ent_coef_optimizer')
else:
saved_tensors.append('ent_coef_tensor')
return state_dicts, saved_tensors
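# A minimal usage sketch for the SAC class defined above. It assumes a Gym
# environment id such as 'Pendulum-v0' is available and that the 'MlpPolicy'
# alias resolves as in upstream stable-baselines3; treat this as illustrative
# rather than as the canonical training script.
if __name__ == "__main__":
    model = SAC("MlpPolicy", "Pendulum-v0", verbose=1)
    model.learn(total_timesteps=10000, log_interval=10)
    model.save("sac_pendulum")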
|
the-stack_106_27380 | """
For each train, we count how many other trains occupy the platform at the same
time, and the answer is the maximum such overlap over all trains.
"""
def minimumPlatform(n,arr,dep):
'''
    :param n: number of trains
:param arr: arrival time of trains
:param dep: corresponding departure time of trains
:return: Integer, minimum number of platforms needed
'''
ma = 0
for i in range(n) :
clash = 1
for j in range(n) :
if i != j :
if (arr[i] <= arr[j] <= dep[i]) or (arr[i] <= dep[j] <= dep[i]) or ((arr[i] >= arr[j]) and (dep[i] <= dep[j])) :
clash += 1
if clash > ma :
ma = clash
return ma
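# Worked example with hypothetical, zero-padded times: for
#   arr = ['0900', '0940', '0950', '1100', '1500', '1800']
#   dep = ['0910', '1200', '1120', '1130', '1900', '2000']
# three trains overlap around 11:00-11:20, so minimumPlatform(6, arr, dep)
# returns 3. Comparing the strings works here because the times are fixed-width.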
import atexit
import io
import sys
if __name__ == '__main__':
test_cases = int(input())
for cases in range(test_cases) :
n = int(input())
arrival = list(map(str,input().strip().split()))
departure = list(map(str,input().strip().split()))
print(minimumPlatform(n,arrival,departure))
|
the-stack_106_27381 | from __future__ import absolute_import, print_function, unicode_literals
import inspect
from wolframclient.utils.functional import flatten
# original idea by Guido in person.
# https://www.artima.com/weblogs/viewpost.jsp?thread=101605
class Dispatch(object):
""" A method dispatcher class allowing for multiple implementations of a function. Each implementation is associated to a specific input type.
Implementations are registered with the annotation :meth:`~wolframclient.utils.dispatch.Dispatch.dispatch`.
The Dispatch class is callable, it behaves as a function that uses the implementation corresponding to the input parameter.
When a type is a subtype, the type and its parents are checked in the order given by :data:`__mro__` (method resolution order).
*Example:* method :meth:`~wolframclient.utils.dispatch.Dispatch.resolve` applied to an instance of :class:`collections.OrderedDict`,
    checks for the first implementation matching :class:`collections.OrderedDict`, then :class:`dict`, and ultimately :data:`object`.
Once the mapping is determined, it is cached for later use.
"""
def __init__(self):
self.clear()
def dispatch(self, *args, **opts):
""" Annotate a function and map it to a given set of type(s).
Declare an implementation to use on :data:`bytearray` input::
@dispatcher.dispatch(bytearray)
def my_func(...)
The default implementation is associated with :data:`object`. Set a default::
@dispatcher.dispatch(object)
def my_default_func(...)
A tuple can be used as input to associate more than one type with a function.
Declare a function used for both :data:`bytes` and :data:`bytearray`::
@dispatcher.dispatch((bytes, bytearray))
def my_func(...)
Implementation must be unique. By default, registering the same combination of types will raise an error.
Set `replace_existing` to :data:`True` to update the current mapping.
Or, set `keep_existing` to :data:`True` to ignore duplicate registration and keep the existing mapping.
"""
def register(func):
return self.register(func, *args, **opts)
return register
def update(self, dispatch, **opts):
""" Update current mapping with the one from `dispatch`.
`dispatch` can be a Dispatch instance or a :class:`dict`.
`**opts` are passed to :meth:`~wolframclient.utils.dispatch.Dispatch.register`
"""
if isinstance(dispatch, Dispatch):
dispatchmapping = dispatch.dispatch_dict
elif isinstance(dispatch, dict):
dispatchmapping = dispatch
else:
raise ValueError("%s is not an instance of Dispatch" % dispatch)
for t, function in dispatchmapping.items():
self.register(function, t, **opts)
def validate_types(self, types):
for t in frozenset(flatten(types)):
if not inspect.isclass(t):
raise ValueError("%s is not a class" % t)
yield t
def register(self, function, types=object, keep_existing=False, replace_existing=False):
""" Equivalent to annotation :meth:`~wolframclient.utils.dispatch.Dispatch.dispatch` but as
a function.
"""
if not callable(function):
raise ValueError("Function %s is not callable" % function)
if keep_existing and replace_existing:
raise ValueError(
"Option values keep_existing and replace_existing cannot be both True."
)
self.clear_cache()
for t in self.validate_types(types):
if replace_existing:
self.dispatch_dict[t] = function
elif t in self.dispatch_dict:
if not keep_existing:
raise TypeError("Duplicated registration for input type(s): %s" % (t,))
else:
self.dispatch_dict[t] = function
return function
def unregister(self, types=object):
""" Remove implementations associated with types. """
self.clear_cache()
for t in self.validate_types(types):
try:
del self.dispatch_dict[t]
except KeyError:
pass
def clear(self):
""" Reset the dispatcher to its initial state. """
self.dispatch_dict = dict()
self.dispatch_dict_cache = dict()
def clear_cache(self):
if self.dispatch_dict_cache:
self.dispatch_dict_cache = dict()
def resolve(self, arg):
""" Return the implementation better matching the type the argument type. """
for t in arg.__class__.__mro__:
try:
return self.dispatch_dict_cache[t]
except KeyError:
impl = self.dispatch_dict.get(t, None)
if impl:
self.dispatch_dict_cache[t] = impl
return impl
return self.default_function
def default_function(self, *args, **opts):
""" Ultimately called when no type was found. """
raise ValueError("Unable to handle args")
def __call__(self, arg, *args, **opts):
return self.resolve(arg)(arg, *args, **opts)
def as_method(self):
""" Return the dispatch as a class method.
Create a new dispatcher::
dispatch = Dispatcher()
Use the dispatcher as a class method::
class MyClass(object):
myMethod = dispatch.as_method()
Call the class method::
o = MyClass()
o.myMethod(arg, *args, **kwargs)
"""
def method(instance, arg, *args, **opts):
return self.resolve(arg)(instance, arg, *args, **opts)
return method
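# A short, self-contained sketch of how the Dispatch class above can be used.
# The handler names and types below are illustrative, not part of the library.
if __name__ == "__main__":
    normalizer = Dispatch()

    @normalizer.dispatch(object)
    def normalize_default(value):
        # fallback used for any type without a dedicated handler
        return value

    @normalizer.dispatch((bytes, bytearray))
    def normalize_bytes(value):
        # one handler registered for two input types at once
        return value.decode("utf-8")

    print(normalizer(b"abc"))  # 'abc', resolved via normalize_bytes
    print(normalizer(123))     # 123, resolved via the object fallback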
|
the-stack_106_27384 | import logging
import operator
import os
from collections import namedtuple
import numpy as np
import gc
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
from torch.utils.tensorboard import SummaryWriter
import utils as project_utils
import nasws.cnn.utils
from nasws.cnn.search_space.nasbench101.util import change_model_spec
from nasws.cnn.search_space.nasbench101.model import NasBenchNet
import nasws.cnn.policy.nao_policy.utils as nao_utils
from nasws.cnn.policy.nao_policy.utils_for_nasbench import NASBench101NAOParsing, NASBench101NAOParsing_v1
from visualization.process_data import tensorboard_summarize_list
from nasws.cnn.procedures.train_search_procedure import nao_model_validation_nasbench
from ..cnn_general_search_policies import CNNSearchPolicy
from .model import NASNetworkCIFAR
from .model_search import NASWSNetworkCIFAR
from .model_search_nasbench import NasBenchNetSearchNAO
from .controller import NAO_Darts, NAO_Nasbench
Rank = namedtuple('Rank', 'valid_acc valid_obj geno_id gt_rank')
class NAONasBenchSearch(CNNSearchPolicy):
top_K_complete_evaluate = 100
# nao_search_config
def wrap_nao_search_config(self):
for k, v in self.args.nao_search_config.__dict__.items():
self.args.__dict__[k] = v
def initialize_model(self):
"""
Initialize model, may change across different model.
:return:
"""
parallel_model, optimizer, scheduler = super().initialize_model()
args = self.args
if self.args.search_space == 'nasbench':
utils = NASBench101NAOParsing_v1()
# utils = nao_nasbench_utils
nao = NAO_Nasbench(
args.controller_encoder_layers,
args.controller_encoder_vocab_size,
args.controller_encoder_hidden_size,
args.controller_encoder_dropout,
args.controller_encoder_length,
args.controller_source_length,
args.controller_encoder_emb_size,
args.controller_mlp_layers,
args.controller_mlp_hidden_size,
args.controller_mlp_dropout,
args.controller_decoder_layers,
args.controller_decoder_vocab_size,
args.controller_decoder_hidden_size,
args.controller_decoder_dropout,
args.controller_decoder_length,
args=args,
)
else:
utils = nao_utils
nao = NAO_Darts(
args.controller_encoder_layers,
args.controller_encoder_vocab_size,
args.controller_encoder_hidden_size,
args.controller_encoder_dropout,
args.controller_encoder_length,
args.controller_source_length,
args.controller_encoder_emb_size,
args.controller_mlp_layers,
args.controller_mlp_hidden_size,
args.controller_mlp_dropout,
args.controller_decoder_layers,
args.controller_decoder_vocab_size,
args.controller_decoder_hidden_size,
args.controller_decoder_dropout,
args.controller_decoder_length,
)
nao = nao.cuda()
logging.info("Encoder-Predictor-Decoder param size = %fMB", utils.count_parameters_in_MB(nao))
self.controller = nao
        return parallel_model, optimizer, scheduler, nao
def run(self):
args = self.args
        # mirror the parser choice made in initialize_model; the original
        # `nao_nasbench_utils` module alias is never imported in this file
        self.utils = NASBench101NAOParsing_v1() if args.search_space == 'nasbench' else nao_utils
utils = self.utils
self.nao_search_config = args.nao_search_config
self.wrap_nao_search_config()
train_queue, valid_queue, test_queue, train_criterion, eval_criterion = self.initialize_run()
args.steps = int(np.ceil(45000 / args.child_batch_size)) * args.child_epochs
if args.child_arch_pool is not None:
logging.info('Architecture pool is provided, loading')
with open(args.child_arch_pool) as f:
archs = f.read().splitlines()
archs = list(map(self.utils.build_dag, archs))
child_arch_pool = archs
elif os.path.exists(os.path.join(self.exp_dir, 'arch_pool')):
logging.info('Architecture pool is founded, loading')
with open(os.path.join(self.exp_dir, 'arch_pool')) as f:
archs = f.read().splitlines()
archs = list(map(self.utils.build_dag, archs))
child_arch_pool = archs
else:
child_arch_pool = None
child_eval_epochs = eval(args.child_eval_epochs)
# build the functions.
args = self.args
model, optimizer, scheduler, nao = self.initialize_model()
fitness_dict = {}
self.optimizer = optimizer
self.scheduler = scheduler
# Train child model
if child_arch_pool is None:
logging.info('Architecture pool is not provided, randomly generating now')
child_arch_pool = self.generate_new_arch(args.controller_seed_arch)
child_arch_pool_prob = self.child_arch_pool_prob(child_arch_pool)
eval_points = self.utils.generate_eval_points(child_eval_epochs, 0, args.child_epochs)
logging.info("eval epochs = %s", eval_points)
step = 0
# Begin the full loop
for epoch in range(args.epochs):
scheduler.step()
lr = scheduler.get_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
# sample an arch to train
train_acc, train_obj, step = self.child_train(train_queue, model, optimizer, step, child_arch_pool,
child_arch_pool_prob, train_criterion)
if self.writer:
self.writer.add_scalar(f'train/loss', train_obj, epoch)
self.writer.add_scalar(f'train/top_1_acc', train_acc, epoch)
logging.info('train_acc %f', train_acc)
if epoch not in eval_points:
# Continue training the architectures
continue
# Evaluate seed archs
valid_accuracy_list = self.child_valid(valid_queue, model, child_arch_pool, eval_criterion)
# Output archs and evaluated error rate
old_archs = child_arch_pool
old_archs_perf = valid_accuracy_list
old_archs_sorted_indices = np.argsort(old_archs_perf)[::-1]
old_archs = [old_archs[i] for i in old_archs_sorted_indices]
old_archs_perf = [old_archs_perf[i] for i in old_archs_sorted_indices]
with open(os.path.join(self.exp_dir, 'arch_pool.{}'.format(epoch)), 'w') as fa:
with open(os.path.join(self.exp_dir, 'arch_pool.perf.{}'.format(epoch)), 'w') as fp:
with open(os.path.join(self.exp_dir, 'arch_pool'), 'w') as fa_latest:
with open(os.path.join(self.exp_dir, 'arch_pool.perf'), 'w') as fp_latest:
for arch, perf in zip(old_archs, old_archs_perf):
arch = self.process_archname(arch)
fa.write('{}\n'.format(arch))
fa_latest.write('{}\n'.format(arch))
fp.write('{}\n'.format(perf))
fp_latest.write('{}\n'.format(perf))
if epoch == args.child_epochs:
break
# Train Encoder-Predictor-Decoder
logging.info('Training Encoder-Predictor-Decoder')
min_val = min(old_archs_perf)
max_val = max(old_archs_perf)
encoder_target = [(i - min_val) / (max_val - min_val) for i in old_archs_perf]
encoder_input = self.process_arch_to_seq(old_archs)
if args.controller_expand is not None:
train_encoder_input, train_encoder_target, valid_encoder_input, valid_encoder_target = \
self.expand_controller(encoder_input, encoder_target)
else:
train_encoder_input = encoder_input
train_encoder_target = encoder_target
valid_encoder_input = encoder_input
valid_encoder_target = encoder_target
logging.info('Train data: {}\tValid data: {}'.format(len(train_encoder_input), len(valid_encoder_input)))
# gerenrate NAO dataset.
nao_train_dataset = utils.NAODataset(train_encoder_input, train_encoder_target, True,
swap=True if args.controller_expand is None else False)
nao_valid_dataset = utils.NAODataset(valid_encoder_input, valid_encoder_target, False)
nao_train_queue = torch.utils.data.DataLoader(
nao_train_dataset, batch_size=args.controller_batch_size, shuffle=True, pin_memory=True)
nao_valid_queue = torch.utils.data.DataLoader(
nao_valid_dataset, batch_size=args.controller_batch_size, shuffle=False, pin_memory=True)
nao_optimizer = torch.optim.Adam(nao.parameters(), lr=args.controller_lr,
weight_decay=args.controller_l2_reg)
# train the sampler.
for nao_epoch in range(1, args.controller_epochs + 1):
nao_loss, nao_mse, nao_ce = self.nao_train(nao_train_queue, nao, nao_optimizer)
logging.info("epoch %04d train loss %.6f mse %.6f ce %.6f", nao_epoch, nao_loss, nao_mse, nao_ce)
if nao_epoch % 100 == 0:
pa, hs = self.nao_valid(nao_valid_queue, nao)
logging.info("Evaluation on valid data")
logging.info('epoch %04d pairwise accuracy %.6f hamming distance %.6f', epoch, pa, hs)
self.writer.add_scalar(f'valid/hamming_distance', hs, epoch)
self.writer.add_scalar(f'valid/pairwise_acc', pa, epoch)
# Generate new archs
new_archs = []
max_step_size = 50
predict_step_size = 0
top100_archs = self.process_arch_to_seq(old_archs[:100])
nao_infer_dataset = utils.NAODataset(top100_archs, None, False)
nao_infer_queue = torch.utils.data.DataLoader(
nao_infer_dataset, batch_size=len(nao_infer_dataset), shuffle=False, pin_memory=True)
while len(new_archs) < args.controller_new_arch:
predict_step_size += 1
logging.info('Generate new architectures with step size %d', predict_step_size)
new_arch = self.nao_infer(nao_infer_queue, nao, predict_step_size, direction='+')
for arch in new_arch:
if arch not in encoder_input and arch not in new_archs:
new_archs.append(arch)
if len(new_archs) >= args.controller_new_arch:
break
logging.info('%d new archs generated now', len(new_archs))
if predict_step_size > max_step_size:
break
# new_archs = list(map(lambda x: utils.parse_seq_to_arch(x, 2), new_archs))
new_archs = self.process_seq_to_arch(new_archs)
num_new_archs = len(new_archs)
logging.info("Generate %d new archs", num_new_archs)
# replace bottom archs
if args.controller_replace:
new_arch_pool = old_archs[:len(old_archs) - (num_new_archs + args.controller_random_arch)] + \
new_archs + self.generate_new_arch(args.controller_random_arch)
# discard all archs except top k
elif args.controller_discard:
new_arch_pool = old_archs[:100] + new_archs + self.generate_new_arch(args.controller_random_arch)
# use all
else:
new_arch_pool = old_archs + new_archs + self.generate_new_arch(args.controller_random_arch)
logging.info("Totally %d architectures now to train", len(new_arch_pool))
child_arch_pool = new_arch_pool
with open(os.path.join(self.exp_dir, 'arch_pool'), 'w') as f:
for arch in new_arch_pool:
f.write('{}\n'.format(self.process_archname(arch)))
child_arch_pool_prob = self.child_arch_pool_prob(child_arch_pool)
if epoch % self.args.save_every_epoch == 0:
project_utils.save_checkpoint(model, optimizer, epoch, self.exp_dir)
# add later, return the model specs that is evaluated across the time.
# Process the ranking in the end, return the best of training.
# IPython.embed(header="Pause for nothing.")
fitness_dict = self.evaluate(epoch, test_queue,
fitnesses_dict=fitness_dict,
arch_pool=child_arch_pool,
train_queue=train_queue,
criterion=eval_criterion)
project_utils.save_checkpoint(model, optimizer, epoch, self.exp_dir)
self.save_results(epoch, rank_details=True)
ep_k = [k for k in self.ranking_per_epoch.keys()][-1]
best_id = self.ranking_per_epoch[ep_k][-1][1].geno_id
return best_id, self.nasbench_model_specs[best_id]
def child_train(self, train_queue, model, optimizer, global_step, arch_pool, arch_pool_prob, criterion):
utils = self.utils
objs = nasws.cnn.utils.AverageMeter()
top1 = nasws.cnn.utils.AverageMeter()
top5 = nasws.cnn.utils.AverageMeter()
model.train()
for step, (input, target) in enumerate(train_queue):
if self.args.debug:
if step > 10:
print("Break after 10 batch")
break
input = input.cuda().requires_grad_()
target = target.cuda()
optimizer.zero_grad()
# sample an arch to train
arch = utils.sample_arch(arch_pool, arch_pool_prob)
arch_l = arch
arch = self.process_arch(arch)
logits, aux_logits = model(input, arch, global_step, bn_train=False)
global_step += 1
loss = criterion(logits, target)
if aux_logits is not None:
aux_loss = criterion(aux_logits, target)
loss += 0.4 * aux_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), self.args.child_grad_bound)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if (step + 1) % 100 == 0:
logging.info('Train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
logging.info('Arch: %s', self.process_archname(arch_l))
return top1.avg, objs.avg, global_step
def child_valid(self,valid_queue, model, arch_pool, criterion):
valid_acc_list = []
with torch.no_grad():
model.eval()
for i, arch in enumerate(arch_pool):
# for step, (inputs, targets) in enumerate(valid_queue):
inputs, targets = next(iter(valid_queue))
inputs = inputs.cuda()
targets = targets.cuda()
arch_l = arch
arch = self.process_arch(arch)
logits, _ = model(inputs, arch, bn_train=True)
loss = criterion(logits, targets)
prec1, prec5 = self.utils.accuracy(logits, targets, topk=(1, 5))
valid_acc_list.append(prec1.data / 100)
if (i + 1) % 100 == 0:
logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f', self.process_archname(arch_l),
loss, prec1, prec5)
return valid_acc_list
def nao_train(self, train_queue, model, optimizer):
args = self.args
objs = nasws.cnn.utils.AverageMeter()
mse = nasws.cnn.utils.AverageMeter()
nll = nasws.cnn.utils.AverageMeter()
model.train()
for step, sample in enumerate(train_queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_input = sample['decoder_input']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda().requires_grad_()
decoder_input = decoder_input.cuda()
decoder_target = decoder_target.cuda()
optimizer.zero_grad()
predict_value, log_prob, arch = model(encoder_input, decoder_input)
loss_1 = F.mse_loss(predict_value.squeeze(), encoder_target.squeeze())
loss_2 = F.nll_loss(log_prob.contiguous().view(-1, log_prob.size(-1)), decoder_target.view(-1))
loss = args.controller_trade_off * loss_1 + (1 - args.controller_trade_off) * loss_2
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.controller_grad_bound)
optimizer.step()
n = encoder_input.size(0)
objs.update(loss.data, n)
mse.update(loss_1.data, n)
nll.update(loss_2.data, n)
return objs.avg, mse.avg, nll.avg
@staticmethod
def nao_valid(queue, model):
pa = nasws.cnn.utils.AverageMeter()
hs = nasws.cnn.utils.AverageMeter()
with torch.no_grad():
model.eval()
for step, sample in enumerate(queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda()
decoder_target = decoder_target.cuda()
predict_value, logits, arch = model(encoder_input)
n = encoder_input.size(0)
pairwise_acc = nao_utils.pairwise_accuracy(encoder_target.data.squeeze().tolist(),
predict_value.data.squeeze().tolist())
hamming_dis = nao_utils.hamming_distance(decoder_target.data.squeeze().tolist(),
arch.data.squeeze().tolist())
pa.update(pairwise_acc, n)
hs.update(hamming_dis, n)
return pa.avg, hs.avg
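    # Pairwise accuracy above is typically the fraction of pairs (i, j) whose
    # predicted scores are ordered the same way as their ground-truth targets.
    # A minimal reference sketch, assuming nao_utils follows that convention:
    #
    #   def pairwise_accuracy(true_vals, pred_vals):
    #       pairs = [(i, j) for i in range(len(true_vals))
    #                for j in range(i + 1, len(true_vals))]
    #       agree = sum((true_vals[i] > true_vals[j]) == (pred_vals[i] > pred_vals[j])
    #                   for i, j in pairs)
    #       return agree / max(len(pairs), 1)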
@staticmethod
def nao_infer(queue, model, step, direction='+'):
new_arch_list = []
model.eval()
for i, sample in enumerate(queue):
encoder_input = sample['encoder_input']
encoder_input = encoder_input.cuda()
model.zero_grad()
new_arch = model.generate_new_arch(encoder_input, step, direction=direction)
new_arch_list.extend(new_arch.data.squeeze().tolist())
return new_arch_list
def expand_controller(self, encoder_input, encoder_target):
args = self.args
dataset = list(zip(encoder_input, encoder_target))
n = len(dataset)
split = int(n * args.ratio)
np.random.shuffle(dataset)
encoder_input, encoder_target = list(zip(*dataset))
train_encoder_input = list(encoder_input[:split])
train_encoder_target = list(encoder_target[:split])
valid_encoder_input = list(encoder_input[split:])
valid_encoder_target = list(encoder_target[split:])
if args.search_space == 'nasbench':
for _ in range(args.controller_expand - 1):
for src, tgt in zip(encoder_input[:split], encoder_target[:split]):
train_encoder_input.append(src)
train_encoder_target.append(tgt)
else:
for _ in range(args.controller_expand - 1):
# TODO what is controller expand?????
for src, tgt in zip(encoder_input[:split], encoder_target[:split]):
a = np.random.randint(0, args.child_nodes)
b = np.random.randint(0, args.child_nodes)
src = src[:4 * a] + src[4 * a + 2:4 * a + 4] + \
src[4 * a:4 * a + 2] + src[4 * (a + 1):20 + 4 * b] + \
src[20 + 4 * b + 2:20 + 4 * b + 4] + src[20 + 4 * b:20 + 4 * b + 2] + \
src[20 + 4 * (b + 1):]
train_encoder_input.append(src)
train_encoder_target.append(tgt)
return train_encoder_input, train_encoder_target, valid_encoder_input, valid_encoder_target
# def process_arch(self,arch):
# if self.args.search_space == 'nasbench':
# matrix, ops = nao_nasbench_utils.parse_arch_to_model_spec_matrix_op(arch, self.args.child_nodes)
# model_spec = ModelSpec_v2(matrix, ops)
# return model_spec
# else:
# return arch
def process_arch_to_seq(self, old_archs):
if self.args.search_space =='nasbench':
encoder_input =list(map(lambda x: self.utils.parse_arch_to_seq(x, 2, self.args.child_nodes), old_archs))
else:
encoder_input = list(
map(lambda x: self.utils.parse_arch_to_seq(x[0], 2) + self.utils.parse_arch_to_seq(x[1], 2), old_archs))
return encoder_input
def process_seq_to_arch(self, old_archs):
if self.args.search_space =='nasbench':
encoder_input =list(map(lambda x: self.utils.parse_seq_to_arch(x, 2, self.args.child_nodes), old_archs))
else:
encoder_input = list(
map(lambda x: self.utils.parse_seq_to_arch(x[0], 2) + self.utils.parse_seq_to_arch(x[1], 2), old_archs))
return encoder_input
def process_archname(self, arch):
if self.args.search_space == 'nasbench':
return ' '.join(map(str, arch))
else:
return ' '.join(map(str, arch[0] + arch[1]))
def generate_new_arch(self, num_new):
num_ops = 3 if self.args.search_space == 'nasbench' else 5
child_arch_pool = self.utils.generate_arch(num_new, self.args.child_nodes, num_ops)
return child_arch_pool
def evaluate(self, epoch, data_source, arch_pool=None, fitnesses_dict=None, train_queue=None, criterion=None):
"""
Full evaluation of all possible models.
:param epoch:
:param data_source:
:param fitnesses_dict: Store the model_spec_id -> accuracy
:return:
"""
fitnesses_dict = fitnesses_dict or {}
total_avg_acc = 0
total_avg_obj = 0
# rank dict for the possible solutions
model_specs_rank = {}
model_specs_rank_before = {}
queries = {}
# as backup
ind = 0
eval_result = {}
# let us sample 200 architecture to evaluate. # just keep the top K.
clean_arch_pool = self.clean_arch_pool(arch_pool)[:self.top_K_complete_evaluate]
while ind < len(clean_arch_pool):
# get this id
if self.args.debug and ind > 10:
break
arch = clean_arch_pool[ind]
new_model_spec = self.utils.parse_arch_to_model_spec(clean_arch_pool[ind])
ind += 1 # increment this.
try:
model_spec_id = self.nasbench_hashs.index(new_model_spec.hash_spec())
except Exception as e:
logging.error(e)
continue
query = {'test accuracy':self.search_space.nasbench.perf_rank[model_spec_id]}
# selecting the current subDAG in our DAG to train
change_model_spec(self.parallel_model, new_model_spec)
# Reset the weights.
# evaluate before train
self.logger.info('evaluate the model spec id: {}'.format(model_spec_id))
_avg_val_acc, _avg_val_acc5, _avg_val_obj = self.child_test(data_source, self.parallel_model, arch, criterion=criterion)
eval_result[model_spec_id] = _avg_val_acc, _avg_val_obj
logging.info("Query: {}".format(query))
# update the total loss.
total_avg_acc += _avg_val_acc
total_avg_obj += _avg_val_obj
# saving the particle fit in our dictionaries
fitnesses_dict[model_spec_id] = _avg_val_acc
ms_hash = self.nasbench_hashs[model_spec_id]
model_specs_rank[ms_hash] = Rank(_avg_val_acc, _avg_val_obj, model_spec_id,
self.search_space.rank_by_mid[model_spec_id])
queries[ms_hash] = query
gc.collect()
# save the ranking, according to their GENOTYPE but not particle id
rank_gens = sorted(model_specs_rank.items(), key=operator.itemgetter(1))
self.ranking_per_epoch[epoch] = rank_gens
self.eval_result[epoch] = eval_result
# IPython.embed(header="Check evaluation result")
self.logger.info('VALIDATION RANKING OF PARTICLES')
for pos, elem in enumerate(rank_gens):
self.logger.info(f'particle gen id: {elem[1].geno_id}, acc: {elem[1].valid_acc}, obj {elem[1].valid_obj}, '
f'hash: {elem[0]}, pos {pos}')
if self.writer:
# process data into list.
accs_after, objs_after = zip(*eval_result.values())
tensorboard_summarize_list(accs_after, writer=self.writer, key='neweval_after/acc', step=epoch, ascending=False)
tensorboard_summarize_list(objs_after, writer=self.writer, key='neweval_after/obj', step=epoch)
return fitnesses_dict
def process_nasbench(self):
super(NAONasBenchSearch, self).process_nasbench(only_hash=False)
def child_test(self, test_queue, model, arch, criterion, verbose=True):
utils = self.utils
objs = nasws.cnn.utils.AverageMeter()
top1 = nasws.cnn.utils.AverageMeter()
top5 = nasws.cnn.utils.AverageMeter()
model.eval()
# arch_l = arch
arch = self.utils.parse_arch_to_model_spec(arch)
with torch.no_grad():
for step, (input, target) in enumerate(test_queue):
if self.args.debug:
if step > 10:
print("Break after 10 batch")
break
input = input.cuda()
target = target.cuda()
logits, _ = model(input, arch, bn_train=False)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % self.args.report_freq == 0 and step > 0 and verbose:
logging.info('test | step %03d | loss %e | acc %f | acc-5 %f', step, objs.avg, top1.avg,
top5.avg)
return top1.avg, top5.avg, objs.avg
def clean_arch_pool(self, arch_pool):
new_arch_pool = []
for i in arch_pool:
if not i in new_arch_pool:
new_arch_pool.append(i)
return new_arch_pool
def child_arch_pool_prob(self, child_arch_pool):
# raise NotImplementedError('do not support params for now ...')
args = self.args
if args.child_sample_policy == 'params':
child_arch_pool_prob = []
            print('num is %d' % len(child_arch_pool))
for arch in child_arch_pool:
# just to count the parameters and use as prob, kind of a biased sampling.
# use model hash to query.
if self.args.search_space == 'nasbench':
tmp_model = self.fixmodel_fn(3, self.process_arch(arch), args.nasbench_config)
else:
tmp_model = self.fixmodel_fn(args, 10, args.child_layers, args.child_nodes, args.child_channels,
args.child_keep_prob, args.child_drop_path_keep_prob,
args.child_use_aux_head, args.steps, arch)
child_arch_pool_prob.append(self.utils.count_parameters_in_MB(tmp_model))
del tmp_model
else:
child_arch_pool_prob = None
return child_arch_pool_prob
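# The parameter counts returned above are typically turned into a sampling
# distribution over architectures, e.g. (illustrative sketch; the actual
# behaviour lives in utils.sample_arch):
#
#   probs = np.array(child_arch_pool_prob, dtype=np.float64)
#   probs = probs / probs.sum()
#   arch = child_arch_pool[np.random.choice(len(child_arch_pool), p=probs)]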
|
the-stack_106_27385 | import os
import errno
import pytest
from dvc.cache import NamedCache
from dvc.path_info import PathInfo
from dvc.remote.local import RemoteLOCAL
def test_status_download_optimization(mocker):
"""When comparing the status to pull a remote cache,
And the desired files to fetch are already on the local cache,
Don't check the existence of the desired files on the remote cache
"""
remote = RemoteLOCAL(None, {})
infos = NamedCache()
infos.add("local", "acbd18db4cc2f85cedef654fccc4a4d8", "foo")
infos.add("local", "37b51d194a7513e45b56f6524f2d51f2", "bar")
local_exists = list(infos["local"])
mocker.patch.object(remote, "cache_exists", return_value=local_exists)
other_remote = mocker.Mock()
other_remote.url = "other_remote"
other_remote.cache_exists.return_value = []
remote.status(infos, other_remote, download=True)
assert other_remote.cache_exists.call_count == 0
@pytest.mark.parametrize("link_name", ["hardlink", "symlink"])
def test_is_protected(tmp_dir, link_name):
remote = RemoteLOCAL(None, {})
link_method = getattr(remote, link_name)
(tmp_dir / "foo").write_text("foo")
foo = PathInfo(tmp_dir / "foo")
link = PathInfo(tmp_dir / "link")
link_method(foo, link)
assert not remote.is_protected(foo)
assert not remote.is_protected(link)
remote.protect(foo)
assert remote.is_protected(foo)
assert remote.is_protected(link)
remote.unprotect(link)
assert not remote.is_protected(link)
if link_name == "symlink" and os.name == "nt":
# NOTE: Windows symlink perms don't propagate to the target
assert remote.is_protected(foo)
else:
assert not remote.is_protected(foo)
@pytest.mark.parametrize("err", [errno.EPERM, errno.EACCES])
def test_protect_ignore_errors(tmp_dir, mocker, err):
tmp_dir.gen("foo", "foo")
foo = PathInfo("foo")
remote = RemoteLOCAL(None, {})
remote.protect(foo)
mock_chmod = mocker.patch(
"os.chmod", side_effect=OSError(err, "something")
)
remote.protect(foo)
assert mock_chmod.called
def test_protect_ignore_erofs(tmp_dir, mocker):
tmp_dir.gen("foo", "foo")
foo = PathInfo("foo")
remote = RemoteLOCAL(None, {})
mock_chmod = mocker.patch(
"os.chmod", side_effect=OSError(errno.EROFS, "read-only fs")
)
remote.protect(foo)
assert mock_chmod.called
|
the-stack_106_27388 | import unittest
import struct
from zttf.utils import fixed_version, binary_search_parameters, ttf_checksum, glyph_more_components, glyf_skip_format
class TestUtils(unittest.TestCase):
def test_fixed_version(self):
cases = [
(0x00005000, 0.5),
(0x00010000, 1.0),
(0x00035000, 3.5),
(0x00105000, 10.5)
]
for case in cases:
self.assertEqual(fixed_version(case[0]), case[1])
def test_binary_parameters(self):
cases = {
39: (32, 5),
10: (8, 3),
19: (16, 4)
}
for n, result in cases.items():
self.assertEqual(binary_search_parameters(n), result)
def test_checksum(self):
data = struct.pack(">12I", *range(0, 12))
self.assertEqual(len(data), 48)
self.assertEqual(ttf_checksum(data), 66)
self.assertEqual(ttf_checksum(struct.pack(">12I", *range(1000, 13000, 1000))), 78000)
self.assertEqual(ttf_checksum(struct.pack(">512I", *range(1024, 1024 * 2048, 4096))), 0x1FF80000)
def test_component_flag(self):
self.assertTrue(glyph_more_components((1 << 5)))
self.assertFalse(glyph_more_components((1 << 4)))
def test_skip_format(self):
self.assertEqual(glyf_skip_format((1 << 0)), ">I")
self.assertEqual(glyf_skip_format(0), ">H")
self.assertEqual(glyf_skip_format((1 << 3)), ">HH")
self.assertEqual(glyf_skip_format((1 << 3) | (1 << 0)), ">IH")
self.assertEqual(glyf_skip_format((1 << 7)), ">HII")
|
the-stack_106_27390 | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='ticklen',
parent_name='splom.marker.colorbar',
**kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_106_27391 | import json
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import requests
from model.track import Track
class Groups(Enum):
"""Enum for group domains."""
JAZZVE = 'jazzve'
SOUNDFIELDS = 'soundfields'
GLBDOM = 'glbdom'
RADIANT_SOUND = 'radiant_sound_home'
CRAFT_MUSIC = 'craftmusique'
class GroupsIDs(Enum):
"""Enum for group ids."""
JAZZVE = 79069156
SOUNDFIELDS = 5124202
GLBDOM = 152579809
RADIANT_SOUND = 32159054
CRAFT_MUSIC = 40030199
@dataclass
class VKAPIController:
_vk_api_token: str
_api_version: float = 5.131
_desired_genres: tuple = ('house', 'funk', 'disco', 'soul')
@staticmethod
def _check_date(post_date_ms: int) -> bool:
"""Check if the post was published yesterday. Otherwise, return False."""
post_date = datetime.fromtimestamp(post_date_ms).date()
yesterday_date = (datetime.today() - timedelta(days=1)).date()
return post_date == yesterday_date
def _request_posts(self, group_domain: str) -> List:
"""Send HTTP-request to VK API to get last 20 posts for 'group_domain'."""
url = 'https://api.vk.com/method/wall.get'
params: Dict[str, Union[int, str, float]] = {
'domain': group_domain,
'count': 20,
'access_token': self._vk_api_token,
'v': self._api_version
}
response = requests.get(url, params=params)
assert response.status_code == 200, (f'Request failed to get posts from {group_domain} '
f'with {response.status_code}. Reason: {response.reason}')
return json.loads(response.text)['response']['items']
@staticmethod
def _compose_full_name(audio: Dict) -> str:
"""Compose full track name."""
subtitle = audio.get('subtitle')
title = audio['title']
if subtitle:
return f'{title} {subtitle}'
return title
def _get_yesterday_posts(self, group_domain: str) -> List[Dict]:
"""Get posts that were published yesterday for given VK 'group_domain'."""
all_posts = self._request_posts(group_domain)
yesterday_posts = [post for post in all_posts if self._check_date(post['date'])]
return yesterday_posts
# Not being used for now
def _get_group_id(self, group_domain: str) -> int:
"""Get group id by its domain."""
url = 'https://api.vk.com/method/utils.resolveScreenName'
params: Dict[str, Union[float, str]] = {
'screen_name': group_domain,
'access_token': self._vk_api_token,
'v': self._api_version
}
response = requests.get(url=url, params=params)
assert response.status_code == 200
return response.json()['response']['object_id']
# Not being used for now
    def _get_post_url(self, group_id: int, post_id: int) -> str:
        """Get the raw wall.getById response for the given group and post ids."""
url = 'https://api.vk.com/method/wall.getById'
params: Dict[str, Union[str, float, List[str]]] = {
'posts': [f'{group_id}_{post_id}'],
'access_token': self._vk_api_token,
'v': self._api_version
}
response = requests.get(url=url, params=params)
return response.json()
def _process_post(self, post: Dict, group_id: int) -> Tuple[List[Track], Optional[str]]:
"""Process a certain post."""
tracks: List[Track] = []
# if post is a repost
if post.get('copy_history'):
repost = post['copy_history'][0]
if not repost.get('attachments'):
return [], None
tracks_from_post = [
attch['audio'] for attch in repost['attachments'] if attch['type'] == 'audio'
]
# just a usual post
else:
if not post.get('attachments'):
return [], None
tracks_from_post = [attch['audio'] for attch in post['attachments'] if attch['type'] == 'audio']
# If there are no tracks in a post, it means that it is a VK-playlist which can't be parsed
playlist_post_url = None
if not tracks_from_post:
playlist_post_url = f'vk.com/wall-{group_id}_{post["id"]}'
for track_from_post in tracks_from_post:
artist = Track.compose_artist_name(track_from_post['artist'])
full_name = Track.compose_full_name(track_from_post)
alternative_name = track_from_post['title']
post_url = f'vk.com/wall-{group_id}_{post["id"]}'
tracks.append(Track(artist, full_name, alternative_name, post_url))
return tracks, playlist_post_url
def _check_genres(self, post_genres: List[str]) -> bool:
"""Check if post genres intersect with desired genres."""
if set(post_genres).intersection(self._desired_genres):
return True
return False
def process_groups(self) -> Tuple[List[Track], List[str]]:
"""
Process all yesterday posts in all groups presented in Groups class.
        Returns
        -------
Tuple[List[Track], List[str]]
First element of tuple is a list of yesterday tracks from all the groups presented in
'Groups' class.
Second element of tuple is a list of VK urls which consists of playlists which can't be parsed.
"""
tracks: List[Track] = []
playlist_post_urls: List[str] = []
for group in Groups:
yesterday_posts = self._get_yesterday_posts(group.value)
for post in yesterday_posts:
if group is Groups.RADIANT_SOUND:
if not post.get('text'):
continue
genres = post['text'].split('\n')[1].split('/')
if not self._check_genres(genres):
continue
if group is Groups.SOUNDFIELDS:
if post['text'] not in ('#somegoods', '#qweektunes'):
genres = post['text'].split('\n')[-1].replace('#', '').split(' ')
if not self._check_genres(genres):
continue
found_tracks, playlist_post_url = self._process_post(post, GroupsIDs[group.name].value)
tracks.extend(found_tracks)
if playlist_post_url:
playlist_post_urls.append(playlist_post_url)
return tracks, playlist_post_urls
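# A minimal usage sketch for VKAPIController. The token below is a placeholder;
# a real VK API token with wall-read access is required for the requests above.
if __name__ == "__main__":
    controller = VKAPIController(_vk_api_token="YOUR_VK_API_TOKEN")
    tracks, playlist_urls = controller.process_groups()
    for track in tracks:
        print(track)
    print("Posts with unparsable playlists:", playlist_urls)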
|
the-stack_106_27395 |
import numpy as np
import torch
from torch import nn
from collections import OrderedDict
import torchvision
class Resnet18(nn.Module):
def __init__(self, bottleneck_connection_channel=32):
"""
bottleneck_connection_channel: connection channel for VOneBlock
"""
super(Resnet18, self).__init__()
self.customized_layer = nn.Sequential(
nn.Conv2d(bottleneck_connection_channel, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
)
self.origin_layer = nn.Sequential(
*list(torchvision.models.resnet18(pretrained=False).children())[4:-1]
)
self.fc_layer = nn.Sequential(
nn.Linear(in_features=512, out_features=1000, bias=True)
)
def forward(self, x):
# print(x.shape)
out = self.customized_layer(x)
out = self.origin_layer(out)
# print(out.shape)
out = torch.flatten(out, start_dim=1)
out = self.fc_layer(out)
return out
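# Shape sketch (illustrative): with bottleneck_connection_channel=32 the model
# expects VOneBlock-style activations of shape (N, 32, H, W), e.g.
#
#   model = Resnet18(bottleneck_connection_channel=32)
#   out = model(torch.randn(2, 32, 112, 112))   # -> torch.Size([2, 1000])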
class Resnet50(nn.Module):
def __init__(self, bottleneck_connection_channel=32, n_classes=1000):
"""
bottleneck_connection_channel: connection channel for VOneBlock
"""
super(Resnet50, self).__init__()
self.bottleneck = nn.Conv2d(bottleneck_connection_channel, 64, stride=1, kernel_size=(1, 1))
self.layers = nn.Sequential (
*list(torchvision.models.resnet50(pretrained=False).children())[1:-2]
)
self.flatten = nn.Sequential(
nn.Conv2d(2048, n_classes, stride=1, kernel_size=(2, 2))
)
def forward(self, x):
out = self.bottleneck(x)
out = self.layers(out)
out = self.flatten(out)
out = out.view(out.shape[0], -1)
return out
# AlexNet Back-End architecture
# Based on Torchvision implementation in
# https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
class AlexNetBackEnd(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.features = nn.Sequential(
nn.Conv2d(32, 192, kernel_size=5, stride=2, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 7 * 7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
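# Shape sketch (illustrative): thanks to the adaptive average pooling, any
# reasonably sized VOneBlock output works, e.g.
#
#   model = AlexNetBackEnd(num_classes=10)
#   out = model(torch.randn(2, 32, 56, 56))   # -> torch.Size([2, 10])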
# ResNet Back-End architecture
# Based on Torchvision implementation in
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Identity(nn.Module):
def forward(self, x):
return x
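# NOTE: the `forward` below appears to belong to a recurrent, CORnet-style
# block whose __init__ is not included in this file: it references
# self.conv_input, self.times, self.norm_skip and friends, none of which are
# defined here. It is kept as-is for reference.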
def forward(self, inp):
x = self.conv_input(inp)
for t in range(self.times):
if t == 0:
skip = self.norm_skip(self.skip(x))
self.conv2.stride = (2, 2)
else:
skip = x
self.conv2.stride = (1, 1)
x = self.conv1(x)
x = getattr(self, f'norm1_{t}')(x)
x = self.nonlin1(x)
x = self.conv2(x)
x = getattr(self, f'norm2_{t}')(x)
x = self.nonlin2(x)
x = self.conv3(x)
x = getattr(self, f'norm3_{t}')(x)
x += skip
x = self.nonlin3(x)
output = self.output(x)
return output
import torch.functional as F
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
# For the sake of studying ML, I will use deeper network
self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(2, 2), padding=1)
self.pool = nn.MaxPool2d(kernel_size=3, stride=3)
self.fc1 = nn.Sequential(
nn.Linear(512, 512),
nn.ReLU(inplace=True)
)
self.fc2 = nn.Sequential(
nn.Linear(512, 512),
nn.ReLU(inplace=True)
# nn.Tanh()
)
self.fc3 = nn.Linear(512, 10)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# print(x.shape)
x = self.pool(self.relu(self.conv1(x)))
# Batch Normalization(x)
x = self.pool(self.relu(self.conv2(x)))
x = self.conv3(x)
# Flattening
before_fc = x.size(0)
x = x.view(x.size(0), -1)
# print(x.shape)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class Basic_Linear_Regression(nn.Module):
def __init__(self):
super(Basic_Linear_Regression, self).__init__()
self.voneblock_connector = nn.Linear(32 * 28 * 28, 28 * 28, bias=True)
self.fc1 = nn.Linear(28 * 28, 28 * 28, bias=True)
self.fc2 = nn.Linear(28 * 28, 10, bias=True)
self.relu = nn.ReLU(inplace=True)
self.flatten = nn.Flatten()
def forward(self, x):
x = self.flatten(x)
# for VOneBlock
x = self.voneblock_connector(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
return x
class Basic_CNN(nn.Module):
def __init__(self):
super(Basic_CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.fc = nn.Linear(7 * 7 * 64, 10, bias=True)
        nn.init.xavier_uniform_(self.fc.weight)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
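# A quick, illustrative shape check for the MNIST-sized models above; the
# 28x28 single-channel input is an assumption based on the layer arithmetic.
if __name__ == "__main__":
    dummy = torch.randn(2, 1, 28, 28)
    print(Basic_CNN()(dummy).shape)  # expected: torch.Size([2, 10])
    print(ConvNet()(dummy).shape)    # expected: torch.Size([2, 10])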
|
the-stack_106_27408 | import Cifras.bases_numericas as bases_numericas
import dicionarios
def codificar_texto_para_UTF8(texto):
if not texto:
return dicionarios.retorna_erro_mensagem()
codigo_final_utf8 = ''
for caractere in texto:
num_binario = bases_numericas.converter_decimal_para_binario(ord(caractere))
if len(num_binario) <= 7:
codigo_final_utf8 += transformar_7bits_UTF8(num_binario)
elif len(num_binario) <= 11:
codigo_final_utf8 += transformar_11bits_UTF8(num_binario)
elif len(num_binario) <= 16:
codigo_final_utf8 += transformar_16bits_UTF8(num_binario)
elif len(num_binario) <= 21:
codigo_final_utf8 += transformar_21bits_UTF8(num_binario)
        else:  # Beyond this program's limit (more bytes could be read, but this program does not handle them)
return dicionarios.retorna_erro_mensagem()
return codigo_final_utf8
def decodificar_UTF8_para_texto(codigo_UTF8):
tamanho_codigo_UTF8 = len(codigo_UTF8)
if not codigo_UTF8 or tamanho_codigo_UTF8 % 8 != 0:
return dicionarios.retorna_erro_mensagem()
texto_decodicado = ''
i = 0
while i < tamanho_codigo_UTF8:
if codigo_UTF8[i:i+5] == '11110':
bin_atual = codigo_UTF8[i+5:i+8] + codigo_UTF8[i+10:i+16] + codigo_UTF8[i+18:i+24] + codigo_UTF8[i+26:i+32]
i += 32
elif codigo_UTF8[i:i+4] == '1110':
bin_atual = codigo_UTF8[i+4:i+8] + codigo_UTF8[i+10:i+16] + codigo_UTF8[i+18:i+24]
i += 24
elif codigo_UTF8[i:i+3] == '110':
bin_atual = codigo_UTF8[i+3:i+8] + codigo_UTF8[i+10:i+16]
i += 16
elif codigo_UTF8[i] == '0':
bin_atual = codigo_UTF8[i+1:i+8]
i += 8
else:
return dicionarios.retorna_erro_mensagem()
UNICODE_novo_caractere = bases_numericas.converter_binario_para_decimal(bin_atual, tirar_zeros_esq=False)
if not UNICODE_novo_caractere:
return dicionarios.retorna_erro_mensagem()
        texto_decodicado += chr(UNICODE_novo_caractere)
return texto_decodicado
def transformar_7bits_UTF8(num_binario):
return '0' * (8 - len(num_binario)) + num_binario
def transformar_11bits_UTF8(num_binario):
num_binario = '0' * (11 - len(num_binario)) + num_binario
return '110' + num_binario[:5] + '10' + num_binario[5:]
def transformar_16bits_UTF8(num_binario):
num_binario = '0' * (16 - len(num_binario)) + num_binario
return '1110' + num_binario[:4] + '10' + num_binario[4:10] + '10' + num_binario[10:]
def transformar_21bits_UTF8(num_binario):
num_binario = '0' * (21 - len(num_binario)) + num_binario
return '11110' + num_binario[:3] + '10' + num_binario[3:9] + '10' + num_binario[9:15] + '10' + num_binario[15:]
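# Worked example: 'é' is code point U+00E9 (233); its binary form has 8
# significant bits, so it takes the 11-bit branch and becomes the two UTF-8
# bytes 110_00011 10_101001 (0xC3 0xA9). Assuming bases_numericas provides
# standard base conversion, this means:
#
#   codificar_texto_para_UTF8('é')                    # -> '1100001110101001'
#   decodificar_UTF8_para_texto('1100001110101001')   # -> 'é'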
|
the-stack_106_27409 | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests the gce module."""
from __future__ import print_function
import os
from chromite.lib import cros_test_lib
from chromite.lib import gce
from chromite.lib import osutils
from googleapiclient.errors import HttpError
from googleapiclient.http import HttpMockSequence
from oauth2client.client import GoogleCredentials
class GceTest(cros_test_lib.MockTempDirTestCase):
"""Unit tests for the gce module."""
_PROJECT = 'foo-project'
_ZONE = 'foo-zone'
def setUp(self):
self.json_key_file = os.path.join(self.tempdir, 'service_account.json')
osutils.Touch(self.json_key_file)
for cmd in ('from_stream', 'create_scoped'):
self.PatchObject(GoogleCredentials, cmd, autospec=True)
self.PatchObject(gce.GceContext, 'GetZoneRegion', autospec=True,
return_value=self._ZONE)
@cros_test_lib.NetworkTest()
def testGetImage(self):
"""Tests that GetImage returns the correct image."""
good_http = HttpMockSequence([
({'status': '200',}, '{"name": "foo-image"}',),
])
# Assert that GetImage does not complain if an image is found.
self.PatchObject(gce.GceContext, '_BuildRetriableRequest', autospec=True,
side_effect=self._MockOutBuildRetriableRequest(good_http))
gce_context = gce.GceContext.ForServiceAccount(self._PROJECT, self._ZONE,
self.json_key_file)
self.assertDictEqual(gce_context.GetImage('foo-image'),
dict(name='foo-image'))
@cros_test_lib.NetworkTest()
  def testGetImageRaisesIfImageNotFound(self):
    """Tests that GetImage raises an exception when the image is not found."""
bad_http = HttpMockSequence([
({'status': '404',}, 'Image not found.',),
])
# Assert that GetImage raises if image is not found.
self.PatchObject(gce.GceContext, '_BuildRetriableRequest', autospec=True,
side_effect=self._MockOutBuildRetriableRequest(bad_http))
gce_context = gce.GceContext.ForServiceAccount(self._PROJECT, self._ZONE,
self.json_key_file)
with self.assertRaises(gce.ResourceNotFoundError):
gce_context.GetImage('not-exising-image')
@cros_test_lib.NetworkTest()
  def testRetryOnServerErrorHttpRequest(self):
    """Tests that 500 errors are retried."""
# Fake http sequence that does not return 200 until the third trial.
mock_http = HttpMockSequence([
({'status': '502'}, 'Server error'),
({'status': '502'}, 'Server error'),
({'status': '200'}, '{"name":"foo-image"}'),
])
self.PatchObject(gce.GceContext, '_BuildRetriableRequest', autospec=True,
side_effect=self._MockOutBuildRetriableRequest(mock_http))
# Disable retry and expect the request to fail.
gce.GceContext.RETRIES = 0
gce_context = gce.GceContext.ForServiceAccount(self._PROJECT, self._ZONE,
self.json_key_file)
with self.assertRaises(HttpError):
gce_context.GetImage('foo-image')
# Enable retry and expect the request to succeed.
gce.GceContext.RETRIES = 2
gce_context = gce.GceContext.ForServiceAccount(self._PROJECT, self._ZONE,
self.json_key_file)
self.assertDictEqual(gce_context.GetImage('foo-image'),
dict(name='foo-image'))
def _MockOutBuildRetriableRequest(self, mock_http):
"""Returns a mock closure of _BuildRetriableRequest.
Fake a GceContext._BuildRetriableRequest() that always uses |mock_http| as
transport.
"""
def _BuildRetriableRequest(_self, num_retries, _http, _thread_safe,
_credentials, *args, **kwargs):
return gce.RetryOnServerErrorHttpRequest(num_retries, mock_http, *args,
**kwargs)
return _BuildRetriableRequest
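# Illustrative note (not part of the original tests): the closure above is what
# lets each test replace GceContext._BuildRetriableRequest so that every request
# goes through the fake transport instead of the network, e.g.
#   http = HttpMockSequence([({'status': '200'}, '{"name": "foo-image"}')])
#   self.PatchObject(gce.GceContext, '_BuildRetriableRequest', autospec=True,
#                    side_effect=self._MockOutBuildRetriableRequest(http))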
|
the-stack_106_27410 | """
FIN module customisations for RLPPTM
License: MIT
"""
from collections import OrderedDict
from gluon import current, A, DIV, IS_EMPTY_OR, IS_INT_IN_RANGE, TAG
from core import FS, IS_ONE_OF, s3_str
ISSUER_ORG_TYPE = "pe_id$pe_id:org_organisation.org_organisation_organisation_type.organisation_type_id"
# -------------------------------------------------------------------------
def fin_voucher_resource(r, tablename):
T = current.T
auth = current.auth
has_role = auth.s3_has_role
s3db = current.s3db
table = s3db.fin_voucher
# Determine form mode
resource = r.resource
group_voucher = resource.tablename == "fin_voucher" and \
r.get_vars.get("g") == "1"
# Customise fields
field = table.pe_id
field.label = T("Issuer##fin")
from core import WithAdvice
field = table.bearer_dob
if group_voucher:
label = T("Group Representative Date of Birth")
intro = "GroupDoBIntro"
else:
label = T("Beneficiary Date of Birth")
intro = "BearerDoBIntro"
field.label = label
field.widget = WithAdvice(field.widget,
text = ("fin", "voucher", intro),
)
if not has_role("VOUCHER_ISSUER"):
field.readable = field.writable = False
field = table.initial_credit
field.label = T("Number of Beneficiaries")
if group_voucher:
field.default = None
field.requires = IS_INT_IN_RANGE(1, 51,
error_message = T("Enter the number of beneficiaries (max %(max)s)"),
)
field.readable = field.writable = True
field = table.comments
field.label = T("Memoranda")
field.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Memoranda"),
T("Notes of the Issuer"),
),
)
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
# Custom list fields
if has_role("VOUCHER_ISSUER"):
list_fields = ["program_id",
"signature",
(T("Beneficiary/Representative Date of Birth"), "bearer_dob"),
"initial_credit",
"credit_spent",
(T("Status"), "status"),
"date",
#"valid_until",
"comments",
]
else:
list_fields = ["program_id",
"signature",
(T("Status"), "status"),
"pe_id",
#(T("Issuer Type"), ISSUER_ORG_TYPE),
"eligibility_type_id",
"initial_credit",
"credit_spent",
"date",
#"valid_until",
]
# Report Options
if r.method == "report":
facts = ((T("Credit Redeemed"), "sum(credit_spent)"),
(T("Credit Issued"), "sum(initial_credit)"),
(T("Remaining Credit"), "sum(balance)"),
(T("Number of Vouchers"), "count(id)"),
)
axes = [ISSUER_ORG_TYPE,
"eligibility_type_id",
"program_id",
"status",
"pe_id",
]
report_options = {
"rows": axes,
"cols": axes,
"fact": facts,
"defaults": {"rows": axes[0],
"cols": axes[1],
"fact": facts[0],
"totals": True,
},
}
s3db.configure("fin_voucher",
report_options = report_options,
)
s3db.configure("fin_voucher",
list_fields = list_fields,
orderby = "fin_voucher.date desc",
)
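# Illustrative wiring (not part of this file): in the template's config.py these
# hooks are typically registered as, e.g.
#   settings.customise_fin_voucher_resource = fin_voucher_resource
#   settings.customise_fin_voucher_controller = fin_voucher_controller
# so that Eden applies them whenever the fin_voucher table or controller is used.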
# -------------------------------------------------------------------------
def fin_voucher_controller(**attr):
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
# Enable bigtable features
settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
# Restrict data formats
settings.ui.export_formats = None
representation = r.representation
ALLOWED_FORMATS = ("html", "iframe", "popup", "aadata", "json")
if representation not in ALLOWED_FORMATS and \
not(r.record and representation == "card"):
r.error(403, current.ERROR.NOT_PERMITTED)
is_program_manager = current.auth.s3_has_role("PROGRAM_MANAGER")
db = current.db
s3db = current.s3db
# Check which programs and organisations the user can issue vouchers for
program_ids, org_ids, pe_ids = s3db.fin_voucher_permitted_programs(mode="issuer")
resource = r.resource
table = resource.table
if program_ids and org_ids:
etypes = s3db.fin_voucher_eligibility_types(program_ids, org_ids)
program_ids = list(etypes.keys())
if not program_ids or not org_ids:
# User is not permitted to issue vouchers for any programs/issuers
resource.configure(insertable = False)
else:
# Limit the program selector to permitted+active programs
field = table.program_id
ptable = s3db.fin_voucher_program
dbset = db(ptable.id.belongs(program_ids))
field.requires = IS_ONE_OF(dbset, "fin_voucher_program.id",
field.represent,
sort = True,
)
# Default the program selector if only one program can be chosen
if len(program_ids) == 1:
program_id = program_ids[0]
field.default = program_id
field.writable = False
# Limit the eligibility type selector to applicable types
allow_empty = False
if len(program_ids) == 1:
etype_ids = etypes[program_ids[0]]
else:
etype_ids = []
for item in etypes.values():
if item:
etype_ids += item
else:
allow_empty = True
etype_ids = list(set(etype_ids)) if etype_ids else None
field = table.eligibility_type_id
if etype_ids is None:
# No selectable eligibility types => hide selector
field.readable = field.writable = False
elif len(etype_ids) == 1 and not allow_empty:
# Only one type selectable => default
field.default = etype_ids[0]
field.writable = False
else:
# Multiple types selectable
ttable = s3db.fin_voucher_eligibility_type
etset = db(ttable.id.belongs(etype_ids))
field.requires = IS_ONE_OF(etset, "fin_voucher_eligibility_type.id",
field.represent,
sort = True,
)
if allow_empty:
field.requires = IS_EMPTY_OR(field.requires)
# Limit the issuer selector to permitted entities
etable = s3db.pr_pentity
field = table.pe_id
dbset = db(etable.pe_id.belongs(pe_ids))
field.requires = IS_ONE_OF(dbset, "pr_pentity.pe_id",
field.represent,
)
# Hide the issuer selector if only one entity can be chosen
if len(pe_ids) == 1:
field.default = pe_ids[0]
field.readable = field.writable = False
if r.interactive:
if r.get_vars.get("g") == "1":
s3.crud_strings["fin_voucher"]["label_create"] = T("Create Group Voucher")
# Hide valid_until from create-form (will be set onaccept)
field = table.valid_until
field.readable = bool(r.record)
field.writable = False
# Always show number of beneficiaries
if r.record:
field = table.initial_credit
field.readable = True
# Filter Widgets
from core import DateFilter, TextFilter
text_fields = ["signature", "comments", "program_id$name"]
if is_program_manager:
text_fields.append("pe_id$pe_id:org_organisation.name")
filter_widgets = [
TextFilter(text_fields,
label = T("Search"),
),
DateFilter("date",
),
]
if is_program_manager:
from core import OptionsFilter, get_filter_options
filter_widgets.extend([
OptionsFilter("eligibility_type_id",
hidden = True,
label = T("Type of Eligibility"),
),
OptionsFilter(ISSUER_ORG_TYPE,
hidden = True,
label = T("Issuer Type"),
options = lambda: get_filter_options("org_organisation_type"),
),
])
resource.configure(filter_widgets = filter_widgets,
)
elif r.representation == "card":
# Configure ID card layout
from ..vouchers import VoucherCardLayout
resource.configure(pdf_card_layout = VoucherCardLayout,
pdf_card_suffix = lambda record: \
s3_str(record.signature) \
if record and record.signature else None,
)
return result
s3.prep = prep
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if not r.component and isinstance(output, dict):
if r.record and r.method in (None, "update", "read"):
# Custom CRUD buttons
if "buttons" not in output:
buttons = output["buttons"] = {}
else:
buttons = output["buttons"]
# PDF-button
pdf_download = A(T("Download PDF"),
_href = "/%s/fin/voucher/%s.card" % (r.application, r.record.id),
_class="action-btn",
)
# Render in place of the delete-button
buttons["delete_btn"] = TAG[""](pdf_download,
)
return output
s3.postp = custom_postp
# Custom rheader
from ..rheaders import rlpptm_fin_rheader
attr["rheader"] = rlpptm_fin_rheader
return attr
# -------------------------------------------------------------------------
def fin_voucher_debit_resource(r, tablename):
T = current.T
auth = current.auth
has_role = auth.s3_has_role
s3db = current.s3db
table = s3db.fin_voucher_debit
# Determine form mode
resource = r.resource
group_voucher = resource.tablename == "fin_voucher_debit" and \
r.get_vars.get("g") == "1"
# Customise fields
field = table.comments
field.label = T("Memoranda")
field.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Memoranda"),
T("Notes of the Provider"),
),
)
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
field = table.bearer_dob
if group_voucher:
label = T("Group Representative Date of Birth")
else:
label = T("Beneficiary Date of Birth")
field.label = label
if not has_role("VOUCHER_PROVIDER"):
field.readable = field.writable = False
field = table.quantity
if group_voucher:
field.default = None
field.requires = IS_INT_IN_RANGE(1,
error_message = T("Enter the service quantity"),
)
field.readable = field.writable = True
field = table.balance
field.label = T("Remaining Compensation Claims")
# Custom list_fields
list_fields = [(T("Date"), "date"),
"program_id",
"voucher_id$signature",
"quantity",
"status",
]
if current.auth.s3_has_roles(("PROGRAM_MANAGER", "PROGRAM_ACCOUNTANT")):
# Include issuer and provider
list_fields[3:3] = ["voucher_id$pe_id",
"pe_id",
]
if has_role("VOUCHER_PROVIDER"):
# Include provider notes
list_fields.append("comments")
s3db.configure("fin_voucher_debit",
list_fields = list_fields,
)
# Filters
if r.interactive:
from core import DateFilter, TextFilter
filter_widgets = [TextFilter(["program_id$name",
"signature",
],
label = T("Search"),
),
DateFilter("date",
label = T("Date"),
),
]
s3db.configure("fin_voucher_debit",
filter_widgets = filter_widgets,
)
# Report options
if r.method == "report":
field = table.created_by
field.represent = s3db.auth_UserRepresent(show_name = True,
show_email = False,
)
facts = ((T("Total Services Rendered"), "sum(quantity)"),
(T("Number of Accepted Vouchers"), "count(id)"),
(T("Remaining Compensation Claims"), "sum(balance)"),
)
axes = ["program_id",
"status",
]
has_role = auth.s3_has_role
if has_role("PROGRAM_MANAGER"):
axes.insert(0, "pe_id")
if has_role("VOUCHER_PROVIDER"):
axes.append((T("User"), "created_by"))
report_options = {
"rows": axes,
"cols": axes,
"fact": facts,
"defaults": {"rows": axes[0],
"cols": None,
"fact": facts[0],
"totals": True,
},
}
s3db.configure("fin_voucher_debit",
report_options = report_options,
)
# -------------------------------------------------------------------------
def fin_voucher_debit_controller(**attr):
T = current.T
s3 = current.response.s3
# Enable bigtable features
current.deployment_settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
db = current.db
s3db = current.s3db
resource = r.resource
# Catch inappropriate cancel-attempts
record = r.record
if record and not r.component and r.method == "cancel":
from ..helpers import can_cancel_debit
if not can_cancel_debit(record):
r.unauthorised()
has_role = current.auth.s3_has_role
if has_role("PROGRAM_ACCOUNTANT") and not has_role("PROGRAM_MANAGER"):
# PROGRAM_ACCOUNTANT can only see debits where they are assigned
# for the billing process
from ..helpers import get_role_realms
role_realms = get_role_realms("PROGRAM_ACCOUNTANT")
if role_realms is not None:
query = FS("billing_id$organisation_id$pe_id").belongs(role_realms)
resource.add_filter(query)
# PROGRAM_ACCOUNTANT does not (need to) see cancelled debits
resource.add_filter(FS("cancelled") == False)
# Check which programs and organisations the user can accept vouchers for
program_ids, org_ids, pe_ids = s3db.fin_voucher_permitted_programs(
mode = "provider",
partners_only = True,
)
table = resource.table
if not program_ids or not org_ids:
# User is not permitted to accept vouchers for any programs/providers
resource.configure(insertable = False)
else:
# Limit the program selector to permitted programs
field = table.program_id
ptable = s3db.fin_voucher_program
dbset = db(ptable.id.belongs(program_ids))
field.requires = IS_ONE_OF(dbset, "fin_voucher_program.id",
field.represent,
sort = True,
)
# Hide the program selector if only one program can be chosen
rows = dbset.select(ptable.id, limitby=(0, 2))
if len(rows) == 1:
field.default = rows.first().id
field.writable = False
# Limit the provider selector to permitted entities
etable = s3db.pr_pentity
field = table.pe_id
dbset = db(etable.pe_id.belongs(pe_ids))
field.requires = IS_ONE_OF(dbset, "pr_pentity.pe_id",
field.represent,
)
# Hide the provider selector if only one entity can be chosen
rows = dbset.select(etable.pe_id, limitby=(0, 2))
if len(rows) == 1:
field.default = rows.first().pe_id
field.readable = field.writable = False
# Always show quantity
if record:
field = table.quantity
field.readable = True
if r.interactive:
if r.get_vars.get("g") == "1":
s3.crud_strings["fin_voucher_debit"]["label_create"] = T("Accept Group Voucher")
return result
s3.prep = prep
# Custom rheader
from ..rheaders import rlpptm_fin_rheader
attr["rheader"] = rlpptm_fin_rheader
return attr
# -------------------------------------------------------------------------
def fin_voucher_program_resource(r, tablename):
T = current.T
table = current.s3db.fin_voucher_program
represent = lambda v, row=None: -v if v else current.messages["NONE"]
field = table.credit
field.label = T("Pending Credits")
field.represent = represent
field = table.compensation
field.label = T("Pending Compensation Claims")
field.represent = represent
# -------------------------------------------------------------------------
def fin_voucher_program_controller(**attr):
s3 = current.response.s3
# Enable bigtable features
current.deployment_settings.base.bigtable = True
# Custom prep
standard_prep = s3.prep
def prep(r):
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
resource = r.resource
has_role = current.auth.s3_has_role
if has_role("PROGRAM_ACCOUNTANT") and not has_role("PROGRAM_MANAGER"):
# PROGRAM_ACCOUNTANT can only see programs where they are
# assigned for a billing process
from ..helpers import get_role_realms
role_realms = get_role_realms("PROGRAM_ACCOUNTANT")
if role_realms is not None:
query = FS("voucher_billing.organisation_id$pe_id").belongs(role_realms)
resource.add_filter(query)
return result
s3.prep = prep
return attr
# -------------------------------------------------------------------------
def billing_onaccept(form):
"""
Custom onaccept of billing:
- make sure all invoices are owned by the accountant
organisation (as long as they are the accountants in charge)
"""
# Get record ID
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
return
db = current.db
s3db = current.s3db
# Get the billing/program organisations
table = s3db.fin_voucher_billing
ptable = s3db.fin_voucher_program
left = ptable.on((ptable.id == table.program_id) & \
(ptable.deleted == False))
query = (table.id == record_id)
row = db(query).select(table.id,
table.organisation_id,
ptable.organisation_id,
left = left,
limitby = (0, 1),
).first()
if not row:
return
# Identify the organisation to own the invoices under this process
billing = row.fin_voucher_billing
organisation_id = billing.organisation_id
if not organisation_id:
organisation_id = row.fin_voucher_program.organisation_id
# Update the realm entity as needed
if organisation_id:
pe_id = s3db.pr_get_pe_id("org_organisation", organisation_id)
itable = s3db.fin_voucher_invoice
query = (itable.billing_id == billing.id) & \
(itable.realm_entity != pe_id) & \
(itable.deleted == False)
current.auth.set_realm_entity(itable,
query,
entity = pe_id,
force_update = True,
)
# Re-assign pending invoices
from ..helpers import assign_pending_invoices
assign_pending_invoices(billing.id,
organisation_id = organisation_id,
)
# -------------------------------------------------------------------------
def fin_voucher_billing_resource(r, tablename):
s3db = current.s3db
table = current.s3db.fin_voucher_billing
# Color-coded representation of billing process status
field = table.status
from core import S3PriorityRepresent
status_opts = s3db.fin_voucher_billing_status_opts
field.represent = S3PriorityRepresent(status_opts,
{"SCHEDULED": "lightblue",
"IN PROGRESS": "amber",
"ABORTED": "black",
"COMPLETE": "green",
}).represent
# Custom onaccept to maintain realm-assignment of invoices
# when accountant organisation changes
s3db.add_custom_callback("fin_voucher_billing",
"onaccept",
billing_onaccept,
)
# -------------------------------------------------------------------------
def claim_create_onaccept(form):
"""
Custom create-onaccept for claim to notify the provider
accountant about the new claim
"""
# Get record ID
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
return
T = current.T
db = current.db
s3db = current.s3db
table = s3db.fin_voucher_claim
btable = s3db.fin_voucher_billing
ptable = s3db.fin_voucher_program
join = [ptable.on(ptable.id == table.program_id),
btable.on(btable.id == table.billing_id),
]
query = (table.id == record_id)
row = db(query).select(table.id,
table.program_id,
table.billing_id,
table.pe_id,
table.status,
btable.date,
ptable.name,
ptable.organisation_id,
join = join,
limitby = (0, 1),
).first()
if not row:
return
program = row.fin_voucher_program
billing = row.fin_voucher_billing
claim = row.fin_voucher_claim
if claim.status != "NEW":
return
error = None
# Look up the provider organisation
pe_id = claim.pe_id
otable = s3db.org_organisation
provider = db(otable.pe_id == pe_id).select(otable.id,
otable.name,
limitby = (0, 1),
).first()
from ..helpers import get_role_emails
provider_accountants = get_role_emails("PROVIDER_ACCOUNTANT", pe_id)
if not provider_accountants:
error = "No provider accountant found"
if not error:
# Lookup the template variables
base_url = current.deployment_settings.get_base_public_url()
appname = current.request.application
data = {"program": program.name,
"date": btable.date.represent(billing.date),
"organisation": provider.name,
"url": "%s/%s/fin/voucher_claim/%s" % (base_url, appname, claim.id),
}
# Send the email notification
from ..notifications import CMSNotifications
error = CMSNotifications.send(provider_accountants,
"ClaimNotification",
data,
module = "fin",
resource = "voucher_claim",
)
if error:
# Inform the program manager that the provider could not be notified
msg = T("%(name)s could not be notified of new compensation claim: %(error)s") % \
{"name": provider.name, "error": error}
program_managers = get_role_emails("PROGRAM_MANAGER",
organisation_id = program.organisation_id,
)
if program_managers:
current.msg.send_email(to = program_managers,
subject = T("Provider Notification Failed"),
message = msg,
)
current.log.error(msg)
else:
current.log.debug("Provider '%s' notified about new compensation claim" % provider.name)
# -------------------------------------------------------------------------
def fin_voucher_claim_resource(r, tablename):
T = current.T
auth = current.auth
s3db = current.s3db
table = s3db.fin_voucher_claim
is_provider_accountant = auth.s3_has_role("PROVIDER_ACCOUNTANT")
if not is_provider_accountant:
# Hide comments
field = table.comments
field.readable = field.writable = False
# Color-coded representation of claim status
field = table.status
from core import S3PriorityRepresent
status_opts = s3db.fin_voucher_claim_status_opts
field.represent = S3PriorityRepresent(status_opts,
{"NEW": "lightblue",
"CONFIRMED": "blue",
"INVOICED": "amber",
"PAID": "green",
}).represent
# Custom list fields
list_fields = [#"refno",
"date",
"program_id",
#"pe_id",
"vouchers_total",
"quantity_total",
"amount_receivable",
"currency",
"status",
]
if is_provider_accountant:
list_fields.insert(0, "refno")
text_fields = ["refno",
"comments",
]
else:
list_fields.insert(2, "pe_id")
text_fields = ["pe_id$pe_id:org_organisation.name",
]
# Filter widgets
from core import TextFilter, OptionsFilter, get_filter_options
filter_widgets = [TextFilter(text_fields,
label = T("Search"),
),
OptionsFilter("program_id",
options = lambda: get_filter_options("fin_voucher_program"),
),
]
s3db.configure("fin_voucher_claim",
filter_widgets = filter_widgets,
list_fields = list_fields,
)
# PDF export method
from ..helpers import ClaimPDF
s3db.set_method("fin_voucher_claim",
method = "record",
action = ClaimPDF,
)
s3db.add_custom_callback("fin_voucher_claim",
"onaccept",
claim_create_onaccept,
method = "create",
)
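# Note (added): with the "record" method registered above, a claim can be
# exported as PDF via a URL of the form /fin/voucher_claim/<id>/record.pdf,
# which is the same URL the download button in the controller below points to.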
# -------------------------------------------------------------------------
def fin_voucher_claim_controller(**attr):
T = current.T
s3 = current.response.s3
s3db = current.s3db
# Custom prep
standard_prep = s3.prep
def prep(r):
# Block all non-interactive update attempts
if not r.interactive and r.http != "GET":
r.error(403, current.ERROR.NOT_PERMITTED)
# Call standard prep
result = standard_prep(r) if callable(standard_prep) else True
# Check which programs and organisations the user can accept vouchers for
program_ids, org_ids = s3db.fin_voucher_permitted_programs(mode = "provider",
partners_only = True,
c = "fin",
f = "voucher_debit",
)[:2]
if not program_ids or not org_ids:
s3db.configure("fin_voucher_debit",
insertable = False,
)
return result
s3.prep = prep
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if not r.component and isinstance(output, dict):
record = r.record
if record and r.method in (None, "update", "read"):
# Hint that the user needs to confirm the claim
if record.status == "NEW" and \
all(record[fn] for fn in ("account_holder", "account_number")):
current.response.warning = T('You must change the status to "confirmed" before an invoice can be issued')
# Custom CRUD buttons
if "buttons" not in output:
buttons = output["buttons"] = {}
else:
buttons = output["buttons"]
# PDF-button
pdf_download = A(T("Download PDF"),
_href = "/%s/fin/voucher_claim/%s/record.pdf" % \
(r.application, record.id),
_class="action-btn",
)
# Render in place of the delete-button
buttons["delete_btn"] = TAG[""](pdf_download,
)
return output
s3.postp = custom_postp
return attr
# -------------------------------------------------------------------------
def invoice_onsettled(invoice):
"""
Callback to notify the provider that an invoice has been settled
Args:
invoice: the invoice (Row)
"""
db = current.db
s3db = current.s3db
# Look up claim, invoice number, program and billing
btable = s3db.fin_voucher_billing
ctable = s3db.fin_voucher_claim
itable = s3db.fin_voucher_invoice
ptable = s3db.fin_voucher_program
join = [ptable.on(ptable.id == ctable.program_id),
btable.on(btable.id == ctable.billing_id),
itable.on(itable.id == ctable.invoice_id),
]
query = (ctable.invoice_id == invoice.id) & \
(ctable.deleted == False)
row = db(query).select(ctable.id,
ctable.program_id,
ctable.billing_id,
ctable.pe_id,
btable.date,
itable.invoice_no,
ptable.name,
ptable.organisation_id,
join = join,
limitby = (0, 1),
).first()
if not row:
return
program = row.fin_voucher_program
billing = row.fin_voucher_billing
claim = row.fin_voucher_claim
invoice_no = row.fin_voucher_invoice.invoice_no
error = None
# Look up the provider organisation
pe_id = claim.pe_id
otable = s3db.org_organisation
provider = db(otable.pe_id == pe_id).select(otable.id,
otable.name,
limitby = (0, 1),
).first()
from ..helpers import get_role_emails
provider_accountants = get_role_emails("PROVIDER_ACCOUNTANT", pe_id)
if not provider_accountants:
error = "No provider accountant found"
if not error:
# Lookup the template variables
base_url = current.deployment_settings.get_base_public_url()
appname = current.request.application
data = {"program": program.name,
"date": btable.date.represent(billing.date),
"invoice": invoice_no,
"organisation": provider.name,
"url": "%s/%s/fin/voucher_claim/%s" % (base_url, appname, claim.id),
}
# Send the email notification
from ..notifications import CMSNotifications
error = CMSNotifications.send(provider_accountants,
"InvoiceSettled",
data,
module = "fin",
resource = "voucher_invoice",
)
if error:
msg = "%s could not be notified about invoice settlement: %s"
current.log.error(msg % (provider.name, error))
else:
msg = "%s notified about invoice settlement"
current.log.debug(msg % provider.name)
# -------------------------------------------------------------------------
def invoice_create_onaccept(form):
"""
Custom create-onaccept to assign a new invoice to an
accountant
"""
# Get record ID
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
return
# Look up the billing ID
table = current.s3db.fin_voucher_invoice
query = (table.id == record_id)
invoice = current.db(query).select(table.billing_id,
limitby = (0, 1),
).first()
if invoice:
# Assign the invoice
from ..helpers import assign_pending_invoices
assign_pending_invoices(invoice.billing_id,
invoice_id = record_id,
)
# -------------------------------------------------------------------------
def fin_voucher_invoice_resource(r, tablename):
T = current.T
auth = current.auth
s3db = current.s3db
table = s3db.fin_voucher_invoice
# Color-coded representation of invoice status
from core import S3PriorityRepresent
field = table.status
try:
status_opts = field.requires.options()
except AttributeError:
status_opts = []
else:
field.represent = S3PriorityRepresent(status_opts,
{"NEW": "lightblue",
"APPROVED": "blue",
"REJECTED": "red",
"PAID": "green",
})
is_accountant = auth.s3_has_role("PROGRAM_ACCOUNTANT")
# Personal work list?
if is_accountant and r.get_vars.get("mine") == "1":
title_list = T("My Work List")
default_status = ["NEW", "REJECTED"]
default_hr = current.auth.s3_logged_in_human_resource()
else:
title_list = T("All Invoices")
default_status = default_hr = None
current.response.s3.crud_strings["fin_voucher_invoice"].title_list = title_list
# Lookup method for HR filter options
if is_accountant:
def hr_filter_opts():
hresource = s3db.resource("hrm_human_resource")
rows = hresource.select(["id", "person_id"], represent=True).rows
return {row["hrm_human_resource.id"]:
row["hrm_human_resource.person_id"] for row in rows}
else:
hr_filter_opts = None
# Filter widgets
from core import DateFilter, OptionsFilter, TextFilter
if r.interactive:
filter_widgets = [TextFilter(["invoice_no",
"refno",
],
label = T("Search"),
),
OptionsFilter("status",
default = default_status,
options = OrderedDict(status_opts),
sort = False,
),
OptionsFilter("human_resource_id",
default = default_hr,
options = hr_filter_opts,
),
DateFilter("date",
hidden = True,
),
OptionsFilter("pe_id",
hidden = True,
),
OptionsFilter("pe_id$pe_id:org_organisation.facility.location_id$L2",
hidden = True,
),
]
s3db.configure("fin_voucher_invoice",
filter_widgets = filter_widgets,
)
# Custom create-onaccept to assign the invoice
s3db.add_custom_callback("fin_voucher_invoice",
"onaccept",
invoice_create_onaccept,
method = "create",
)
# PDF export method
from ..helpers import InvoicePDF
s3db.set_method("fin_voucher_invoice",
method = "record",
action = InvoicePDF,
)
# Callback when invoice is settled
s3db.configure("fin_voucher_invoice",
onsettled = invoice_onsettled,
)
# -------------------------------------------------------------------------
def fin_voucher_invoice_controller(**attr):
T = current.T
s3 = current.response.s3
# Enable bigtable features
current.deployment_settings.base.bigtable = True
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if not r.component and isinstance(output, dict):
if r.record and r.method in (None, "update", "read"):
# Custom CRUD buttons
if "buttons" not in output:
buttons = output["buttons"] = {}
else:
buttons = output["buttons"]
# PDF-button
pdf_download = A(T("Download PDF"),
_href = "/%s/fin/voucher_invoice/%s/record.pdf" % \
(r.application, r.record.id),
_class="action-btn",
)
# Render in place of the delete-button
buttons["delete_btn"] = TAG[""](pdf_download,
)
return output
s3.postp = custom_postp
# Custom rheader
from ..rheaders import rlpptm_fin_rheader
attr["rheader"] = rlpptm_fin_rheader
return attr
# END =========================================================================
|
the-stack_106_27412 | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 14:50, 20/04/2020 %
# %
# Email: [email protected] %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec.cec2005.root import Root
from numpy import dot, max, abs, array
from pandas import read_csv
class Model(Root):
def __init__(self, f_name="Schwefel's Problem 2.6 with Global Optimum on Bounds", f_shift_data_file="data_schwefel_206",
f_ext='.txt', f_bias=-310):
Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)
def load_shift_data(self):
data = read_csv(self.support_path_data + self.f_shift_data_file + self.f_ext, delimiter='\s+', index_col=False, header=None)
data = data.values
shift_data = data[:1, :]
matrix_data = data[1:, :]
return shift_data, matrix_data
def _main__(self, solution=None):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2005 does not support problem sizes > 100")
return 1
shift_data, matrix_data = self.load_shift_data()
shift_data = shift_data.reshape(-1)[:problem_size]
t1 = int(0.25 * problem_size) + 1
t2 = int(0.75 * problem_size)
shift_data[:t1] = -100
shift_data[t2:] = 100
matrix_data = matrix_data[:problem_size, :problem_size]
result = []
for i in range(0, problem_size):
temp = abs(dot(matrix_data[i], solution) - dot(matrix_data[i], shift_data))
result.append(temp)
return max(array(result)) + self.f_bias
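# Illustrative usage (not part of the original file; assumes the bundled
# shift-data file is present and the solution dimension is <= 100):
#   from numpy import zeros
#   func = Model()
#   value = func._main__(zeros(30))   # objective value, offset by f_bias = -310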
|
the-stack_106_27414 | # Implements a quick and dirty genetic algorithm to search hyperparameters
# Would be better (and more general) with an object-oriented re-implementation:
# each hyperparameter would be its own class with methods for how it varies and how it is randomly generated,
# plus an overall Hyperparameters class holding a dictionary of its hyperparameters.
# Uses pandas dataframe
import pandas as pd
import numpy as np
from numpy import random
import tensorflow as tf
import deepchem as dc
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from math import ceil, log
# set global variables for min/max for each parameter
N_HIDDEN = [10, 80]
N_LAYERS = [1, 7]
LEARNING_RATE = [-3, 0]
LEARNING_RATE_TYPE = 'log_uniform'
DROPOUT_PROB = [0.2, 0.8]
N_EPOCHS = [10, 80]
BATCH_SIZE = [8, 1024]
def import_dc_data():
"""import the deepchem data, delete additional task labels, export train/validation/test sets"""
_, (train, valid, test), _ = dc.molnet.load_tox21()
train_X, train_y, train_w = train.X, train.y, train.w
valid_X, valid_y, valid_w = valid.X, valid.y, valid.w
test_X, test_y, test_w = test.X, test.y, test.w
# Remove extra tasks
train_y = train_y[:, 0]
valid_y = valid_y[:, 0]
test_y = test_y[:, 0]
train_w = train_w[:, 0]
valid_w = valid_w[:, 0]
test_w = test_w[:, 0]
# return the data as a dictionary
dc_data = {'train_X': train_X, 'valid_X': valid_X, 'test_X': test_X,
'train_y': train_y, 'valid_y': valid_y, 'test_y': test_y,
'train_w': train_w, 'valid_w': valid_w, 'test_w': test_w}
return dc_data
def eval_tox21_hyperparams(dc_data, n_hidden=50, n_layers=1, learning_rate=.001,
dropout_prob=0.5, n_epochs=45, batch_size=100,
weight_positives=True):
d = 1024
graph = tf.Graph()
with graph.as_default():
# Generate tensorflow graph
with tf.name_scope("placeholders"):
x = tf.placeholder(tf.float32, (None, d))
y = tf.placeholder(tf.float32, (None,))
w = tf.placeholder(tf.float32, (None,))
keep_prob = tf.placeholder(tf.float32)
x_hidden = x
for layer in range(n_layers):
with tf.name_scope("layer-%d" % layer):
# layer 0 reads the d-dimensional input; later layers read the previous hidden output
in_dim = d if layer == 0 else n_hidden
W = tf.Variable(tf.random_normal((in_dim, n_hidden)))
b = tf.Variable(tf.random_normal((n_hidden,)))
x_hidden = tf.nn.relu(tf.matmul(x_hidden, W) + b)
# Apply dropout
x_hidden = tf.nn.dropout(x_hidden, keep_prob)
with tf.name_scope("output"):
W = tf.Variable(tf.random_normal((n_hidden, 1)))
b = tf.Variable(tf.random_normal((1,)))
y_logit = tf.matmul(x_hidden, W) + b
# the sigmoid gives the class probability of 1
y_one_prob = tf.sigmoid(y_logit)
# Rounding P(y=1) will give the correct prediction.
y_pred = tf.round(y_one_prob)
with tf.name_scope("loss"):
# Compute the cross-entropy term for each datapoint
y_expand = tf.expand_dims(y, 1)
entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=y_logit, labels=y_expand)
# Multiply by weights
if weight_positives:
w_expand = tf.expand_dims(w, 1)
entropy = w_expand * entropy
# Sum all contributions
l = tf.reduce_sum(entropy)
with tf.name_scope("optim"):
train_op = tf.train.AdamOptimizer(learning_rate).minimize(l)
with tf.name_scope("summaries"):
tf.summary.scalar("loss", l)
merged = tf.summary.merge_all()
# For tensorboard visualization
# hyperparam_str = "d-%d-hidden-%d-lr-%f-n_epochs-%d-batch_size-%d-weight_pos-%s" % (
# d, n_hidden, learning_rate, n_epochs, batch_size, str(weight_positives))
# train_writer = tf.summary.FileWriter('/tmp/fcnet-func-' + hyperparam_str,
# tf.get_default_graph())
N = dc_data['train_X'].shape[0]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(n_epochs):
pos = 0
while pos < N:
batch_X = dc_data['train_X'][pos:pos+batch_size]
batch_y = dc_data['train_y'][pos:pos+batch_size]
batch_w = dc_data['train_w'][pos:pos+batch_size]
feed_dict = {x: batch_X, y: batch_y, w: batch_w, keep_prob: dropout_prob}
_, summary, loss = sess.run([train_op, merged, l], feed_dict=feed_dict)
# print("epoch %d, step %d, loss: %f" % (epoch, step, loss))
# train_writer.add_summary(summary, step)
step += 1
pos += batch_size
# Make Predictions (set keep_prob to 1.0 for predictions)
valid_y_pred = sess.run(y_pred, feed_dict={x: dc_data['valid_X'], keep_prob: 1.0})
valid_y = dc_data['valid_y'] # get labels
acc = accuracy_score(valid_y, valid_y_pred, sample_weight=dc_data['valid_w'])
prec = precision_score(valid_y, valid_y_pred) # can't weight?
recall = recall_score(valid_y, valid_y_pred) # can't weight?
roc_auc = roc_auc_score(valid_y, valid_y_pred)
return (acc, prec, recall, roc_auc)
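# Illustrative call (not part of the original file): score a single
# hyperparameter setting on the validation split, e.g.
#   dc_data = import_dc_data()
#   acc, prec, rec, auc = eval_tox21_hyperparams(dc_data, n_hidden=64, n_layers=2,
#                                                learning_rate=1e-3, n_epochs=20)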
# build a dataframe for model hyperparameters and evaluation metrics
def eval_log(name=None, n_hidden=50, n_layers=1, learning_rate=.001,
dropout_prob=0.5, n_epochs=45, batch_size=100,
weight_positives=True):
""" Evaluates the model on the hyperparameters supplied as arguments,
returns the results as a pd.Series """
# run the model
(acc, prec, rec, auc) = eval_tox21_hyperparams(dc_data, n_hidden=n_hidden, n_layers=n_layers,
learning_rate=learning_rate,
dropout_prob=dropout_prob, n_epochs=n_epochs,
batch_size=batch_size, weight_positives=weight_positives)
# create a dict
hparams = {'n_hidden': n_hidden,
'n_layers': n_layers,
'learning_rate': learning_rate,
'dropout_prob': dropout_prob,
'batch_size': batch_size,
'weight_positives': weight_positives,
'accuracy_score': acc,
'precision_score': prec,
'recall_score': rec,
'auc': auc}
return pd.Series(hparams, name=name, index=hparams.keys())
def get_random_hparams(n, n_hidden=N_HIDDEN, n_layers=N_LAYERS, learning_rate=LEARNING_RATE,
learning_rate_type=LEARNING_RATE_TYPE,
dropout_prob=DROPOUT_PROB, n_epochs=N_EPOCHS, batch_size=BATCH_SIZE):
""" Creates n sets of hyperparameters randomly; the default arguments give the sampling bounds.
weight_positives is drawn uniformly from {True, False}. """
arr_n_hidden = random.randint(n_hidden[0], n_hidden[1], size=n, dtype=int)
arr_n_layers = random.randint(n_layers[0], n_layers[1], size=n, dtype=int)
rand_lr = min(learning_rate) + ((learning_rate[1] - learning_rate[0]) * random.rand(n))
if learning_rate_type == 'log_uniform':
arr_learning_rate = np.power(10, rand_lr)
else:
arr_learning_rate = rand_lr
arr_dropout_prob = min(dropout_prob) + ((dropout_prob[1] - dropout_prob[0]) * random.rand(n))
arr_n_epochs = random.randint(n_epochs[0], n_epochs[1], size=n, dtype=int)
arr_batch_size = random.randint(batch_size[0], batch_size[1], size=n, dtype=int)
arr_weight_positives = random.choice([True, False], size=n)
return (arr_n_hidden, arr_n_layers, arr_learning_rate, arr_dropout_prob,
arr_n_epochs, arr_batch_size, arr_weight_positives)
def run_n_models(dc_data, hparams_tuple):
""" takes a dictionary of hyperparameters (each is an array) and runs the model on all the params in the array.
returns a dictionary of arrays with output metrics """
(arr_n_hidden, arr_n_layers, arr_learning_rate,
arr_dropout_prob, arr_n_epochs, arr_batch_size, arr_weight_positives) = hparams_tuple
# create empty arrays for output metrics
n = len(arr_n_hidden)
acc = np.zeros(n)
prec = np.zeros(n)
rec = np.zeros(n)
auc = np.zeros(n)
# use a dirty for loop for now
for i in range(n):
(acc[i], prec[i], rec[i], auc[i]) = eval_tox21_hyperparams(dc_data, n_hidden=arr_n_hidden[i],
n_layers=arr_n_layers[i],
learning_rate=arr_learning_rate[i],
dropout_prob=arr_dropout_prob[i],
n_epochs=arr_n_epochs[i],
batch_size=arr_batch_size[i],
weight_positives=arr_weight_positives[i])
# return tuple of arrays
return (acc, prec, rec, auc)
def eval_n_models(dc_data, n=5, generation=None, init="random", hparams=None):
"""evaluates n different models. Generates hyperparameters randomly if not specified."""
if init == 'hparams':
params = hparams
# default to init='random'
else:
params = get_random_hparams(n)
(acc, prec, rec, auc) = run_n_models(dc_data, params)
# if epoch is specified, write it as a column
dict = {'generation': pd.Series(np.full(n, generation)),
'n_hidden': pd.Series(params[0]),
'n_layers': pd.Series(params[1]),
'learning_rate': pd.Series(params[2]),
'dropout_prob': pd.Series(params[3]),
'n_epochs': pd.Series(params[4]),
'batch_size': pd.Series(params[5]),
'weight_positives': pd.Series(params[6]),
'acc': pd.Series(acc),
'prec': pd.Series(prec),
'rec': pd.Series(rec),
'auc': pd.Series(auc)}
df = pd.DataFrame.from_dict(dict)
return df
def get_hparams_from_dataframe(df):
""" gets the hparams from the supplied dataframe and returns them as a tuple of numpy arrays """
arr_n_hidden = np.asarray(df.loc[:, 'n_hidden'])
arr_n_layers = np.asarray(df.loc[:, 'n_layers'])
arr_learning_rate = np.asarray(df.loc[:, 'learning_rate'])
arr_dropout_prob = np.asarray(df.loc[:, 'dropout_prob'])
arr_n_epochs = np.asarray(df.loc[:, 'n_epochs'])
arr_batch_size = np.asarray(df.loc[:, 'batch_size'])
arr_weight_positives = np.asarray(df.loc[:, 'weight_positives'])
return (arr_n_hidden, arr_n_layers, arr_learning_rate, arr_dropout_prob,
arr_n_epochs, arr_batch_size, arr_weight_positives)
def ga_calc_next_gen(prev_generation, generation_size, metric='auc'):
""" calculates the hyperparameters for the next generation"""
# initialize empty arrays for next generation
arr_n_hidden = np.zeros(generation_size, dtype=int)
arr_n_layers = np.zeros(generation_size, dtype=int)
arr_learning_rate = np.zeros(generation_size, dtype=np.float32)
arr_dropout_prob = np.zeros(generation_size, dtype=np.float32)
arr_n_epochs = np.zeros(generation_size, dtype=int)
arr_batch_size = np.zeros(generation_size, dtype=int)
arr_weight_positives = np.zeros(generation_size, dtype=bool)
# sort the previous generation by desired metric
sortd = prev_generation.sort_values(metric, ascending=False)
# split the previous generation into quartiles
q1 = ceil(generation_size * 0.25)
q2 = ceil(generation_size * 0.50)
q3 = ceil(generation_size * 0.75)
# top quartile go straight through
arr_n_hidden[0:q1] = sortd.iloc[0:q1].loc[:, 'n_hidden']
arr_n_layers[0:q1] = sortd.iloc[0:q1].loc[:, 'n_layers']
arr_learning_rate[0:q1] = sortd.iloc[0:q1].loc[:, 'learning_rate']
arr_dropout_prob[0:q1] = sortd.iloc[0:q1].loc[:, 'dropout_prob']
arr_n_epochs[0:q1] = sortd.iloc[0:q1].loc[:, 'n_epochs']
arr_batch_size[0:q1] = sortd.iloc[0:q1].loc[:, 'batch_size']
arr_weight_positives[0:q1] = sortd.iloc[0:q1].loc[:, 'weight_positives']
# second quartile are crossed from first quartile
# get a shuffled view of the top quartile
shuf = np.arange(q1)
np.random.shuffle(shuf) # shuffle in place
shuffled = sortd.iloc[0:q1].iloc[shuf]
for i in range(q2 - q1):
arr_n_hidden[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['n_hidden'],
shuffled.iloc[i].loc['n_hidden']]))
arr_n_layers[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['n_layers'],
shuffled.iloc[i].loc['n_layers']]))
arr_learning_rate[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['learning_rate'],
shuffled.iloc[i].loc['learning_rate']]))
arr_dropout_prob[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['dropout_prob'],
shuffled.iloc[i].loc['dropout_prob']]))
arr_n_epochs[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['n_epochs'],
shuffled.iloc[i].loc['n_epochs']]))
arr_batch_size[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['batch_size'],
shuffled.iloc[i].loc['batch_size']]))
arr_weight_positives[q1 + i] = np.random.choice(np.asarray([sortd.iloc[i].loc['weight_positives'],
shuffled.iloc[i].loc['weight_positives']]))
# third quartile are mutations of first quartile
for i in range(q3 - q2):
arr_n_hidden[q2 + i] = int(np.random.normal(loc=sortd.iloc[i].loc['n_hidden'],
scale=sortd.loc[:, 'n_hidden'].std()))
arr_n_layers[q2 + i] = int(np.random.normal(loc=sortd.iloc[i].loc['n_layers'],
scale=sortd.loc[:, 'n_layers'].std()))
# Learning rate is exponential so this needs to be a log normal dist
lr_log = np.log10(sortd.iloc[i].loc['learning_rate']) # get log10 of the learning rate
# get std of the log10 learning rates
lr_log_std = np.log10(sortd.loc[:, 'learning_rate']).std()
print("log lr = ", lr_log, "lr_log_std", lr_log_std)
arr_learning_rate[q2 + i] = np.power(10, np.random.normal(loc=lr_log,
scale=lr_log_std))
# arr_learning_rate[q2 + i] = np.random.normal(loc=sortd.iloc[i].loc['learning_rate'],
# scale=sortd.loc[:, 'learning_rate'].std())
arr_dropout_prob[q2 + i] = np.random.normal(loc=sortd.iloc[i].loc['dropout_prob'],
scale=sortd.loc[:, 'dropout_prob'].std())
arr_n_epochs[q2 + i] = int(np.random.normal(loc=sortd.iloc[i].loc['n_epochs'],
scale=sortd.loc[:, 'n_epochs'].std()))
arr_batch_size[q2 + i] = int(np.random.normal(loc=sortd.iloc[i].loc['batch_size'],
scale=sortd.loc[:, 'batch_size'].std()))
# randomly flip weight_positives with probability 0.25
t = sortd.iloc[i].loc['weight_positives']
arr_weight_positives[q2 + i] = np.random.choice([t, 1-t], p=[0.75, 0.25])
# clip third quartile to within sensible bounds
np.clip(arr_n_hidden[q2:q3], N_HIDDEN[0], N_HIDDEN[1], out=arr_n_hidden[q2:q3])
np.clip(arr_n_layers[q2:q3], N_LAYERS[0], N_LAYERS[1], out=arr_n_layers[q2:q3])
# learning rate varies as exp, clip between 0 and 1 should be fine
np.clip(arr_learning_rate[q2:q3], 0, 1, out=arr_learning_rate[q2:q3])
np.clip(arr_dropout_prob[q2:q3], DROPOUT_PROB[0], DROPOUT_PROB[1], out=arr_dropout_prob[q2:q3])
np.clip(arr_n_epochs[q2:q3], N_EPOCHS[0], N_EPOCHS[1], out=arr_n_epochs[q2:q3])
np.clip(arr_batch_size[q2:q3], BATCH_SIZE[0], BATCH_SIZE[1], out=arr_batch_size[q2:q3])
# fourth quartile are randomly generated
(rand_n_hidden, rand_n_layers, rand_learning_rate, rand_dropout_prob,
rand_n_epochs, rand_batch_size, rand_weight_positives) = get_random_hparams(n=generation_size-q3)
arr_n_hidden[q3:generation_size] = rand_n_hidden
arr_n_layers[q3:generation_size] = rand_n_layers
arr_learning_rate[q3:generation_size] = rand_learning_rate
arr_dropout_prob[q3:generation_size] = rand_dropout_prob
arr_n_epochs[q3:generation_size] = rand_n_epochs
arr_batch_size[q3:generation_size] = rand_batch_size
arr_weight_positives[q3:generation_size] = rand_weight_positives
return (arr_n_hidden, arr_n_layers, arr_learning_rate, arr_dropout_prob,
arr_n_epochs, arr_batch_size, arr_weight_positives)
def ga_run_generation(df, dc_data, generation_size=8):
""" Run a single generation"""
# get the generation number for the previous generation
n_prev = max(df['generation'])
# slice the df to get just the last generation
prev = df.loc[df.loc[:, 'generation'] == n_prev]
# compute the parameters for the next generation
next_params = ga_calc_next_gen(prev, generation_size=generation_size, metric='auc')
# run the next generation and return it
next = eval_n_models(dc_data, n=generation_size, generation=n_prev + 1, init='hparams', hparams=next_params)
return next
def ga_run(dc_data, seed=False, seed_pop=None, generation_size=20, n_generations=5):
""" Run the genetic algorithm for n_generations"""
# if no seed generation is supplied, create one
if seed:
pop = seed_pop
else:
pop = eval_n_models(dc_data, n=generation_size, generation=0)
print("Generation 0 (seed):\n", pop.sort_values('auc', ascending=False))
for g in range(1, n_generations):
# run the next generation
next = ga_run_generation(pop, dc_data, generation_size=generation_size)
next = next.sort_values('auc', ascending=False)
print("Generation ", g, ":")
print("next:", next)
pop = pop.append(next)
return pop
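# Illustrative (not in the original): the search can be resumed from an earlier
# population by passing it back in as the seed, e.g.
#   pop = ga_run(dc_data, seed=True, seed_pop=previous_pop,
#                generation_size=20, n_generations=5)
# where `previous_pop` is a DataFrame returned by an earlier ga_run() call.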
if __name__ == "__main__":
# Code here to execute, might be better in an ipynb?
dc_data = import_dc_data()
# create a dataframe by running eval_n_models
df_genetic = ga_run(dc_data, generation_size=20, n_generations=10)
|
the-stack_106_27416 | from subprocess import call
from subprocess import check_output
def find_group_id() -> str:
"""
Finds the primary group ID and returns it as a string
"""
cmd = 'dscl . -list /groups PrimaryGroupID|grep staff|tr -s [:space:]'
out = check_output([cmd], shell=True)
res = out.decode('UTF-8').strip('\n').split(' ')
return res[1]
def find_user_id() -> str:
"""
Find out the next available user ID
"""
cmd = "dscl . -list /Users UniqueID|awk '{{print $2}}'|sort -ug|tail -1"
out = check_output([cmd], shell=True)
# Decode from bytes to string, remove newline and convert to int
max = int(out.decode('UTF-8').strip('\n'))
return str(max + 1)
class OSX:
""" Class for executing OSX utilities commands """
def __init__(self, utility, user, shell='/usr/bin/false', group='staff'):
"""
Class constructor
"""
self.utility = utility
# self.password = password
self.user = user
self.shell = shell
self.group = group
self.base_cmd = ''
def execute(self):
"""
Executes commands
"""
if self.utility == 'useradd':
self.call_useradd()
elif self.utility == 'userdel':
self.call_userdel()
else:
print("Command not recognized")
def osx_call(self, command):
"""
Real execution of terminal command
"""
call('sudo -S {}'.format(command), shell=True)
def call_useradd(self):
"""
Adds a new user
"""
base_cmd = 'dscl . -create /Users/{}'.format(self.user)
self.osx_call(base_cmd)
userid = base_cmd + ' UniqueID {}'.format(find_user_id())
self.osx_call(userid)
group = base_cmd + ' PrimaryGroupID {}'.format(find_group_id())
self.osx_call(group)
def call_userdel(self):
"""
Deletes user
"""
self.delete_from_groups()
userdel = 'dscl . -delete /Users/{}'.format(self.user)
self.osx_call(userdel)
def delete_from_groups(self):
"""
Deletes user from his groups
"""
out = check_output(['groups', self.user])  # query the target user's group list
groups = out.decode('UTF-8').strip('\n').split(' ')
for group in groups:
cmd = 'dseditgroup -o edit -d {} -t user {}'.format(self.user, group)
self.osx_call(cmd)
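# Illustrative usage (added; not in the original module). 'demo_user' is a
# hypothetical account name, and both operations shell out to `sudo dscl`,
# so they need administrator privileges on macOS:
if __name__ == '__main__':
    OSX('useradd', 'demo_user').execute()   # create the account
    OSX('userdel', 'demo_user').execute()   # then remove it again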
|
the-stack_106_27417 | import logging
import time
from abc import abstractmethod, ABC, ABCMeta
from spaceone.core import config, utils, cache
from spaceone.core.manager import BaseManager
from spaceone.core.auth.jwt.jwt_util import JWTUtil
from spaceone.identity.error.error_authentication import *
__all__ = ['TokenManager', 'JWTManager']
_LOGGER = logging.getLogger(__name__)
class TokenManager(BaseManager, ABC):
is_authenticated = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._load_conf()
@abstractmethod
def issue_token(self, **kwargs):
pass
@abstractmethod
def refresh_token(self, user_id, domain_id, **kwargs):
pass
@abstractmethod
def authenticate(self, user_id, domain_id, credentials):
pass
@abstractmethod
def check_refreshable(self, key, ttl):
pass
def _load_conf(self):
identity_conf = config.get_global('IDENTITY') or {}
token_conf = identity_conf.get('token', {})
self.CONST_TOKEN_TIMEOUT = token_conf.get('token_timeout', 1800)
self.CONST_REFRESH_TIMEOUT = token_conf.get('refresh_timeout', 3600)
self.CONST_REFRESH_TTL = token_conf.get('refresh_ttl', -1)
self.CONST_REFRESH_ONCE = token_conf.get('refresh_once', True)
class JWTManager(TokenManager, metaclass=ABCMeta):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.old_refresh_key = None
def issue_token(self, **kwargs):
raise NotImplementedError('TokenManager.issue_token not implemented!')
def refresh_token(self, user_id, domain_id, **kwargs):
raise NotImplementedError('TokenManager.refresh_token not implemented!')
def authenticate(self, user_id, domain_id, credentials):
raise NotImplementedError('TokenManager.authenticate not implemented!')
def check_refreshable(self, refresh_key, ttl):
if self.CONST_REFRESH_ONCE:
if cache.is_set() and cache.get(f'refresh-token:{refresh_key}') is None:
raise ERROR_INVALID_REFRESH_TOKEN()
if ttl == 0:
raise ERROR_REFRESH_COUNT()
self.is_authenticated = True
self.old_refresh_key = refresh_key
def issue_access_token(self, user_type, user_id, domain_id, **kwargs):
private_jwk = self._get_private_jwk(kwargs)
timeout = kwargs.get('timeout', self.CONST_TOKEN_TIMEOUT)
if user_id == '[email protected]':
timeout = 60
payload = {
'cat': 'ACCESS_TOKEN',
'user_type': user_type,
'did': domain_id,
'aud': user_id,
'iat': int(time.time()),
'exp': int(time.time() + timeout)
}
encoded = JWTUtil.encode(payload, private_jwk)
return encoded
def issue_refresh_token(self, user_type, user_id, domain_id, **kwargs):
refresh_private_jwk = self._get_refresh_private_jwk(kwargs)
ttl = kwargs.get('ttl', self.CONST_REFRESH_TTL)
timeout = kwargs.get('timeout', self.CONST_REFRESH_TIMEOUT)
refresh_key = self._generate_refresh_key()
if user_id == '[email protected]':
timeout = 120
payload = {
'cat': 'REFRESH_TOKEN',
'user_type': user_type,
'did': domain_id,
'aud': user_id,
'iat': int(time.time()),
'exp': int(time.time() + timeout),
"key": refresh_key,
'ttl': ttl
}
encoded = JWTUtil.encode(payload, refresh_private_jwk)
if self.CONST_REFRESH_ONCE:
self._set_refresh_token_cache(refresh_key)
return encoded
@staticmethod
def _generate_refresh_key():
return utils.random_string(16)
@staticmethod
def _get_private_jwk(kwargs):
if 'private_jwk' not in kwargs:
raise ERROR_NOT_FOUND_PRIVATE_KEY(purpose='Access Token')
return kwargs['private_jwk']
@staticmethod
def _get_refresh_private_jwk(kwargs):
if 'refresh_private_jwk' not in kwargs:
raise ERROR_NOT_FOUND_PRIVATE_KEY(purpose='Refresh Token')
return kwargs['refresh_private_jwk']
def _set_refresh_token_cache(self, new_refresh_key):
if cache.is_set():
if self.old_refresh_key:
cache.delete(f'refresh-token:{self.old_refresh_key}')
cache.set(f'refresh-token:{new_refresh_key}', '', expire=self.CONST_REFRESH_TIMEOUT)
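# Note (illustrative; not part of this module): concrete token managers are
# expected to subclass JWTManager and implement authenticate(), issue_token()
# and refresh_token(), building the actual tokens via issue_access_token() /
# issue_refresh_token() and passing the domain's signing keys through the
# 'private_jwk' / 'refresh_private_jwk' keyword arguments shown above.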
|
the-stack_106_27419 | import logging
import os
import tempfile
from contextlib import contextmanager
from typing import TYPE_CHECKING, Optional
from funcy import cached_property, first
from dvc.exceptions import DvcException
from dvc.utils import dict_sha256, relpath
if TYPE_CHECKING:
from dvc.objects.db.base import ObjectDB
logger = logging.getLogger(__name__)
class RunCacheNotFoundError(DvcException):
def __init__(self, stage):
super().__init__(f"No run-cache for {stage.addressing}")
def _get_cache_hash(cache, key=False):
from dvc.data.meta import Meta
if key:
cache["outs"] = [out["path"] for out in cache.get("outs", [])]
return dict_sha256(cache, exclude=[Meta.PARAM_SIZE, Meta.PARAM_NFILES])
def _can_hash(stage):
if stage.is_callback or stage.always_changed:
return False
if not all([stage.cmd, stage.deps, stage.outs]):
return False
for dep in stage.deps:
if not (dep.scheme == "local" and dep.def_path and dep.get_hash()):
return False
for out in stage.outs:
if out.scheme != "local" or not out.def_path or out.persist:
return False
return True
def _get_stage_hash(stage):
from .serialize import to_single_stage_lockfile
assert _can_hash(stage)
return _get_cache_hash(to_single_stage_lockfile(stage), key=True)
class StageCache:
def __init__(self, repo):
self.repo = repo
@cached_property
def cache_dir(self):
return os.path.join(self.repo.odb.local.cache_dir, "runs")
def _get_cache_dir(self, key):
return os.path.join(self.cache_dir, key[:2], key)
def _get_cache_path(self, key, value):
return os.path.join(self._get_cache_dir(key), value)
def _load_cache(self, key, value):
from voluptuous import Invalid
from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA
from dvc.utils.serialize import YAMLFileCorruptedError, load_yaml
path = self._get_cache_path(key, value)
try:
return COMPILED_LOCK_FILE_STAGE_SCHEMA(load_yaml(path))
except FileNotFoundError:
return None
except (YAMLFileCorruptedError, Invalid):
logger.warning("corrupted cache file '%s'.", relpath(path))
os.unlink(path)
return None
def _load(self, stage):
key = _get_stage_hash(stage)
if not key:
return None
cache_dir = self._get_cache_dir(key)
if not os.path.exists(cache_dir):
return None
for value in os.listdir(cache_dir):
cache = self._load_cache(key, value)
if cache:
return cache
return None
def _create_stage(self, cache, wdir=None):
from . import PipelineStage, create_stage
from .loader import StageLoader
stage = create_stage(
PipelineStage,
repo=self.repo,
path="dvc.yaml",
cmd=cache["cmd"],
wdir=wdir,
outs=[out["path"] for out in cache["outs"]],
external=True,
)
StageLoader.fill_from_lock(stage, cache)
return stage
@contextmanager
def _cache_type_copy(self):
cache_types = self.repo.odb.local.cache_types
self.repo.odb.local.cache_types = ["copy"]
try:
yield
finally:
self.repo.odb.local.cache_types = cache_types
def _uncached_outs(self, stage, cache):
# NOTE: using temporary stage to avoid accidentally modifying original
# stage and to work around `commit/checkout` not working for uncached
# outputs.
cached_stage = self._create_stage(cache, wdir=stage.wdir)
outs_no_cache = [
out.def_path for out in stage.outs if not out.use_cache
]
# NOTE: using copy link to make it look like a git-tracked file
with self._cache_type_copy():
for out in cached_stage.outs:
if out.def_path in outs_no_cache:
yield out
def save(self, stage):
from .serialize import to_single_stage_lockfile
if not _can_hash(stage):
return
cache_key = _get_stage_hash(stage)
cache = to_single_stage_lockfile(stage)
cache_value = _get_cache_hash(cache)
existing_cache = self._load_cache(cache_key, cache_value)
cache = existing_cache or cache
for out in self._uncached_outs(stage, cache):
out.commit()
if existing_cache:
return
from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA
from dvc.utils.serialize import dump_yaml
# sanity check
COMPILED_LOCK_FILE_STAGE_SCHEMA(cache)
path = self._get_cache_path(cache_key, cache_value)
parent = self.repo.odb.local.fs.path.parent(path)
self.repo.odb.local.makedirs(parent)
tmp = tempfile.NamedTemporaryFile(delete=False, dir=parent).name
assert os.path.exists(parent)
assert os.path.isdir(parent)
dump_yaml(tmp, cache)
self.repo.odb.local.move(tmp, path)
def restore(self, stage, run_cache=True, pull=False):
from .serialize import to_single_stage_lockfile
if not _can_hash(stage):
raise RunCacheNotFoundError(stage)
if (
not stage.changed_stage()
and stage.deps_cached()
and all(bool(out.hash_info) for out in stage.outs)
):
cache = to_single_stage_lockfile(stage)
else:
if not run_cache: # backward compatibility
raise RunCacheNotFoundError(stage)
stage.save_deps()
cache = self._load(stage)
if not cache:
raise RunCacheNotFoundError(stage)
cached_stage = self._create_stage(cache, wdir=stage.wdir)
if pull:
for objs in cached_stage.get_used_objs().values():
self.repo.cloud.pull(objs)
if not cached_stage.outs_cached():
raise RunCacheNotFoundError(stage)
logger.info(
"Stage '%s' is cached - skipping run, checking out outputs",
stage.addressing,
)
cached_stage.checkout()
@staticmethod
def _transfer(func, from_remote, to_remote):
ret = []
runs = from_remote.fs.path.join(from_remote.fs_path, "runs")
if not from_remote.fs.exists(runs):
return []
from_path = from_remote.fs.path
for src in from_remote.fs.find(runs):
rel = from_path.relpath(src, from_remote.fs_path)
dst = to_remote.fs.path.join(to_remote.fs_path, rel)
key = to_remote.fs.path.parent(dst)
# check if any build cache already exists for this key
# TODO: check if MaxKeys=1 or something like that applies
# or otherwise this will take a lot of time!
if to_remote.fs.exists(key) and first(to_remote.fs.find(key)):
continue
func(src, dst)
ret.append(
(from_path.name(from_path.parent(src)), from_path.name(src))
)
return ret
def push(self, remote: Optional[str], odb: Optional["ObjectDB"] = None):
from dvc.data.transfer import _log_exceptions
if odb is None:
odb = self.repo.cloud.get_remote_odb(remote)
return self._transfer(
_log_exceptions(odb.fs.upload),
self.repo.odb.local,
odb,
)
def pull(self, remote: Optional[str]):
from dvc.data.transfer import _log_exceptions
odb = self.repo.cloud.get_remote_odb(remote)
return self._transfer(
_log_exceptions(odb.fs.download),
odb,
self.repo.odb.local,
)
def get_used_objs(self, used_run_cache, *args, **kwargs):
"""Return used cache for the specified run-cached stages."""
from collections import defaultdict
used_objs = defaultdict(set)
for key, value in used_run_cache:
entry = self._load_cache(key, value)
if not entry:
continue
stage = self._create_stage(entry)
for odb, objs in stage.get_used_objs(*args, **kwargs).items():
used_objs[odb].update(objs)
return used_objs
|
the-stack_106_27421 | """Test that the horizontal font metrics are calculated correctly.
Some text in various fonts will be displayed. Green vertical lines mark
the left edge of the text. Blue vertical lines mark the right edge of the
text.
"""
import os
import unittest
from pyglet.gl import *
from pyglet import font
from . import base_text
base_path = os.path.dirname(__file__)
class TEST_HORIZONTAL_METRICS(base_text.TextTestBase):
window_size = 400, 250
def render(self):
font.add_file(os.path.join(base_path, 'action_man.ttf'))
fnt1 = font.load('Action Man', 16)
fnt2 = font.load('Arial', 16)
fnt3 = font.load('Times New Roman', 16)
h = fnt3.ascent - fnt3.descent + 10
self.texts = [
font.Text(fnt1, 'Action Man', 10, h * 1),
font.Text(fnt1, 'Action Man longer test with more words', 10,
h * 2),
font.Text(fnt2, 'Arial', 10, h * 3),
font.Text(fnt2, 'Arial longer test with more words', 10, h * 4),
font.Text(fnt3, 'Times New Roman', 10, h * 5),
font.Text(fnt3, 'Times New Roman longer test with more words',
10, h * 6),
]
def draw(self):
glPushAttrib(GL_CURRENT_BIT)
for text in self.texts:
text.draw()
glBegin(GL_LINES)
glColor3f(0, 1, 0)
glVertex2f(text.x, text.y + text.font.descent)
glVertex2f(text.x, text.y + text.font.ascent)
glColor3f(0, 0, 1)
glVertex2f(text.x + text.width, text.y + text.font.descent)
glVertex2f(text.x + text.width, text.y + text.font.ascent)
glEnd()
glPopAttrib()
|
the-stack_106_27423 | import os
import logging
import json
import requests
from SPARQLWrapper import JSON, SPARQLWrapper
def download():
logging.basicConfig(level=logging.INFO)
endpoint = SPARQLWrapper("https://materialsmine.org/wi/sparql")
endpoint.setQuery(
"""
SELECT DISTINCT ?article WHERE {
?doi a <http://nanomine.org/ns/ResearchArticle> .
?doi <http://semanticscience.org/resource/hasPart> ?article .
}
"""
)
endpoint.setReturnFormat(JSON)
results = endpoint.query().convert()
uris = [r["article"]["value"].replace("http://nanomine.org/sample/", "").replace("-", "_").title()
for r in results["results"]["bindings"]]
files = [uri + ".xml" for uri in uris]
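    # Added worked example (the URI below is hypothetical): a binding such as
    #   http://nanomine.org/sample/l217-s3-ash-2002
    # becomes "l217_s3_ash_2002" after the replace() calls, "L217_S3_Ash_2002"
    # after .title(), and is finally saved locally as "L217_S3_Ash_2002.xml".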
for f in files:
if (os.path.exists(f)):
logging.info("File " + f + " already exists, skipping")
else:
logging.debug("Downloading file " + str(f))
r = requests.get("http://nanomine.org/nmr/xml/" + f)
try:
j = json.loads(r.text)
xml_str = j["data"][0]["xml_str"]
with open(f, "w") as outfile:
outfile.write(xml_str)
except Exception as e:
logging.error("Something went wrong with file " + f)
logging.error(e)
if __name__ == "__main__":
download()
|
the-stack_106_27424 | import cv2
import numpy as np
from datetime import datetime
import array
import fcntl
import os
import argparse
from utils import ArducamUtils
import time
def resize(frame, dst_width):
width = frame.shape[1]
height = frame.shape[0]
scale = dst_width * 1.0 / width
return cv2.resize(frame, (int(scale * width), int(scale * height)))
def display(cap, arducam_utils, fps = False):
counter = 0
start_time = datetime.now()
frame_count = 0
start = time.time()
#f = open("/dev/stdout", "wb")
while True:
ret, frame = cap.read()
counter += 1
frame_count += 1
if arducam_utils.convert2rgb == 0:
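            # RGB conversion is disabled, so OpenCV returns a flat buffer;
            # reshape it to (height, width) and let ArducamUtils apply the
            # sensor-specific conversion.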
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame = frame.reshape(int(h), int(w))
frame = arducam_utils.convert(frame)
frame = resize(frame, 1280.0)
# display
cv2.imshow("Arducam", frame)
ret = cv2.waitKey(1)
# press 'q' to exit.
if ret == ord('q'):
break
if fps and time.time() - start >= 1:
print("fps: {}".format(frame_count),end='\r')
start = time.time()
frame_count = 0
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
## frame.tofile(sys.stdout.buffer)
#f.write(frame.data)
end_time = datetime.now()
elapsed_time = end_time - start_time
avgtime = elapsed_time.total_seconds() / counter
print ("Average time between frames: " + str(avgtime))
print ("Average FPS: " + str(1/avgtime))
def fourcc(a, b, c, d):
return ord(a) | (ord(b) << 8) | (ord(c) << 16) | (ord(d) << 24)
def pixelformat(string):
if len(string) != 3 and len(string) != 4:
msg = "{} is not a pixel format".format(string)
raise argparse.ArgumentTypeError(msg)
if len(string) == 3:
return fourcc(string[0], string[1], string[2], ' ')
else:
return fourcc(string[0], string[1], string[2], string[3])
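# Added worked example (illustrative): the FOURCC is packed little-endian, one
# byte per character, so
#   pixelformat("GREY") == 0x47 | (0x52 << 8) | (0x45 << 16) | (0x59 << 24)
#                       == 0x59455247
# which is the code V4L2 uses for the 8-bit greyscale format.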
def show_info(arducam_utils):
_, firmware_version = arducam_utils.read_dev(ArducamUtils.FIRMWARE_VERSION_REG)
_, sensor_id = arducam_utils.read_dev(ArducamUtils.FIRMWARE_SENSOR_ID_REG)
_, serial_number = arducam_utils.read_dev(ArducamUtils.SERIAL_NUMBER_REG)
print("Firmware Version: {}".format(firmware_version))
print("Sensor ID: 0x{:04X}".format(sensor_id))
print("Serial Number: 0x{:08X}".format(serial_number))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Arducam Jetson Nano MIPI Camera Displayer.')
parser.add_argument('-d', '--device', default=0, type=int, nargs='?',
help='/dev/videoX default is 0')
parser.add_argument('-f', '--pixelformat', type=pixelformat,
help="set pixelformat")
parser.add_argument('--width', type=lambda x: int(x,0),
help="set width of image")
parser.add_argument('--height', type=lambda x: int(x,0),
help="set height of image")
parser.add_argument('--fps', action='store_true', help="display fps")
args = parser.parse_args()
# open camera
cap = cv2.VideoCapture(args.device, cv2.CAP_V4L2)
# set pixel format
if args.pixelformat != None:
if not cap.set(cv2.CAP_PROP_FOURCC, args.pixelformat):
print("Failed to set pixel format.")
arducam_utils = ArducamUtils(args.device)
show_info(arducam_utils)
# turn off RGB conversion
if arducam_utils.convert2rgb == 0:
cap.set(cv2.CAP_PROP_CONVERT_RGB, arducam_utils.convert2rgb)
# set width
if args.width != None:
cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
# set height
if args.height != None:
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
# begin display
display(cap, arducam_utils, args.fps)
# release camera
cap.release()
|
the-stack_106_27425 | """API targets module for rbkcli."""
import copy
from rbkcli.base import CONSTANTS, RbkcliBase, RbkcliException
from rbkcli.core.handlers.environment import EnvironmentHandler
from rbkcli.core.handlers.inputs import InputHandler
from rbkcli.core.handlers.outputs import OutputHandler
class ApiTarget(RbkcliBase):
"""Define the Target to which we will request APIs from."""
def __init__(self, ctx, auth=None, env=None):
"""Initialize Rbkcli Rubrik Cluster."""
self.ctx = self.dot_dict(ctx)
# Verify base folder provided.
if self.ctx.base_folder == '':
self.ctx.base_folder = CONSTANTS.BASE_FOLDER
# Make sure the parent init is run.
RbkcliBase.__init__(self, user_profile=self.ctx.user_profile,
base_folder=self.ctx.base_folder,
workflow=self.ctx.workflow)
# req is a dict created from te raw input of API request.
self.req = {}
self.ini_req = {}
# Set workflow to instance variable
# Load the API target from tools.
self.tools = self.tools()
if auth in (None, {}):
self.auth = self.tools.load_auth()
else:
self.auth = self.dot_dict(auth)
base_kit = self._gen_base_kit()
# Load or create environment based on imported apis
self.environment = env
if self.environment is None:
self.environment = EnvironmentHandler(base_kit)
# Instantiate operations and api handlers
self.operations = self.environment.evaluate()
base_kit.target_folder = self.environment.env.folder
        # Instantiate InputHandler for future use
self.validator = InputHandler(base_kit, self.operations)
self.operations.handler.cmdlets.initialize_callbacker(self.operations)
self.operations.handler.scripts.initialize_callbacker(self.operations)
self.formatter = OutputHandler(base_kit, self.operations)
def execute(self, **kwargs):
"""Call the execute method for the provided request."""
# Generate a dictionary of the data passed for request
self._gen_req_dict(kwargs)
# Use input handler to validate the data entered.
        # Any error related to request inconsistency should be raised here.
self.req = self.validator.validate(self.ini_req)
        # Once the input is normalized, pass it on to request the operation.
return self.operations.execute(self.req)
def documentation(self, **kwargs):
"""Call the documentation method for the provided request."""
# Generate a dictionary of the data passed for request
self._gen_req_dict(kwargs)
# Use input handler to validate the data entered.
        # Any error related to request inconsistency should be raised here.
self.req = self.validator.validate(self.ini_req)
        # Once the input is normalized, pass it on to request the operation.
return self.operations.documentation(self.req)
def information(self, **kwargs):
"""Call the information method for the provided request."""
# Generate a dictionary of the data passed for request
self._gen_req_dict(kwargs)
# Use input handler to validate the data entered.
        # Any error related to request inconsistency should be raised here.
self.req = self.validator.validate(self.ini_req)
        # Once the input is normalized, pass it on to request the operation.
return self.operations.information(self.req)
    def command(self, **kwargs):
        """Normalize the provided command request and dispatch it to the matching operation."""
# Generate a dictionary of the data passed for request
self._gen_req_dict(kwargs)
result = self.dot_dict()
#print(self.req)
# Converting endpoint to a list.
if isinstance(self.req.api_endpoint, str):
self.req.api_endpoint = [self.req.api_endpoint]
# Normalizing request dictionary
self.req.endpoint = ' '.join(self.req.api_endpoint)
self.req.formatt = 'raw'
self.req.param = self.req.query
self.req.data = self.req.parameter
for key, value in self.req.items():
self.ini_req[key] = value
# Get the documentation string for every API run.
self.req.documentation_objct = self.documentation(args=self.req).text
## FIX
# If info requested, only print info.
if self.req.info:
result = self.information(args=self.req)
# If documentation requested, only print documentation.
elif self.req.documentation:
result.text = self.req.documentation_objct
# If available keys requested get the available keys.
elif self.req.output_workflow != []:
if '?' in self.req.output_workflow[0]['value']:
result.text = self.formatter.available_fields(self.req)
self.req.output_workflow.pop(0)
else:
result = self.execute(args=self.req)
else:
result = self.execute(args=self.req)
return self.formatter.outputfy(self.req, result)
def _gen_req_dict(self, kwargs):
"""Generate the request dictionary to be passed."""
        # Create the dictionary as a dot dict for easy access.
if self.req == {}:
self.req = self.dot_dict()
self.ini_req = self.dot_dict()
kwargs = kwargs['args']
for key in kwargs.keys():
self.req[key] = kwargs[key]
def _gen_base_kit(self):
"""Generate a shared context to pass to other classes."""
        # Create the dictionary as a dot dict for easy access.
base_kit = self.dot_dict()
base_kit.config_dict = self.conf_dict
base_kit.base_folder = self.ctx.base_folder
base_kit.tools = self.tools
base_kit.logger = self.rbkcli_logger
base_kit.dot_dict = self.dot_dict
base_kit.target = self.auth.server
base_kit.api_handler = self.api_handler
base_kit.discover_fn = self.ctx.discover_fn
base_kit.auth = self.auth
base_kit.user_profile = self.user_profile
base_kit.json_ops = self.json_ops
base_kit.workflow = self.ctx.workflow
### Test here
callback_kit = self.dot_dict()
base_kit.callback_cmd = self.command
base_kit.parser = self.ctx.parser
return base_kit
class RubrikCluster(ApiTarget):
"""
    Class to instantiate an API target.
    Specifically a Rubrik Cluster; it has a discovery action for the
    target that could be different per target type.
"""
def __init__(self, ctx, auth=None, env=None):
"""Initialize Rbkcli Rubrik Cluster."""
ctx['discover_fn'] = self._discovery_action
ApiTarget.__init__(self, ctx, auth=auth, env=env)
    def _discovery_action(self):
        """Gather cluster and node data; return a list of dicts."""
# Declare the target resolution var that will be returned.
resolution_data = []
target_resolution = self.dot_dict()
# Instantiate the raw Api Requester.
requester = self.api_requester(self.auth)
# Request the data from the target.
cluster_data = requester.demand('get', '/v1/cluster/me')
node_data = requester.demand('get', '/internal/cluster/me/node')
unauthr_user = str('The request was a legal request, but the server '
'is refusing to respond to it.')
# Verify if the discovery process got necessary data
if node_data.status_code == 404:
error = str('Unable to discover cluster, discovery APIs returned '
'status code ' + str(node_data.status_code) + '. Is '
+ str(self.auth.server) + ' a valid Rubrik system?\n')
raise RbkcliException.ApiRequesterError(error)
elif node_data.text == unauthr_user:
error = str('Unable to discover cluster, discovery APIs returned '
'message: \n --> ' + str(node_data.text) + '\nIs '
+ str(self.auth.username) + ' a full Rubrik admin?\n')
raise RbkcliException.ApiRequesterError(error)
# Convert data to usable dict.
node_dict = self.tools.json_load(node_data.text)
cluster_dict = self.tools.json_load(cluster_data.text)
node_dict = node_dict['data']
# Assign value from received data to final dict to be returned.
for node in node_dict:
target_resolution.envId = cluster_dict['id']
target_resolution.envName = cluster_dict['name']
target_resolution.id = node['id']
target_resolution.ip = node['ipAddress']
resolution_data.append(target_resolution)
target_resolution = self.dot_dict()
return resolution_data
class RbkcliTarget():
"""
Class to manage Target Groups.
Based on provided configuration, will instantiate multiple RubrikClusters.
    Verify that their versions are compatible and provide the same operations
    as any other ApiTarget so the CLI can be created dynamically.
    This should be a management layer that deals with multiple targets and
    sessions while still keeping the consistency of one CLI.
    It will also normalize multiple API responses into one output and provide
    access to individual targets and the target group.
Predicted to be implemented in version 1.3
rbk_cli = RbkCliTarget(auth=auth)
rbk_cli.add_target(auth=auth1)
rbk_cli.add_target(auth=auth2)
rbk_cli.target_group.list()
rbk_cli.target_group.execute()
rbk_cli.target.<server_IP>.execute()
"""
def __init__(self, ctx, auth=None, env=None):
"""Initialize RbkcliTarget class."""
# Attribute the to instance var the instantiation of a Cluster.
self.target = RubrikCluster(ctx,
auth=auth,
env=env)
def add_target(self):
"""Future method to dynamically add targets to the rbkcli execution."""
    def remove_target(self):
        """Future method to dynamically remove targets from the rbkcli execution."""
|
the-stack_106_27426 | import json
class ValidationResult(object):
ERROR = 1
WARNING = 2
def __init__(self, namespace, classname):
super(ValidationResult, self).__init__()
self.warnings = []
self.errors = []
self.namespace = namespace
self.classname = classname
def add_error(self, warning):
if warning:
if warning.level == ValidationResult.WARNING:
self.warnings.append(warning)
elif warning.level == ValidationResult.ERROR:
self.errors.append(warning)
def to_json(self):
mapping = self.to_dict()
return json.dumps(mapping)
def to_dict(self):
mapping = {}
mapping['warnings'] = []
for warning in self.warnings:
mapping['warnings'].append(warning.to_dict())
mapping['errors'] = []
for error in self.errors:
mapping['errors'].append(error.to_dict())
mapping['namespace'] = self.namespace
mapping['classname'] = self.classname
return mapping
def __len__(self):
return len(self.warnings) + len(self.errors)
class ValidationWarning(object):
def __init__(self, level, string, line, line_num):
super(ValidationWarning, self).__init__()
self.level = level
self.string = string
self.line_num = line_num
self.line_text = line
def to_dict(self):
mapping = {}
mapping['level'] = \
"Error" if self.level == ValidationResult.ERROR else "Warning"
mapping['string'] = self.string
mapping['line'] = self.line_text
mapping['num'] = self.line_num
return mapping
def to_json(self):
mapping = self.to_dict()
return json.dumps(mapping)
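# A minimal usage sketch (added for illustration; the warning texts and line
# numbers below are made up):
if __name__ == "__main__":
    result = ValidationResult("example.namespace", "ExampleClass")
    result.add_error(ValidationWarning(ValidationResult.WARNING,
                                       "trailing whitespace",
                                       "x = 1 ", 12))
    result.add_error(ValidationWarning(ValidationResult.ERROR,
                                       "undefined name 'y'",
                                       "print(y)", 20))
    print(len(result))       # 2
    print(result.to_json())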
|
the-stack_106_27428 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
test_data = [12,5,8,10,2,16,259,1]
def merge_sort_not_recursion(data):
step = 1
while step <= len(data)//2:
for i in range(0, len(data), step*2):
res = []
left = data[i:min(i+step, len(data))]
right = data[min(i+step, len(data)-1):min(i+2*step, len(data))]
while left and right:
if left[0] < right[0]: res.append(left.pop(0))
else: res.append(right.pop(0))
if left: res += left
if right: res += right
data[i:i+len(res)] = res
step += step
return data
def merge(left, right):
res = []
while left and right:
if left[0] < right[0]:
res.append(left.pop(0))
else:
res.append(right.pop(0))
if left:
res += left
if right:
res += right
return res
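# Added worked example (illustrative): merging two already-sorted runs
#   merge([5, 12], [8, 16]) -> [5, 8, 12, 16]
# Note that merge() mutates its arguments (items are popped from the front),
# so pass copies if the originals are still needed.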
def merge_sort(data):
if len(data) <= 1: return data
mid = len(data)//2
left, right = data[:mid], data[mid:]
left, right = merge_sort(left), merge_sort(right)
return merge(left, right)
print(test_data)
print('recursive', merge_sort(test_data))
print('non-recursive', merge_sort_not_recursion(test_data))
|
the-stack_106_27429 | from typing import List
import dask.dataframe as dd
from nvtx import annotate
from dask_sql.utils import new_temporary_column
@annotate("GROUPBY_GET_GROUPBY_WITH_NULL_COLS", color="green", domain="dask_sql_python")
def get_groupby_with_nulls_cols(
df: dd.DataFrame, group_columns: List[str], additional_column_name: str = None
):
"""
    SQL and dask treat null group keys differently:
    SQL keeps them (putting them at the front), whereas dask simply drops them.
    Therefore we use the same trick as fugue does:
    we group by both the NaN indicator and the real column value.
"""
if additional_column_name is None:
additional_column_name = new_temporary_column(df)
group_columns_and_nulls = []
for group_column in group_columns:
is_null_column = group_column.isnull()
non_nan_group_column = group_column.fillna(0)
# split_out doesn't work if both columns have the same name
is_null_column.name = f"{is_null_column.name}_{new_temporary_column(df)}"
group_columns_and_nulls += [is_null_column, non_nan_group_column]
if not group_columns_and_nulls:
# This can happen in statements like
# SELECT SUM(x) FROM data
# without any groupby statement
group_columns_and_nulls = [additional_column_name]
return group_columns_and_nulls
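# A minimal usage sketch (added for illustration; the call shape is an
# assumption based on the .isnull()/.fillna() calls above, which suggest that
# `group_columns` holds dask Series rather than plain column names):
#
#   import pandas as pd
#   ddf = dd.from_pandas(pd.DataFrame({"a": [1, None, 1], "x": [1, 2, 3]}),
#                        npartitions=1)
#   keys = get_groupby_with_nulls_cols(ddf, [ddf["a"]])
#   ddf.groupby(keys).x.sum().compute()  # the NULL group is kept as its own row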
|