filename | text
---|---
the-stack_106_29848 | # -*- coding: utf-8 -*-
"""
Script that updates the Siwick Research Group website. Modify the files for the
website in the 'website' folder first. See script help:
>>> python deploy.py --help
You will need to know the Siwick research group's CPM server password.
This script requires:
- Python 3.6+ (f-strings are used)
- paramiko
- tqdm
"""
import argparse
import sys
import webbrowser
from contextlib import suppress
from getpass import getpass
from os import listdir, walk
from os.path import getsize, isfile, join
try:
from paramiko import SSHClient, AutoAddPolicy, AuthenticationException
from tqdm import tqdm
except ImportError:
print("paramiko and tqdm are required for this script to run.")
sys.exit()
# The directory to mirror
CONTENT_DIR = "_rendered"
TARGET_DIR = "website"
DESCRIPTION = """Update the Siwick Research Group website."""
EPILOG = """
Don't forget to render the website using the static site compiler `siwick-website`.
"""
parser = argparse.ArgumentParser(
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--show",
action="store_true",
help="Navigate to website with default web browser after deployment",
)
def put_dir(client, source, target, exclude_ext=tuple()):
"""
Upload the contents of the source directory to the target path, including subdirectories.
Parameters
----------
client : paramiko.SFTPClient
    source, target : str or path-like
        Source directory and target directory, respectively.
    exclude_ext : iterable of str, optional
        File extensions to skip during upload.
Yields
------
size : int
Bytes transferred.
message : str
Message specifying the filename, and whether it was transferred or skipped.
"""
# string.endswith must be compared with tuples of strings
exclude_ext = tuple(exclude_ext)
for item in listdir(source):
src_path = join(source, item)
dst_path = item
if isfile(src_path):
if src_path.endswith(exclude_ext):
yield (getsize(src_path), "skipped: " + str(src_path))
else:
yield (
client.put(src_path, dst_path).st_size,
"transferred: " + str(src_path),
)
else:
with suppress(IOError):
client.mkdir(dst_path)
client.chdir(dst_path)
yield from put_dir(client, src_path, dst_path, exclude_ext=exclude_ext)
client.chdir("..")
if __name__ == "__main__":
arguments = parser.parse_args()
password = getpass("CPM server password: ")
with SSHClient() as client:
client.set_missing_host_key_policy(AutoAddPolicy)
try:
client.connect(
"gollum.physics.mcgill.ca", username="decotret", password=password
)
print("Connected to CPM server.")
except AuthenticationException as e:
print(str(e))
sys.exit()
# Delete the current website content
# This is to remove content that may be too old
client.exec_command(f"rm -rf '{TARGET_DIR}/'")
# Step 2: Calculate the transfer size
with client.open_sftp() as sftp_client:
total_bytes = sum(
getsize(join(root, file))
for root, _, files in walk(CONTENT_DIR)
for file in files
)
with suppress(IOError):
sftp_client.mkdir(TARGET_DIR)
sftp_client.chdir(TARGET_DIR)
# Step 3 : upload content
upload_stream = put_dir(sftp_client, source=CONTENT_DIR, target="")
with tqdm(
desc="Upload to server", unit_scale=True, unit="B", total=total_bytes
) as pbar:
for (bytes_transferred, fname) in upload_stream:
pbar.update(bytes_transferred)
pbar.write("\r {}".format(fname))
# Step 4: sync with the domain
out = client.exec_command(
f"rsync -va --delete '{TARGET_DIR}/' /WWW/decotret/siwicklab"
)
print("Upload done!")
if arguments.show:
print("Opening web page externally...")
webbrowser.open("http://www.physics.mcgill.ca/siwicklab")
|
the-stack_106_29849 | import tensorflow as tf
import keras
import skimage
import cv2
import numpy as np
import mrcnn.visualize as vz
from skimage.transform import resize
from skimage.color import rgb2grey
import warnings
def make_image(tensor):
"""
Convert an numpy representation image to Image protobuf.
Copied from https://github.com/lanpa/tensorboard-pytorch/
"""
from PIL import Image
height, width, channel = tensor.shape
image = Image.fromarray(tensor)
import io
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
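# Compatibility note: tf.Summary, tf.Summary.Image and tf.summary.FileWriter used in this
# file are TensorFlow 1.x APIs; under TensorFlow 2.x they live in tf.compat.v1, or the
# logging would need to be rewritten around tf.summary.image / tf.summary.create_file_writer.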
class TensorBoardImage(keras.callbacks.Callback):
def __init__(self, tag, mrcnn_model, generator, config, logdir):
super().__init__()
self.tag = tag
self.mrcnn_model = mrcnn_model
self.generator = generator
self.config = config
#self.colors = visualize.random_colors(20)
self.logdir = logdir
self.writer = tf.summary.FileWriter(self.logdir)
def detect(self, verbose=0):
results = []
for i in range(10):
inputs, outputs = next(self.generator)
batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks, batch_inst_ids, batch_mold_image_meta, batch_mold_window = inputs
outputs = self.model.predict_on_batch([batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks, batch_inst_ids])
mrcnn_class_logits, mrcnn_bbox, mrcnn_mask, detections,\
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss, inst_id_loss \
= outputs
for i, image in enumerate(batch_images):
final_rois, final_class_ids, final_scores, final_masks =\
self.mrcnn_model.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, batch_images[i].shape,
batch_mold_window[i])
#inst_ids = instance_ids[i]
results.append({
"image": self.mrcnn_model.unmold_image(image),
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
#"inst_ids": inst_ids,
})
return results
def on_epoch_end(self, epoch, logs={}):
results = self.detect()
values = []
for i, r in enumerate(results):
im = vz.display_instances(r['image'], r['rois'], r['masks'], r['class_ids'],
["BG", "baloon"], r['scores'],
title="Predictions", auto_show = False)
image = make_image(im)
values.append(tf.Summary.Value(tag=self.tag + str(i), image=image))
summary = tf.Summary(value=values)
self.writer.add_summary(summary, epoch)
self.writer.flush()
#self.writer.close()
print('Plot finished')
return
class MRCNN_ModelCheckpoint(keras.callbacks.Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled with the values of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self, backbone_filepath, filepath, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1):
super(MRCNN_ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.backbone_filepath = backbone_filepath
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
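    # Illustrative usage (the file name patterns below are placeholders, not taken from
    # the original project):
    #   checkpoint = MRCNN_ModelCheckpoint(
    #       backbone_filepath="backbone.{epoch:02d}-{val_loss:.2f}.hdf5",
    #       filepath="heads.{epoch:02d}-{val_loss:.2f}.hdf5",
    #       monitor="val_loss", save_best_only=True, save_weights_only=True)
    #   model.fit(..., callbacks=[checkpoint])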
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
backbone_filepath = self.backbone_filepath.format(epoch=epoch + 1, **logs)
filepath = self.filepath.format(epoch=epoch + 1, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s'
% (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.get_layer("backbone_model").save_weights(backbone_filepath, overwrite=True)
self.model.get_layer("kira_model").save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.get_layer("backbone_model").save_weights(backbone_filepath, overwrite=True)
self.model.get_layer("kira_model").save_weights(filepath, overwrite=True)
else:
self.model.get_layer("backbone_model").save(backbone_filepath, overwrite=True)
self.model.get_layer("kira_model").save(filepath, overwrite=True) |
the-stack_106_29850 | from redbot.core import commands
from redbot.core.utils.chat_formatting import pagify, bold
import logging
# create a logger for this cog
log = logging.getLogger("karma.py")
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s - %(name)s::%(funcName)s::%(lineno)d"
"- %(levelname)s - %(message)s"
)
# create console handler
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)
# add the handlers to the log
print('adding handler-')
# allows to add only one instance of file handler and stream handler
if log.handlers:
    print('making sure we do not add duplicate handlers')
    # only attach the console handler if no StreamHandler is present yet,
    # so reloading the cog does not stack duplicate handlers
    if not any(isinstance(handler, logging.StreamHandler) for handler in log.handlers):
        log.addHandler(consoleHandler)
        print('added stream handler')
else:
    log.addHandler(consoleHandler)
    print('added handler for the first time')
class RoleIds(commands.Cog):
"""Retreives a list of server roles and their ids"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def getIds(self, ctx):
"""displays a list of server roles and their ids"""
roles = []
async with ctx.channel.typing():
for role in ctx.guild.roles:
roles.append(f"{role.name}: {role.id}")
log.debug(f"added {role.name}")
out = "\n".join(roles)
pages = pagify(out)
await ctx.send(bold("Role Name: Role Id"))
for page in pages:
await ctx.send(page)
|
the-stack_106_29853 | import random
from typing import TypeVar
def __quicksort(arr: list[TypeVar('T')], left: int, right: int, is_ascending: bool) -> None:
if left >= right:
return
i, j = left, right
pivot = arr[random.randint(left, right)]
while i <= j:
while arr[i] < pivot if is_ascending else arr[i] > pivot:
i += 1
        while arr[j] > pivot if is_ascending else arr[j] < pivot:
j -= 1
if i <= j:
arr[i], arr[j] = arr[j], arr[i]
i, j = i + 1, j - 1
__quicksort(arr, left, j, is_ascending)
__quicksort(arr, i, right, is_ascending)
def quicksort(arr: list[TypeVar('T')], is_ascending: bool = True) -> None:
    __quicksort(arr, 0, len(arr) - 1, is_ascending)
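# Minimal self-check of the in-place sort above (illustrative; runs only when this
# module is executed directly):
if __name__ == '__main__':
    data = [5, 2, 9, 1, 7]
    quicksort(data)
    assert data == [1, 2, 5, 7, 9]
    quicksort(data, is_ascending=False)
    assert data == [9, 7, 5, 2, 1]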
|
the-stack_106_29855 | from __future__ import absolute_import, division, print_function
from libtbx import test_utils
import libtbx.load_env
tst_list = [
"$D/tst_cma_es.py",
]
def run():
build_dir = libtbx.env.under_build("cma_es")
dist_dir = libtbx.env.dist_path("cma_es")
test_utils.run_tests(build_dir, dist_dir, tst_list)
if (__name__ == "__main__"):
run()
|
the-stack_106_29859 | # KNN
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
def trainTest(dftt, x, y):
# dftt - DF Train and Test
## SPlit Dataset to train and test
x_train, x_test, y_train, y_test = train_test_split(dftt[x], dftt[y], test_size=0.20, random_state=101)
model = KNeighborsClassifier(n_neighbors=5)
model.fit(x_train, y_train)
prediction = model.predict(x_test)
predict_prob = model.predict_proba(x_test)
result = pd.DataFrame(columns=['Test', 'Prediction', 'Prob'])
result['Test'] = y_test
result['Prediction'] = prediction
result['Prob'] = predict_prob[:,1]
return result
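# Example call (illustrative; assumes a DataFrame `df` with the listed feature columns
# and a binary label column):
#   results = trainTest(df, x=['feature_1', 'feature_2'], y='label')
#   print(results.head())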
def trainPredict(dft, dfp, x, y):
    # dft - DF Train
    # dfp - DF Prediction
model = KNeighborsClassifier(n_neighbors=5)
model.fit(dft[x], dft[y])
prediction = model.predict(dfp[x])
predict_prob = model.predict_proba(dfp[x])
result = pd.DataFrame(columns=['Id', 'Prediction', 'Prob'])
result['Id'] = dfp['id']
result['Prediction'] = prediction
result['Prob'] = predict_prob[:, 1]
result.to_csv('prediction.csv') |
the-stack_106_29860 | import sys
import xml.etree.ElementTree as ET
from classes.Feature import *
from classes.Extension import *
# near and far are defined by windows.h ... :(
exceptions = ["GetProcAddress", "near", "far"]
class Parameter:
def __init__(self, xml):
self.name = xml.find("name").text
# check for additional params
if list(xml.itertext())[-1] != self.name:
print(" WARNING: unexpected parameter format for " + self.name)
self.type = " ".join([t.strip() for t in xml.itertext()][:-1]).strip()
if self.name in exceptions:
self.name += "_"
if self.type.startswith("struct "):
self.type = self.type[7:]
self.groupString = xml.attrib.get("group", None)
def __str__(self):
return "%s %s" % (self.type, self.name)
class Command:
def __init__(self, xml, features, extensions, api):
self.api = api
proto = xml.find("proto")
self.name = proto.find("name").text
self.returntype = " ".join([t.strip() for t in proto.itertext()][:-1]).strip()
self.params = []
for param in xml.iter("param"):
self.params.append(Parameter(param))
self.reqFeatures = []
self.remFeatures = [] # len(remF) should always be < 2
self.reqExtensions = []
for feature in features:
if feature.api == api and self.name in feature.reqCommandStrings:
self.reqFeatures.append(feature)
for feature in features:
if feature.api == api and self.name in feature.remCommandStrings:
self.remFeatures.append(feature)
for extension in extensions:
if extension.api == api and self.name in extension.reqCommandStrings:
self.reqExtensions.append(extensions)
def __str__(self):
return "%s %s ( %s )" % (self.returntype, self.name, ", ".join([str(p) for p in self.params]))
def __lt__(self, other):
return self.name < other.name
# this compares the given feature with the lowest requiring feature
def supported(self, feature, core):
if feature is None:
return True
        # Note: design decision:
        # feature-based function includes should not contain commands that come only from extensions.
#if len(self.reqFeatures) == 0 and len(self.reqExtensions) > 0:
# return True
if len(self.reqFeatures) == 0:
return False
if core:
return min(self.reqFeatures) <= feature and (not self.remFeatures or min(self.remFeatures) > feature)
else:
return min(self.reqFeatures) <= feature
def parseCommands(xml, features, extensions, api):
commands = []
for C in xml.iter("commands"):
# only parse command if
# (1) at least one feature or extension requires this command of requested api
for command in C.iter("command"):
proto = command.find("proto")
name = proto.find("name").text
# enforce constraint (1)
if not any(name in feature.reqCommandStrings \
for feature in features if len(feature.reqCommandStrings) > 0) \
and \
not any(name in extension.reqCommandStrings \
for extension in extensions if len(extension.reqCommandStrings) > 0):
continue
if "api" in command.attrib and command.attrib["api"] != api:
continue
commands.append(Command(command, features, extensions, api))
return sorted(commands)
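# Typical call order (illustrative, based on the functions defined in this module):
#   commands = parseCommands(registry_root, features, extensions, "gl")
#   patchCommands(commands, patches)
#   verifyCommands(commands, bitfGroups)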
def patchCommands(commands, patches):
commandsByName = dict([(command.name, command) for command in commands])
for patch in patches:
if patch.name not in commandsByName:
# ToDo: could/should extend the list of commands here
continue
command = commandsByName[patch.name]
for param in command.params:
patchedParam = next((p for p in patch.params if p.name == param.name), None)
if patchedParam is not None:
param.groupString = patchedParam.groupString
param.type = patchedParam.type
def verifyCommands(commands, bitfGroups):
bitfGroupsByName = dict([(group.name, group) for group in bitfGroups])
# all non verified commands should be patched
missing = dict()
unresolved = dict()
for command in commands:
for param in (param for param in command.params):
# check for bitfield groups (other enum groups not yet used in gl.xml)
if param.type != "GLbitfield":
continue
if param.groupString is None:
missing[param] = command
elif param.groupString not in bitfGroupsByName:
unresolved[param] = command
if len(missing) > 0:
print(" WARNING: " + str(len(missing)) + " missing group specification (defaulting to GLbitfield):")
for param, command in missing.items():
print(" %s (in %s)" % (param.name, command.name))
if len(unresolved) > 0:
print(" WARNING: " + str(len(unresolved)) + " unresolved groups:")
for param, command in unresolved.items():
print(" %s (in %s)" % (param.groupString, command.name))
|
the-stack_106_29862 | from json import dumps
from os import getcwd, listdir
from os.path import exists, join
from pathlib import Path
from re import sub
class Colors:
OK = '\033[92m'
WARN = '\033[93m'
ERR = '\033[31m'
BOLD = '\033[1m'
FAIL = ERR + BOLD
# Reset color in console. end="" is there so there isn't an extra new line
def reset():
print('\033[0m', end="")
user_directory = str(Path.home()) # C:/Users/.../
working_directory = getcwd() # ./
osu_directory = None # osu! directory | C:/Users/.../AppData/Local/osu!/
osu_songs_directory = None # osu!/Songs/
ids = [] # List of beatmap IDs
output = {} # Object to be written to the output file
output_path = "OsuBackup_output.json" # Output file name
input_obj = {}
input_path = None
if __name__ == '__main__':
osu_directory = join(user_directory, "AppData/Local/osu!/")
while not exists(osu_directory):
osu_directory = input(Colors.FAIL + "Directory not found! Enter custom osu directory here:\n")
reset()
osu_songs_directory = join(osu_directory, "Songs")
while not exists(osu_songs_directory):
osu_songs_directory = input(Colors.FAIL + "Songs directory not found! Enter custom osu songs directory here:\n")
reset()
mode = None
while mode not in ["0", "1"]:
mode = input(Colors.WARN + "Would you like to backup your osu beatmaps or restore them from a previous backup?"
"(0:backup | 1:restore):\n")
reset()
proceed = None
while proceed not in ["y", "n", "yes", "no"]:
        if mode == "0":
proceed = input(Colors.WARN + "You will be backing up your osu beatmaps. Continue (y/n)?\n")
else:
proceed = input(Colors.WARN + "You will be restoring your osu beatmaps. Continue (y/n)?\n")
reset()
if proceed in ["y", "yes"]:
# Continue
        if mode == "0":
# Backup
while exists(output_path) or not output_path:
output_path = sub(r'\W+', '', input(
Colors.FAIL + "File " + output_path +
" already exists! Please enter a different output file:\n")).strip()
reset()
ids = []
# Loop over each folder inside of the osu_songs directory, song_directory_name being each sub folders name
for beatmap_folder in listdir(osu_songs_directory):
beatmap_set_id = beatmap_folder.split(' ')[0]
print(Colors.OK + "Found: " + beatmap_folder)
ids.append(beatmap_set_id)
reset()
output.update({"beatmapsets": ids})
print(output)
# Create the output file and write the backup output to it
f = open(output_path, "w")
f.write(dumps(output))
f.close()
input("Backup Successful. Press enter to exit.")
else:
# Restore
print("Restore")
# TODO:
# # - Get each beatmap user already has
# # - If any IDS are in the backup, remove them from download queue
# # - Let user define delay (min x seconds)
# # - Give estimate on time based on download speed & amount of maps & delay
# # - Loop over all IDS in the backups 'beatmapsets' array
# # - Download .osz from = 'https://osu.ppy.sh/beatmapsets/ID/download'
# # - Unzip the .osz file and move into songs folder
# # - Delete .osz file
# # - Wait however many seconds so as to not get rate limited
# # ----- I emailed osu support to see what delay if any I should set, and whether I could continue
# # this project or not at all and I am waiting for a response ---
input("Not made yet. Press enter to exit.")
else:
exit()
|
the-stack_106_29865 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from distutils import version
import functools
import os
import random
from neutron_lib.agent import topics
from neutron_lib import constants
from neutron_lib import context
from oslo_utils import timeutils
import six
import testtools
import neutron
from neutron.agent.common import ovs_lib
from neutron.db import agents_db
HOST = 'localhost'
DEFAULT_AZ = 'nova'
def find_file(filename, path):
"""Find a file with name 'filename' located in 'path'."""
for root, _, files in os.walk(path):
if filename in files:
return os.path.abspath(os.path.join(root, filename))
def find_sample_file(filename):
"""Find a file with name 'filename' located in the sample directory."""
return find_file(
filename,
path=os.path.join(neutron.__path__[0], '..', 'etc'))
def get_test_log_path():
return os.environ.get('OS_LOG_PATH', '/tmp')
class FakePlugin(agents_db.AgentDbMixin):
pass
def _get_l3_agent_dict(host, agent_mode, internal_only=True,
ext_net_id='', az=DEFAULT_AZ):
return {
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'availability_zone': az,
'configurations': {'agent_mode': agent_mode,
'handle_internal_only_routers': internal_only,
'gateway_external_network_id': ext_net_id}}
def _register_agent(agent, plugin=None):
if not plugin:
plugin = FakePlugin()
admin_context = context.get_admin_context()
plugin.create_or_update_agent(admin_context, agent, timeutils.utcnow())
return plugin._get_agent_by_type_and_host(
admin_context, agent['agent_type'], agent['host'])
def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY,
internal_only=True, ext_net_id='', az=DEFAULT_AZ):
agent = _get_l3_agent_dict(host, agent_mode, internal_only, ext_net_id, az)
return _register_agent(agent)
def _get_dhcp_agent_dict(host, networks=0, az=DEFAULT_AZ):
agent = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'agent_type': constants.AGENT_TYPE_DHCP,
'availability_zone': az,
'configurations': {'dhcp_driver': 'dhcp_driver',
'networks': networks}}
return agent
def register_dhcp_agent(host=HOST, networks=0, admin_state_up=True,
alive=True, az=DEFAULT_AZ):
agent = _register_agent(
_get_dhcp_agent_dict(host, networks, az=az))
if not admin_state_up:
set_agent_admin_state(agent['id'])
if not alive:
kill_agent(agent['id'])
return FakePlugin()._get_agent_by_type_and_host(
context.get_admin_context(), agent['agent_type'], agent['host'])
def kill_agent(agent_id):
hour_ago = timeutils.utcnow() - datetime.timedelta(hours=1)
FakePlugin().update_agent(
context.get_admin_context(),
agent_id,
{'agent': {
'started_at': hour_ago,
'heartbeat_timestamp': hour_ago}})
def revive_agent(agent_id):
now = timeutils.utcnow()
FakePlugin().update_agent(
context.get_admin_context(), agent_id,
{'agent': {'started_at': now, 'heartbeat_timestamp': now}})
def set_agent_admin_state(agent_id, admin_state_up=False):
FakePlugin().update_agent(
context.get_admin_context(),
agent_id,
{'agent': {'admin_state_up': admin_state_up}})
def _get_l2_agent_dict(host, agent_type, binary, tunnel_types=None,
tunneling_ip='20.0.0.1', interface_mappings=None,
bridge_mappings=None, l2pop_network_types=None,
device_mappings=None, start_flag=True,
integration_bridge=None):
agent = {
'binary': binary,
'host': host,
'topic': constants.L2_AGENT_TOPIC,
'configurations': {},
'agent_type': agent_type,
'tunnel_type': [],
'start_flag': start_flag}
if tunnel_types is not None:
agent['configurations']['tunneling_ip'] = tunneling_ip
agent['configurations']['tunnel_types'] = tunnel_types
if bridge_mappings is not None:
agent['configurations']['bridge_mappings'] = bridge_mappings
if interface_mappings is not None:
agent['configurations']['interface_mappings'] = interface_mappings
if l2pop_network_types is not None:
agent['configurations']['l2pop_network_types'] = l2pop_network_types
if device_mappings is not None:
agent['configurations']['device_mappings'] = device_mappings
if integration_bridge is not None:
agent['configurations']['integration_bridge'] = integration_bridge
return agent
def register_ovs_agent(host=HOST, agent_type=constants.AGENT_TYPE_OVS,
binary='neutron-openvswitch-agent',
tunnel_types=['vxlan'], tunneling_ip='20.0.0.1',
interface_mappings=None, bridge_mappings=None,
l2pop_network_types=None, plugin=None, start_flag=True,
integration_bridge=None):
agent = _get_l2_agent_dict(host, agent_type, binary, tunnel_types,
tunneling_ip, interface_mappings,
bridge_mappings, l2pop_network_types,
start_flag=start_flag,
integration_bridge=integration_bridge)
return _register_agent(agent, plugin)
def register_linuxbridge_agent(host=HOST,
agent_type=constants.AGENT_TYPE_LINUXBRIDGE,
binary='neutron-linuxbridge-agent',
tunnel_types=['vxlan'], tunneling_ip='20.0.0.1',
interface_mappings=None, bridge_mappings=None,
plugin=None):
agent = _get_l2_agent_dict(host, agent_type, binary, tunnel_types,
tunneling_ip=tunneling_ip,
interface_mappings=interface_mappings,
bridge_mappings=bridge_mappings)
return _register_agent(agent, plugin)
def register_macvtap_agent(host=HOST,
agent_type=constants.AGENT_TYPE_MACVTAP,
binary='neutron-macvtap-agent',
interface_mappings=None, plugin=None):
agent = _get_l2_agent_dict(host, agent_type, binary,
interface_mappings=interface_mappings)
return _register_agent(agent, plugin)
def register_sriovnicswitch_agent(host=HOST,
agent_type=constants.AGENT_TYPE_NIC_SWITCH,
binary='neutron-sriov-nic-agent',
device_mappings=None, plugin=None):
agent = _get_l2_agent_dict(host, agent_type, binary,
device_mappings=device_mappings)
return _register_agent(agent, plugin)
def requires_py2(testcase):
return testtools.skipUnless(six.PY2, "requires python 2.x")(testcase)
def requires_py3(testcase):
return testtools.skipUnless(six.PY3, "requires python 3.x")(testcase)
def get_not_used_vlan(bridge, vlan_range):
port_vlans = bridge.ovsdb.db_find(
'Port', ('tag', '!=', []), columns=['tag']).execute()
used_vlan_tags = {val['tag'] for val in port_vlans}
available_vlans = vlan_range - used_vlan_tags
return random.choice(list(available_vlans))
def skip_if_ovs_older_than(ovs_version):
"""Decorator for test method to skip if OVS version doesn't meet
minimal requirement.
"""
def skip_if_bad_ovs(f):
@functools.wraps(f)
def check_ovs_and_skip(test):
ovs = ovs_lib.BaseOVS()
current_ovs_version = version.StrictVersion(
ovs.config['ovs_version'])
if current_ovs_version < version.StrictVersion(ovs_version):
test.skipTest("This test requires OVS version %s or higher." %
ovs_version)
return f(test)
return check_ovs_and_skip
return skip_if_bad_ovs
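# Illustrative usage of the decorator above:
#   @skip_if_ovs_older_than("2.5.0")
#   def test_needs_recent_ovs(self):
#       ...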
|
the-stack_106_29868 | import os
import ast
import re
import yaml
from jinja2 import Environment, BaseLoader, FileSystemLoader, select_autoescape
import traceback
import functools
import time
import cProfile
import io
import pstats
import datetime
from collections import OrderedDict
import appdaemon.utils as ha
class Dashboard:
def __init__(self, config_dir, logging, **kwargs):
#
# Set Defaults
#
self.dash_install_dir = os.path.dirname(__file__)
self.config_dir = config_dir
self.logging = logging
self.logger = logging.get_child("_dashboard")
self.access = logging.get_access()
self.dashboard_dir = os.path.join(config_dir, "dashboards")
self.profile_dashboard = False
self.compile_dir = os.path.join(self.config_dir, "compiled")
self.javascript_dir = None
self.compiled_javascript_dir = os.path.join(self.compile_dir, "javascript")
self.compiled_html_dir = os.path.join(self.compile_dir, "html")
self.template_dir = None
self.css_dir = None
self.compiled_css_dir = os.path.join(self.compile_dir, "css")
self.fonts_dir = None
self.webfonts_dir = None
self.images_dir = None
self.base_url = ""
self.dash_force_compile = False
self.dash_compile_on_start = False
self.max_include_depth = 10
self.fa4compatibility = False
self.transport = "ws"
self.title = "HADashboard"
#
# Process any overrides
#
self._process_arg("profile_dashboard", kwargs)
self._process_arg("dashboard_dir", kwargs)
self._process_arg("compile_dir", kwargs)
self._process_arg("javascript_dir", kwargs)
self._process_arg("compiled_javascript_dir", kwargs)
self._process_arg("compiled_html_dir", kwargs)
self._process_arg("template_dir", kwargs)
self._process_arg("css_dir", kwargs)
self._process_arg("compiled_css_dir", kwargs)
self._process_arg("fonts_dir", kwargs)
self._process_arg("webfonts_dir", kwargs)
self._process_arg("images_dir", kwargs)
self._process_arg("base_url", kwargs)
self._process_arg("dash_force_compile", kwargs)
self._process_arg("dash_compile_on_start", kwargs)
self._process_arg("max_include_depth", kwargs)
self._process_arg("fa4compatibility", kwargs)
self._process_arg("transport", kwargs)
self._process_arg("title", kwargs)
#
# Create some dirs
#
try:
js = os.path.join(self.compile_dir, "javascript")
css = os.path.join(self.compile_dir, "css")
if not os.path.isdir(self.compile_dir):
os.makedirs(self.compile_dir)
if not os.path.isdir(os.path.join(self.compile_dir, "javascript")):
os.makedirs(js)
if not os.path.isdir(os.path.join(self.compile_dir, "css")):
os.makedirs(css)
ha.check_path("css", self.logger, css, permissions="rwx")
ha.check_path("javascript", self.logger, js, permissions="rwx")
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during HADashboard initialization")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
#
# Set a start time
#
self.start_time = datetime.datetime.now()
def _timeit(func):
@functools.wraps(func)
def newfunc(self, *args, **kwargs):
start_time = time.time()
result = func(self, *args, **kwargs)
elapsed_time = time.time() - start_time
self.access.info("function [%s] finished in %s ms", func.__name__, int(elapsed_time * 1000))
return result
return newfunc
def _profile_this(fn):
def profiled_fn(self, *args, **kwargs):
pr = None
if self.profile_dashboard:
pr = cProfile.Profile()
pr.enable()
dash = fn(self, *args, **kwargs)
if self.profile_dashboard:
pr.disable()
s = io.StringIO()
sortby = "cumulative"
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
return dash
return profiled_fn
def _process_arg(self, arg, kwargs):
if kwargs:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences
def _load_css_params(self, skin, skindir):
yaml_path = os.path.join(skindir, "variables.yaml")
if os.path.isfile(yaml_path):
with open(yaml_path, "r") as yamlfd:
css_text = yamlfd.read()
try:
css = self._load_yaml(css_text)
except yaml.YAMLError as exc:
self.logger.warning("Error loading CSS variables")
self._log_yaml_error(exc)
return None
if css is None:
return {}
else:
return self._resolve_css_params(css, css)
else:
self.logger.warning("Error loading variables.yaml for skin '%s'", skin)
return None
def _resolve_css_params(self, fields, subs):
done = False
variable = re.compile("\\$(\\w+)")
index = 0
while not done and index < 100:
index += 1
done = True
for varline in fields:
if isinstance(fields[varline], dict):
fields[varline] = self._resolve_css_params(fields[varline], subs)
elif fields[varline] is not None and type(fields[varline]) == str:
_vars = variable.finditer(fields[varline])
for var in _vars:
subvar = var.group()[1:]
if subvar in subs:
done = False
fields[varline] = fields[varline].replace(var.group(), subs[subvar], 1)
else:
self.logger.warning("Variable definition not found in CSS Skin variables: $%s", subvar)
fields[varline] = ""
if index == 100:
self.logger.warning("Unable to resolve CSS Skin variables, check for circular references")
return fields
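    # For example, with skin variables {"primary_color": "#404040"}, a field such as
    # "background-color: $primary_color" resolves to "background-color: #404040";
    # nested $references are resolved over repeated passes, capped at 100 iterations.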
@staticmethod
def _get_styles(style_str, name, field):
#
# Parse styles in order from a string and allow later entries to override earlier ones
#
result = {}
styles = style_str.split(";")
for style in styles:
if style != "" and style is not None:
pieces = style.split(":", 1)
result[pieces[0].strip()] = pieces[1]
return result
def _merge_styles(self, widget, name):
result = {}
for key in widget:
if key == "css" or key == "static_css":
result[key] = self._merge_styles(widget[key], name)
elif key.find("style") == -1:
result[key] = widget[key]
else:
line = ""
styles = self._get_styles(widget[key], name, key)
for style in styles:
line = line + style + ":" + styles[style] + ";"
result[key] = line
return result
def _do_subs(self, value, _vars):
if isinstance(value, dict):
result = {}
templates = {}
for (key, value) in value.items():
processed, t = self._do_subs(value, _vars)
result[key] = processed
templates = {**templates, **t}
return result, templates
elif isinstance(value, list):
result = []
templates = {}
for item in value:
                processed, t = self._do_subs(item, _vars)
result.append(processed)
templates = {**templates, **t}
return result, templates
elif isinstance(value, str):
templates = {}
for ikey in _vars:
match = "{{{{{}}}}}".format(ikey)
if match in value:
templates[ikey] = 1
value = value.replace(match, _vars[ikey])
# Replace variables that are still left with an empty string.
value = re.sub("{{(.+)}}", "", value)
return value, templates
else:
return value, {}
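    # For example, with _vars = {"entity": "light.office"}, the string "{{entity}}_state"
    # becomes "light.office_state"; any "{{...}}" placeholders left unmatched are stripped,
    # and the keys that were substituted are reported back in the returned templates dict.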
# noinspection PyUnresolvedReferences
def _load_widget(self, dash, includes, name, css_vars, global_parameters): # noqa C901
instantiated_widget = None
#
# Check if we have already encountered a definition
#
for include in includes:
if name in include:
instantiated_widget = include[name]
#
# If not, go find it elsewhere
#
if instantiated_widget is None:
# Try to find in in a yaml file
yaml_path = os.path.join(self.dashboard_dir, "{}.yaml".format(name))
if os.path.isfile(yaml_path):
with open(yaml_path, "r") as yamlfd:
widget = yamlfd.read()
try:
instantiated_widget = self._load_yaml(widget)
except yaml.YAMLError as exc:
self._log_error(dash, name, "Error while parsing dashboard '{}':".format(yaml_path))
self._log_yaml_dash_error(dash, name, exc)
return self.error_widget("Error loading widget")
elif name.find(".") != -1:
#
# No file, check if it is implicitly defined via an entity id
#
parts = name.split(".")
instantiated_widget = {"widget_type": parts[0], "entity": name, "title_is_friendly_name": 1}
else:
self.logger.warning("Unable to find widget definition for '%s'", name)
# Return some valid data so the browser will render a blank widget
return self.error_widget("Widget definition not found")
widget_type = None
try:
if "widget_type" not in instantiated_widget:
return self.error_widget("Widget type not specified")
#
# One way or another we now have the widget definition
#
widget_type = instantiated_widget["widget_type"]
if widget_type == "text_sensor":
self.logger.warning(
"'text_sensor' widget is deprecated, please use 'sensor' instead for widget '%s'", name
)
# Check for custom base widgets first
if os.path.isdir(os.path.join(self.config_dir, "custom_widgets", widget_type)):
# This is a custom base widget so return it in full
return self._resolve_css_params(instantiated_widget, css_vars)
# Now regular base widgets
if os.path.isdir(os.path.join(self.dash_install_dir, "widgets", widget_type)):
# This is a base widget so return it in full
return self._resolve_css_params(instantiated_widget, css_vars)
# We are working with a derived widget so we need to do some merges and substitutions
# first check for custom widget
yaml_path = os.path.join(self.config_dir, "custom_widgets", "{}.yaml".format(widget_type))
if not os.path.isfile(yaml_path):
yaml_path = os.path.join(self.dash_install_dir, "widgets", "{}.yaml".format(widget_type))
try:
#
# Parse the derived widget definition
#
with open(yaml_path, "r") as yamlfd:
widget = yamlfd.read()
final_widget = self._load_yaml(widget)
except yaml.YAMLError as exc:
                self._log_error(dash, name, "Error in widget definition '{}':".format(widget_type))
self._log_yaml_dash_error(dash, name, exc)
return self.error_widget("Error loading widget definition")
# Substitute variables in the parsed widget definition.
final_widget, templates = self._do_subs(final_widget, instantiated_widget)
#
# Add in global params
#
if global_parameters is not None:
for key in global_parameters:
if key == "devices":
if widget_type in global_parameters["devices"]:
for dkey in global_parameters["devices"][widget_type]:
if dkey not in instantiated_widget:
instantiated_widget[dkey] = global_parameters["devices"][widget_type][dkey]
else:
if key not in instantiated_widget:
instantiated_widget[key] = global_parameters[key]
#
# Override defaults with parameters in users definition
#
for key in instantiated_widget:
if key != "widget_type" and key not in templates:
# if it is an existing key and it is a style attribute, prepend, don't overwrite
if key in final_widget and key.find("style") != -1:
# if it is an existing key and it is a style attribute, prepend, don't overwrite
final_widget[key] = final_widget[key] + ";" + instantiated_widget[key]
else:
final_widget[key] = instantiated_widget[key]
if "fields" in final_widget and key in final_widget["fields"]:
final_widget["fields"][key] = instantiated_widget[key]
if "css" in final_widget and key in final_widget["css"]:
final_widget["css"][key] = final_widget["css"][key] + ";" + instantiated_widget[key]
if "static_css" in final_widget and key in final_widget["static_css"]:
final_widget["static_css"][key] = (
final_widget["static_css"][key] + ";" + instantiated_widget[key]
)
if "icons" in final_widget and key in final_widget["icons"]:
final_widget["icons"][key] = instantiated_widget[key]
if "static_icons" in final_widget and key in final_widget["static_icons"]:
final_widget["static_icons"][key] = instantiated_widget[key]
#
# Process variables from skin
#
final_widget = self._resolve_css_params(final_widget, css_vars)
#
# Merge styles
#
final_widget = self._merge_styles(final_widget, name)
return final_widget
except FileNotFoundError:
self.logger.warning("Unable to find widget type '%s'", widget_type)
self.logger.warning(traceback.format_exc())
# Return some valid data so the browser will render a blank widget
return self.error_widget("Unable to find widget type '{}'".format(widget_type))
@staticmethod
def error_widget(error):
return {"widget_type": "baseerror", "fields": {"err": error}, "static_css": {"widget_style": ""}}
@staticmethod
def _widget_exists(widgets, _id):
for widget in widgets:
if widget["id"] == _id:
return True
return False
def _add_layout(self, value, layout, occupied, dash, page, includes, css_vars, global_parameters):
if value is None:
return
widget_dimensions = re.compile("^(.+)\\((\\d+)x(\\d+)\\)$")
value = "".join(value.split())
widgets = value.split(",")
column = 1
for wid in widgets:
size = widget_dimensions.search(wid)
if size:
name = size.group(1)
xsize = size.group(2)
ysize = size.group(3)
elif "widget_size" in dash:
name = wid
xsize = dash["widget_size"][0]
ysize = dash["widget_size"][1]
else:
name = wid
xsize = 1
ysize = 1
while "{}x{}".format(column, layout) in occupied:
column += 1
if name != "spacer":
sanitized_name = name.replace(".", "-").replace("_", "-").lower()
widget = {"id": "{}-{}".format(page, sanitized_name)}
if self._widget_exists(dash["widgets"], widget["id"]):
self.logger.warning("Duplicate widget name '%s' - ignored", name)
else:
widget["position"] = [column, layout]
widget["size"] = [xsize, ysize]
widget["parameters"] = self._load_widget(dash, includes, name, css_vars, global_parameters)
dash["widgets"].append(widget)
for x in range(column, column + int(xsize)):
for y in range(layout, layout + int(ysize)):
occupied["{}x{}".format(x, y)] = 1
column += int(xsize)
@staticmethod
def _merge_dashes(dash1, dash2):
for key in dash2:
if key == "widgets":
for widget in dash2["widgets"]:
dash1["widgets"].append(widget)
elif key == "errors":
for error in dash2["errors"]:
dash1["errors"].append(error)
else:
dash1[key] = dash2[key]
return dash1
def _log_error(self, dash, name, error):
dash["errors"].append("{}: {}".format(os.path.basename(name), error))
self.logger.warning(error)
def _log_yaml_error(self, exc):
for line in self._yaml_error_lines(exc):
self.logger.warning(line)
def _log_yaml_dash_error(self, dash, name, exc):
for line in self._yaml_error_lines(exc):
self._log_error(dash, name, line)
@staticmethod
def _yaml_error_lines(exc):
lines = []
if hasattr(exc, "problem_mark"):
lines.append("parser says")
lines.append(str(exc.problem_mark))
if exc.context is not None:
lines.append(str(exc.problem) + " " + str(exc.context))
else:
lines.append(str(exc.problem))
return lines
def _load_yaml(self, stream):
myyaml = None
yaml.add_constructor("!secret", ha._secret_yaml, Loader=yaml.SafeLoader)
try:
myyaml = yaml.load(stream, Loader=yaml.SafeLoader)
except ValueError as v:
self.logger.warning(str(v))
return myyaml
def _create_dash(self, name, css_vars):
dash, layout, occupied, includes = self._create_sub_dash(name, "dash", 0, {}, [], 1, css_vars, None)
return dash
# noinspection PyBroadException
def _create_sub_dash( # noqa: C901
self, name, extension, layout, occupied, includes, level, css_vars, global_parameters
):
if extension == "dash":
dash = {"title": "HADashboard", "widget_dimensions": [120, 120], "widget_margins": [5, 5], "columns": 8}
else:
dash = {}
dash["widgets"] = []
dash["errors"] = []
valid_params = [
"title",
"widget_dimensions",
"widget_margins",
"columns",
"widget_size",
"rows",
"namespace",
"scalable",
]
layouts = []
if level > self.max_include_depth:
self._log_error(dash, name, "Maximum include level reached ({})".format(self.max_include_depth))
return dash, layout, occupied, includes
dashfile = os.path.join(self.dashboard_dir, "{}.{}".format(name, extension))
page = "default"
try:
with open(dashfile, "r") as yamlfd:
defs = yamlfd.read()
except Exception:
self._log_error(dash, name, "Error opening dashboard file '{}'".format(dashfile))
return dash, layout, occupied, includes
try:
dash_params = self._load_yaml(defs)
except yaml.YAMLError as exc:
self._log_error(dash, name, "Error while parsing dashboard '{}':".format(dashfile))
self._log_yaml_dash_error(dash, name, exc)
return dash, layout, occupied, includes
if dash_params is not None:
if "global_parameters" in dash_params:
if extension == "dash":
global_parameters = dash_params["global_parameters"]
else:
self.logger.warning(
"global_parameters dashboard directive illegal in imported dashboard '%s.%s'", name, extension
)
if global_parameters is None:
global_parameters = {"namespace": "default"}
if "namespace" not in global_parameters:
global_parameters["namespace"] = "default"
for param in dash_params:
if param == "layout" and dash_params[param] is not None:
for lay in dash_params[param]:
layouts.append(lay)
elif param in valid_params:
if extension == "dash":
dash[param] = dash_params[param]
else:
self.logger.warning(
"Top level dashboard directive illegal in imported dashboard '%s.%s': %s: %s",
name,
extension,
param,
dash_params[param],
)
else:
includes.append({param: dash_params[param]})
for lay in layouts:
if isinstance(lay, dict):
if "include" in lay:
new_dash, layout, occupied, includes = self._create_sub_dash(
os.path.join(self.dashboard_dir, lay["include"]),
"yaml",
layout,
occupied,
includes,
level + 1,
css_vars,
global_parameters,
)
if new_dash is not None:
self._merge_dashes(dash, new_dash)
elif "empty" in lay:
layout += lay["empty"]
else:
self._log_error(dash, name, "Incorrect directive, should be 'include or empty': {}".format(lay))
else:
layout += 1
self._add_layout(lay, layout, occupied, dash, page, includes, css_vars, global_parameters)
return dash, layout, occupied, includes
@staticmethod
def _latest_file(path):
late_file = datetime.datetime.fromtimestamp(86400)
for root, subdirs, files in os.walk(path):
for file in files:
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(os.path.join(root, file)))
if mtime > late_file:
late_file = mtime
return late_file
# noinspection PyBroadException
def _get_dash(self, name, skin, skindir): # noqa C901
pydashfile = os.path.join(self.dashboard_dir, "{}.pydash".format(name))
dashfile = os.path.join(self.dashboard_dir, "{}.dash".format(name))
#
# Grab CSS Variables
#
css_vars = self._load_css_params(skin, skindir)
if css_vars is None:
return None
if os.path.isfile(pydashfile):
with open(pydashfile, "r") as dashfd:
dash = ast.literal_eval(dashfd.read())
elif os.path.isfile(dashfile):
dash = self._create_dash(name, css_vars)
if dash is None:
return None
else:
self.logger.warning("Dashboard '%s' not found", name)
return None
if "head_includes" in css_vars and css_vars["head_includes"] is not None:
dash["head_includes"] = css_vars["head_includes"]
else:
dash["head_includes"] = []
#
# adds custom_javascripts to the head includes if they exist
#
custom_js = os.path.join(self.config_dir, "custom_javascript")
if os.path.isdir(custom_js):
for filename in os.listdir(custom_js):
if filename.endswith(".js"):
dash["head_includes"].append(
'<script type="text/javascript" src="custom_javascript/{}"></script>'.format(filename)
)
if "body_includes" in css_vars and css_vars["body_includes"] is not None:
dash["body_includes"] = css_vars["body_includes"]
else:
dash["body_includes"] = []
#
# Load Widgets
#
widgets = self._get_widgets()
css = ""
js = ""
rendered_css = None
widget = None
try:
#
# Base CSS template and compile
#
if not os.path.isfile(os.path.join(skindir, "dashboard.css")):
self.logger.warning("Error loading dashboard.css for skin '%s'", skin)
else:
template = os.path.join(skindir, "dashboard.css")
with open(template, "r") as cssfd:
csstemplate = cssfd.read()
rendered_css, subs = self._do_subs(csstemplate, css_vars)
css = css + rendered_css + "\n"
#
# Template and compile widget CSS
#
for widget in dash["widgets"]:
css_template = Environment(loader=BaseLoader).from_string(
widgets[widget["parameters"]["widget_type"]]["css"]
)
css_vars["id"] = widget["id"]
rendered_css = css_template.render(css_vars)
css = css + rendered_css + "\n"
for widget in widgets:
js = js + widgets[widget]["js"] + "\n"
except KeyError:
self.logger.warning("Widget type not found: %s", widget["parameters"]["widget_type"])
return None
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error in CSS file")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
if rendered_css is not None:
self.logger.warning("Rendered CSS:")
self.logger.warning(rendered_css)
self.logger.warning("-" * 60)
return None
if not os.path.exists(os.path.join(self.compiled_css_dir, skin)):
os.makedirs(os.path.join(self.compiled_css_dir, skin))
css_path = os.path.join(self.compiled_css_dir, skin, "{}_application.css".format(name.lower()))
with open(css_path, "w") as css_file:
css_file.write(css)
if not os.path.exists(self.compiled_javascript_dir):
os.makedirs(self.compiled_javascript_dir)
if not os.path.exists(os.path.join(self.compiled_javascript_dir, skin)):
os.makedirs(os.path.join(self.compiled_javascript_dir, skin))
if not os.path.exists(self.compiled_html_dir):
os.makedirs(self.compiled_html_dir)
if not os.path.exists(os.path.join(self.compiled_html_dir, skin)):
os.makedirs(os.path.join(self.compiled_html_dir, skin))
js_path = os.path.join(self.compiled_javascript_dir, "application.js")
with open(js_path, "w") as js_file:
js_file.write(js)
for widget in dash["widgets"]:
html = widgets[widget["parameters"]["widget_type"]]["html"].replace("\n", "").replace("\r", "")
widget["html"] = html
return dash
def _get_widgets(self):
widgets = {}
for widget_dir in [
os.path.join(self.dash_install_dir, "widgets"),
os.path.join(self.config_dir, "custom_widgets"),
]:
# widget_dir = os.path.join(self.dash_install_dir, "widgets")
if os.path.isdir(widget_dir):
widget_dirs = os.listdir(path=widget_dir)
for widget in widget_dirs:
if widget_dir == os.path.join(self.config_dir, "custom_widgets"):
self.access.info("Loading custom widget '%s'", widget)
if os.path.isdir(os.path.join(widget_dir, widget)):
jspath = os.path.join(widget_dir, widget, "{}.js".format(widget))
csspath = os.path.join(widget_dir, widget, "{}.css".format(widget))
htmlpath = os.path.join(widget_dir, widget, "{}.html".format(widget))
with open(jspath, "r") as fd:
js = fd.read()
with open(csspath, "r") as fd:
css = fd.read()
with open(htmlpath, "r") as fd:
html = fd.read()
widgets[widget] = {"js": js, "css": css, "html": html}
return widgets
def _list_dashes(self):
if not os.path.isdir(self.dashboard_dir):
return {}
files = os.listdir(self.dashboard_dir)
dash_list = OrderedDict()
for file in sorted(files):
if file.endswith(".pydash"):
name = file.replace(".pydash", "")
dash_list[name] = "{}/{}".format(self.base_url, name)
elif file.endswith(".dash"):
name = file.replace(".dash", "")
dash_list[name] = "{}/{}".format(self.base_url, name)
params = {"dash_list": dash_list, "main": "1"}
return params
def _conditional_compile(self, name, skin, recompile):
#
# Check skin exists
#
skindir = os.path.join(self.config_dir, "custom_css", skin)
if os.path.isdir(skindir):
self.access.info("Loading custom skin '%s'", skin)
else:
# Not a custom skin, try product skins
skindir = os.path.join(self.css_dir, skin)
if not os.path.isdir(skindir):
self.logger.warning("Skin '%s' does not exist", skin)
skin = "default"
skindir = os.path.join(self.css_dir, "default")
if self.dash_force_compile is False:
do_compile = False
if recompile is True:
do_compile = True
#
# Check if compiled versions even exist and get their timestamps.
#
last_compiled = datetime.datetime.now()
for file in [
os.path.join(self.compiled_css_dir, skin, "{}_application.css".format(name.lower())),
os.path.join(self.compiled_javascript_dir, "application.js"),
os.path.join(self.compiled_javascript_dir, skin, "{}_init.js".format(name.lower())),
os.path.join(self.compiled_html_dir, skin, "{}_head.html".format(name.lower())),
os.path.join(self.compiled_html_dir, skin, "{}_body.html".format(name.lower())),
]:
if not os.path.isfile(file):
do_compile = True
try:
mtime = os.path.getmtime(file)
except OSError:
mtime = 86400
last_modified_date = datetime.datetime.fromtimestamp(mtime)
if last_modified_date < last_compiled:
last_compiled = last_modified_date
widget_mod = self._latest_file(os.path.join(self.dash_install_dir, "widgets"))
custom_widget_mod = self._latest_file(os.path.join(self.config_dir, "custom_widgets"))
skin_mod = self._latest_file(skindir)
dash_mod = self._latest_file(self.dashboard_dir)
if (
custom_widget_mod > last_compiled
or widget_mod > last_compiled
or skin_mod > last_compiled
or dash_mod > last_compiled
):
do_compile = True
# Force compilation at startup
if self.start_time > last_compiled and self.dash_compile_on_start is True:
do_compile = True
if do_compile is False:
return {"errors": []}
self.access.info("Compiling dashboard '%s'", name)
dash = self._get_dash(name, skin, skindir)
if dash is None:
dash_list = self._list_dashes()
return {"errors": ["Dashboard has errors or is not found - check log for details"], "dash_list": dash_list}
params = dash
params["base_url"] = self.base_url
params["name"] = name.lower()
params["skin"] = skin
params["transport"] = self.transport
#
# Build dash specific code
#
env = Environment(loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]))
template = env.get_template("dashinit.jinja2")
rendered_template = template.render(params)
js_path = os.path.join(self.compiled_javascript_dir, skin, "{}_init.js".format(name.lower()))
with open(js_path, "w") as js_file:
js_file.write(rendered_template)
template = env.get_template("head_include.jinja2")
rendered_template = template.render(params)
js_path = os.path.join(self.compiled_html_dir, skin, "{}_head.html".format(name.lower()))
with open(js_path, "w") as js_file:
js_file.write(rendered_template)
template = env.get_template("body_include.jinja2")
rendered_template = template.render(params)
js_path = os.path.join(self.compiled_html_dir, skin, "{}_body.html".format(name.lower()))
with open(js_path, "w") as js_file:
js_file.write(rendered_template)
return dash
#
# Methods
#
@_profile_this
@_timeit
def get_dashboard(self, name, skin, recompile):
try:
dash = self._conditional_compile(name, skin, recompile)
if dash is None:
errors = ["An unrecoverable error occurred - check log for details"]
else:
errors = dash["errors"]
if "widgets" in dash:
widgets = dash["widgets"]
else:
widgets = {}
if "scalable" in dash:
scalable = dash["scalable"]
else:
scalable = True
if "dash_list" in dash and dash["dash_list"] != {}:
dash_list = dash["dash_list"]["dash_list"]
else:
dash_list = []
# add errors if we got any
if errors:
params = {"title": self.title, "errors": errors, "name": name.lower(), "dash_list": dash_list}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"])
)
template = env.get_template("list.jinja2")
rendered_template = template.render(params)
else:
include_path = os.path.join(self.compiled_html_dir, skin, "{}_head.html".format(name.lower()))
with open(include_path, "r") as include_file:
head_includes = include_file.read()
include_path = os.path.join(self.compiled_html_dir, skin, "{}_body.html".format(name.lower()))
with open(include_path, "r") as include_file:
body_includes = include_file.read()
#
# return params
#
params = {
"title": self.title,
"errors": errors,
"name": name.lower(),
"skin": skin,
"widgets": widgets,
"head_includes": head_includes,
"body_includes": body_includes,
"scalable": scalable,
"fa4compatibility": self.fa4compatibility,
"transport": self.transport,
}
env = Environment(
loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"])
)
template = env.get_template("dashboard.jinja2")
rendered_template = template.render(params)
return rendered_template
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during DASH creation")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
return self.html_error()
def html_error(self):
params = {"errors": ["An unrecoverable error occurred fetching dashboard, check log for details"]}
env = Environment(loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]))
template = env.get_template("list.jinja2")
rendered_template = template.render(params)
return rendered_template
def get_dashboard_list(self, paramOverwrite=None):
if paramOverwrite is None:
dash = self._list_dashes()
else:
dash = paramOverwrite
env = Environment(loader=FileSystemLoader(self.template_dir), autoescape=select_autoescape(["html", "xml"]))
template = env.get_template("list.jinja2")
rendered_template = template.render(dash)
return rendered_template
|
the-stack_106_29869 | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.http import HttpResponse
from django.urls import path
from django.views import defaults as default_views
from django.views.decorators.cache import cache_control
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
schema_view = get_schema_view(
openapi.Info(
title='Gnosis Safe Transaction Service API',
default_version='v1',
description='API to store safe multisig transactions',
contact=openapi.Contact(email='[email protected]'),
license=openapi.License(name='MIT License'),
),
validators=['flex', 'ssv'],
public=True,
)
schema_cache_timeout = 60 * 5 # 5 minutes
schema_cache_decorator = cache_control(max_age=schema_cache_timeout)
urlpatterns = [
url(r'^$',
schema_cache_decorator(schema_view.with_ui('swagger', cache_timeout=0)),
name='schema-swagger-ui'),
url(r'^swagger(?P<format>\.json|\.yaml)$',
schema_cache_decorator(schema_view.without_ui(cache_timeout=schema_cache_timeout)),
name='schema-json'),
url(r'^redoc/$',
schema_cache_decorator(schema_view.with_ui('redoc', cache_timeout=schema_cache_timeout)),
name='schema-redoc'),
url(settings.ADMIN_URL, admin.site.urls),
url(r'^api/v1/', include('safe_transaction_service.history.urls', namespace='v1')),
url(r'^check/', lambda request: HttpResponse("Ok"), name='check'),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(
r"^400/$",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
url(
r"^403/$",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
url(
r"^404/$",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
url(r"^500/$", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls)),] + urlpatterns
admin.autodiscover()
|
the-stack_106_29870 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpdateJobParameters(Model):
"""The parameters that can be used to update existing Data Lake Analytics job
information properties. (Only for use internally with Scope job type.).
:param degree_of_parallelism: the degree of parallelism used for this job.
This must be greater than 0, if set to less than 0 it will default to 1.
:type degree_of_parallelism: int
:param priority: the priority value for the current job. Lower numbers
have a higher priority. By default, a job has a priority of 1000. This
must be greater than 0.
:type priority: int
:param tags: the key-value pairs used to add additional metadata to the
job information. (Only for use internally with Scope job type.)
:type tags: dict[str, str]
"""
_attribute_map = {
'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
'priority': {'key': 'priority', 'type': 'int'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, degree_of_parallelism=None, priority=None, tags=None):
super(UpdateJobParameters, self).__init__()
self.degree_of_parallelism = degree_of_parallelism
self.priority = priority
self.tags = tags
|
the-stack_106_29871 | import datetime
import json
from pathlib import Path
import astropy.units as u
from astroplan import Observer
from astropy.time import Time
from astroplan import moon
class Celestial:
"""Provides information about celestial sightings relative to Virginia.
Methods:
- get_next_event_after_dt: Get next rise or set of a given celestial body.
- get_next_moon_event: Get next full or new moon.
- get_moon_phase: Get the current moon phase.
"""
BODIES = ["moon", "sun", "venus", "mars", "jupiter", "orion"]
MOON_EVENTS = ["full", "new"]
def __init__(self):
dirname = Path(__file__).parent
self.charts = {}
for body in self.BODIES:
fname = dirname / ("charts/%s-va.json" % body)
with open(fname) as f:
self.charts[body] = json.loads(f.read())
self.moon_charts = {}
for event in self.MOON_EVENTS:
fname = dirname / ("charts/%s-moon.json" % event)
with open(fname) as f:
self.moon_charts[event] = json.loads(f.read())
@staticmethod
    def _get_datetime_from_iso(iso_str):
        return datetime.datetime.strptime(iso_str, "%Y-%m-%dT%H:%M:%S.%fZ")
def get_next_event(self, body="moon", event="rise"):
now_dt = datetime.datetime.now()
return self.get_next_event_after_dt(now_dt, body, event)
def get_next_event_after_dt(self, start_dt, body="moon", event="rise"):
"""Get the next rise/set after a given time.
Fetches date by searching the chart of rise/set times as obtained from
the US Naval Observatory at https://aa.usno.navy.mil.
"""
# Other methods, such as using astropy's astroplan, were too slow.
for day in self.charts[body]:
            if event not in day:
continue # no rise/set that day
event_dt = self._get_datetime_from_iso(day[event]["time"])
# Found the first event after the current time.
# Assumes sequential order in the chart
if event_dt > start_dt:
azimuth = day[event]["azimuth"]
return (event_dt, azimuth)
def get_next_moon_event(self, event, start_dt):
"""Get the time of the next new or full moon after the given date."""
for date in self.moon_charts[event]:
event_dt = self._get_datetime_from_iso(date)
            # Assume chronological order in the list
if event_dt > start_dt:
return event_dt
def get_moon_phase(self):
"""Get the current moon phase and waxing/waning information."""
# These numbers are just guesses.
phases = {
"new": (0, 0.005),
"crescent": (0.005, 0.47),
"quarter": (0.47, 0.53),
"gibbous": (0.53, 0.9925),
"full": (0.9925, 1),
}
now_dt = datetime.datetime.now()
illumination = moon.moon_illumination(Time(now_dt))
for phase, (lower, upper) in phases.items():
if lower < illumination <= upper:
current_phase = phase
break
yesterday = Time(now_dt - datetime.timedelta(hours=1))
trend = (
"waning" if moon.moon_illumination(yesterday) > illumination else "waxing"
)
return (trend, current_phase, illumination)
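# Hedged usage sketch for the class above. It only calls methods defined in
# this module, but it still requires the chart JSON files under charts/ and the
# astropy/astroplan dependencies, and it assumes the charts contain an upcoming
# event, so treat it as illustrative rather than a test.
def _example_celestial_usage():
    cel = Celestial()
    rise_dt, azimuth = cel.get_next_event(body="moon", event="rise")
    print("Next moonrise:", rise_dt, "at azimuth", azimuth)
    next_full = cel.get_next_moon_event("full", datetime.datetime.now())
    print("Next full moon:", next_full)
    trend, phase, illumination = cel.get_moon_phase()
    print("Moon is a %s %s (%.0f%% illuminated)" % (trend, phase, illumination * 100))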
|
the-stack_106_29874 | """Cli process commands"""
import os
from itertools import chain
from functools import partial
from operator import attrgetter
from multiprocessing.dummy import Pool
from vimanga import api, utils
def find(search='',
threads=3,
chapters=None,
directory=None,
download=False,
convert_to='images',
**kwargs):
"""Find mangas cli interface"""
mangas = next(api.core.get_mangas(search, **kwargs))
if not chapters:
return '\n'.join(
map(lambda x: '{}, {}'.format(x.name, x.score), mangas.data)
)
manga = mangas.data[0]
print('Manga: {}'.format(manga.name))
manga_chapters = api.core.get_chapters(manga)
filters_chapters = _filter_chapters(manga_chapters, chapters)
filters_chapters = sorted(filters_chapters, key=lambda x: float(x.number))
if not download:
return '\n'.join(
map(lambda x: 'Capitulo {}'.format(x.number), filters_chapters)
)
pool = Pool(threads)
directory = directory or os.path.expanduser('~')
manga_folder = os.path.join(directory, manga.name)
generator = pool.imap_unordered(utils.download_chapter, filters_chapters)
for chapter, data in generator:
if convert_to == 'images':
utils.convert_to_images('{}.jpg',
chapter.number,
manga_folder,
data)
else:
utils.convert_to_pdf(f'Capitulo {chapter.number}.pdf',
data,
manga_folder)
def _contain(numbers, chapter):
if isinstance(numbers, str):
numbers = numbers.split(',')
truth_table = []
for number in numbers:
try:
value = [int(number)]
except ValueError:
_min, _max = map(int, number.split('to'))
value = range(_min, _max + 1)
truth_table.append(int(float(chapter.number)) in value)
return any(truth_table)
def _filter_chapters(chapters, numbers):
chapters = map(attrgetter('data'), chapters)
chapters = chain.from_iterable(chapters)
return filter(partial(_contain, numbers), chapters)
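# Hedged sketch of the chapter-selection syntax handled by _contain above:
# a comma-separated mix of single chapters and "AtoB" ranges, e.g. "1,5to7".
# The Chapter stand-in is a hypothetical minimal object; real chapters come
# from vimanga's API and only need a `.number` attribute for this check.
def _example_chapter_filtering():
    from collections import namedtuple
    Chapter = namedtuple('Chapter', 'number')
    chapters = [Chapter(str(n)) for n in range(1, 11)]
    selected = [c for c in chapters if _contain("1,5to7", c)]
    return [c.number for c in selected]  # ['1', '5', '6', '7']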
|
the-stack_106_29875 | import cv2
import glob
import random
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
def summarize_data(ds, show_classes=False):
print("Number of training examples = ", ds.get_size_of_train())
print("Number of validation examples = ", ds.get_size_of_valid())
print("Number of testing examples = ", ds.get_size_of_test())
print("Image data shape = ", ds.get_size_of_image())
print("Number of classes (labels) = ", ds.get_num_of_classes())
if(show_classes):
print("The class (label) names are listed as below")
for cls in ds.get_classes():
print(ds.get_class_name(cls))
def plot_sign_images(ds):
"""
Plot one sign image for each class
"""
images, labels = ds.get_train()
visited = set()
demos = []
num_of_classes = ds.get_num_of_classes()
for i, class_id in enumerate(labels):
if class_id not in visited:
visited.add(class_id)
#print(ds.get_class_name(class_id))
demos.append(images[i])
if len(visited) == num_of_classes:
break
show_images(demos)
def show_images(images, cols=8, cmap=None):
"""
print images
"""
rows = (len(images) + cols - 1) // cols
    plt.figure(figsize=(20, 20))
for i, image in enumerate(images):
plt.subplot(rows, cols, i+1)
# use gray scale color map if there is only one channel
cmap = 'gray' if len(image.shape)==2 else cmap
plt.imshow(image, cmap=cmap)
plt.xticks([])
plt.yticks([])
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.show()
def generate_noisy_image(img):
def random_rotate(image):
if random.randrange(2) == 0:
angle = random.randrange(10)
else:
angle = random.randrange(10) * -1
M = cv2.getRotationMatrix2D((16, 16), angle, 1.0)
return cv2.warpAffine(image, M, (32, 32))
def random_blur(img):
"""
Blur the image
"""
# kernel size options: 1, 3
kernel_size = 2 * random.randrange(2) + 1
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def random_zoom(img):
"""
Zoom out a grayscale image
"""
inc = random.randrange(5)
return cv2.resize(img, (32 + 2*inc, ) * 2)[inc:inc+32, inc:inc+32]
return random_zoom(
random_blur(
random_rotate(img)
)
)
def get_new_data(image_size):
labels = []
images = []
names = []
for fname in glob.glob('new_images/*'):
labels.append(int(fname.split('/')[-1].split('-')[0]))
names.append(fname.split('/')[-1].split('-', 1)[1][:-4])
img = cv2.resize(mpimg.imread(fname), image_size, interpolation=cv2.INTER_CUBIC)
images.append(img)
labels = np.array(labels)
# apply our preprocessing: grayscale conversion and normalization
images = np.sum(np.array(images) / 3.0, axis=3, keepdims=True)
images = (images - 128.0) / 128.0
return images, labels
def show_new_data():
images = []
for fname in glob.glob('new_images/*'):
images.append(mpimg.imread(fname))
show_images(images, cols=5) |
the-stack_106_29878 | import sys
sys.path.insert(0, '../common_python')
import os
import pytest
import tools
def skeleton_jag_reconstruction_loss(cluster, executables, dir_name, compiler_name,
weekly, data_reader_percent):
if compiler_name not in executables:
e = 'skeleton_jag_reconstruction_loss: default_exes[%s] does not exist' % compiler_name
print('Skip - ' + e)
pytest.skip(e)
output_file_name = '%s/bamboo/unit_tests/output/jag_reconstruction_loss_%s_output.txt' % (dir_name, compiler_name)
error_file_name = '%s/bamboo/unit_tests/error/jag_reconstruction_loss_%s_error.txt' % (dir_name, compiler_name)
command = tools.get_command(
cluster=cluster,
executable=executables[compiler_name],
num_nodes=2,
num_processes=32,
disable_cuda=1,
dir_name=dir_name,
data_filedir_default='/p/lscratchh/brainusr/datasets/10MJAG/1M_A/100K4trainers',
data_reader_name='jag',
        data_reader_percent=data_reader_percent,
metadata='model_zoo/models/jag/wae_cycle_gan/jag_100M_metadata.prototext',
model_folder='tests',
model_name='jag_single_layer_ae',
optimizer_name='adam',
output_file_name=output_file_name,
error_file_name=error_file_name, weekly=weekly)
return_code = os.system(command)
tools.assert_success(return_code, error_file_name)
def test_unit_jag_reconstruction_loss_clang6(cluster, exes, dirname,
weekly, data_reader_percent):
skeleton_jag_reconstruction_loss(cluster, exes, dirname, 'clang6',
weekly, data_reader_percent)
def test_unit_jag_reconstruction_loss_gcc7(cluster, exes, dirname,
weekly, data_reader_percent):
skeleton_jag_reconstruction_loss(cluster, exes, dirname, 'gcc7',
weekly, data_reader_percent)
def test_unit_jag_reconstruction_loss_intel19(cluster, exes, dirname,
weekly, data_reader_percent):
skeleton_jag_reconstruction_loss(cluster, exes, dirname, 'intel19',
weekly, data_reader_percent)
# Run with python3 -m pytest -s test_unit_ridge_regression.py -k 'test_unit_jag_reconstruction_loss_exe' --exe=<executable>
def test_unit_jag_reconstruction_loss_exe(cluster, dirname, exe,
weekly, data_reader_percent):
if exe is None:
e = 'test_unit_jag_reconstruction_loss_exe: Non-local testing'
print('Skip - ' + e)
pytest.skip(e)
exes = {'exe': exe}
skeleton_jag_reconstruction_loss(cluster, exes, dirname, 'exe',
weekly, data_reader_percent)
|
the-stack_106_29879 | import numpy as np
from .pyscf_rks import rks_pyscf, get_vxc
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from functools import reduce
def kernel(ks):
fock = ks.oei + ks.vhxc
if ks.vext_1e is not None:
fock += ks.vext_1e
fock = 0.5*(fock.T + fock)
eigenvals, eigenvecs = np.linalg.eigh( fock )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = ks.Ne//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((ks.Norb))
if(ks.smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, ks.smear_sigma, Nocc, ks.Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - ks.Ne
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
rdm1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
rdm1 = 0.5*(rdm1.T + rdm1)
energy = np.trace(np.dot(rdm1,fock))
es = entropy_corr(mo_occ, ks.smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
ks.mo_occ = mo_occ
ks.mo_energy = eigenvals
ks.mo_coeff = eigenvecs
ks.rdm1 = rdm1
ks.elec_energy = energy
class rks_nonscf(rks_pyscf):
def __init__(self, Ne, Norb, mf_method, mol=None, vext_1e = None, oei=None, vhxc=None, tei=None, ovlp=1, dm0=None,\
coredm=0.0, ao2sub=1.0, level_shift=0.0, smear_sigma = 0.0, max_cycle = 50):
rks_pyscf.__init__(self, Ne, Norb, mf_method, mol, vext_1e, oei, tei, ovlp, dm0, coredm, ao2sub, level_shift, smear_sigma)
self.max_cycle = max_cycle
if self.dm_guess is None:
raise ValueError("dm0 has to be set since it's used as the fixed density")
if self.tei is None:
raise ValueError("tei has to be set")
Kcoeff = self._numint.hybrid_coeff(self.xc)
self.vhxc = vhxc
if self.vhxc is None:
self.vhxc = tools.dm2jk(self.dm_guess, self.tei, Kcoeff)
if(self.method != 'hf'):
vxc_ao = get_vxc(self, self.mol, self.coredm + tools.dm_sub2ao(self.dm_guess, ao2sub))[2]
self.vhxc += tools.op_ao2sub(vxc_ao, ao2sub)
kernel = kernel
|
the-stack_106_29880 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MeterOpenModel import MeterOpenModel
class ExerciseItemOpenModelThird(object):
def __init__(self):
self._desc = None
self._external_item_id = None
self._item_code = None
self._meter_list = None
self._name = None
self._parent_item_code = None
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def external_item_id(self):
return self._external_item_id
@external_item_id.setter
def external_item_id(self, value):
self._external_item_id = value
@property
def item_code(self):
return self._item_code
@item_code.setter
def item_code(self, value):
self._item_code = value
@property
def meter_list(self):
return self._meter_list
@meter_list.setter
def meter_list(self, value):
if isinstance(value, MeterOpenModel):
self._meter_list = value
else:
self._meter_list = MeterOpenModel.from_alipay_dict(value)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def parent_item_code(self):
return self._parent_item_code
@parent_item_code.setter
def parent_item_code(self, value):
self._parent_item_code = value
def to_alipay_dict(self):
params = dict()
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.external_item_id:
if hasattr(self.external_item_id, 'to_alipay_dict'):
params['external_item_id'] = self.external_item_id.to_alipay_dict()
else:
params['external_item_id'] = self.external_item_id
if self.item_code:
if hasattr(self.item_code, 'to_alipay_dict'):
params['item_code'] = self.item_code.to_alipay_dict()
else:
params['item_code'] = self.item_code
if self.meter_list:
if hasattr(self.meter_list, 'to_alipay_dict'):
params['meter_list'] = self.meter_list.to_alipay_dict()
else:
params['meter_list'] = self.meter_list
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.parent_item_code:
if hasattr(self.parent_item_code, 'to_alipay_dict'):
params['parent_item_code'] = self.parent_item_code.to_alipay_dict()
else:
params['parent_item_code'] = self.parent_item_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ExerciseItemOpenModelThird()
if 'desc' in d:
o.desc = d['desc']
if 'external_item_id' in d:
o.external_item_id = d['external_item_id']
if 'item_code' in d:
o.item_code = d['item_code']
if 'meter_list' in d:
o.meter_list = d['meter_list']
if 'name' in d:
o.name = d['name']
if 'parent_item_code' in d:
o.parent_item_code = d['parent_item_code']
return o
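# Hedged sketch of the generated model's dict round-trip, using only the class
# defined above. The field values are made up for illustration; in real use the
# dict would normally come from an Alipay API payload.
def _example_round_trip():
    model = ExerciseItemOpenModelThird()
    model.name = 'Treadmill'
    model.item_code = 'ITEM001'
    model.desc = 'Example exercise item'
    payload = model.to_alipay_dict()
    restored = ExerciseItemOpenModelThird.from_alipay_dict(payload)
    return restored.name == model.name and restored.item_code == model.item_code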
|
the-stack_106_29881 | import pytest
@pytest.fixture
def make_params(tmpdir):
p = tmpdir.mkdir("folder").join("test.txt")
p.write("test")
params = {
"dest_mailbox": "TESTMB",
"message_location": str(p),
"workflow_id": "TESTWF",
"message_subject": "TESTSUB",
"message_id": "TESTID",
"process_id": "TESTPROC",
"compress_message": True,
"encrypted": True,
}
return params
def track_args(**kwargs):
return kwargs
@pytest.fixture
def patch_message(mesh_connection, monkeypatch):
monkeypatch.setattr(mesh_connection, "send_message", track_args)
return mesh_connection
def test_SendFile_HandlesParams(patch_message, make_params):
params = patch_message.send_file(**make_params)
assert params == {
"dest_mailbox": "TESTMB",
"message": b"test",
"filename": "test.txt",
"workflow_id": "TESTWF",
"message_subject": "TESTSUB",
"message_id": "TESTID",
"process_id": "TESTPROC",
"compress_message": True,
"encrypted": True,
}
|
the-stack_106_29883 | # Mostly based on the code written by Tinghui Zhou:
# https://github.com/tinghuiz/SfMLearner/blob/master/utils.py
from __future__ import division
import numpy as np
import tensorflow as tf
def euler2mat(z, y, x):
"""Converts euler angles to rotation matrix
TODO: remove the dimension for 'N' (deprecated for converting all source
poses altogether)
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
Args:
z: rotation angle along z axis (in radians) -- size = [B, N]
y: rotation angle along y axis (in radians) -- size = [B, N]
x: rotation angle along x axis (in radians) -- size = [B, N]
Returns:
Rotation matrix corresponding to the euler angles -- size = [B, N, 3, 3]
"""
B = tf.shape(z)[0]
N = 1
z = tf.clip_by_value(z, -np.pi, np.pi)
y = tf.clip_by_value(y, -np.pi, np.pi)
x = tf.clip_by_value(x, -np.pi, np.pi)
# Expand to B x N x 1 x 1
z = tf.expand_dims(tf.expand_dims(z, -1), -1)
y = tf.expand_dims(tf.expand_dims(y, -1), -1)
x = tf.expand_dims(tf.expand_dims(x, -1), -1)
zeros = tf.zeros([B, N, 1, 1])
ones = tf.ones([B, N, 1, 1])
cosz = tf.cos(z)
sinz = tf.sin(z)
rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)
cosy = tf.cos(y)
siny = tf.sin(y)
roty_1 = tf.concat([cosy, zeros, siny], axis=3)
roty_2 = tf.concat([zeros, ones, zeros], axis=3)
roty_3 = tf.concat([-siny,zeros, cosy], axis=3)
ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)
cosx = tf.cos(x)
sinx = tf.sin(x)
rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)
rotMat = tf.matmul(tf.matmul(xmat, ymat), zmat)
return rotMat
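# Hedged NumPy reference for euler2mat above, for scalar angles. It mirrors the
# same convention as the graph ops: R = Rx . Ry . Rz, with the Z rotation
# applied first. Intended as a readable cross-check, not part of the graph.
def _euler2mat_numpy_reference(z, y, x):
    cz, sz = np.cos(z), np.sin(z)
    cy, sy = np.cos(y), np.sin(y)
    cx, sx = np.cos(x), np.sin(x)
    zmat = np.array([[cz, -sz, 0.], [sz, cz, 0.], [0., 0., 1.]])
    ymat = np.array([[cy, 0., sy], [0., 1., 0.], [-sy, 0., cy]])
    xmat = np.array([[1., 0., 0.], [0., cx, -sx], [0., sx, cx]])
    return xmat.dot(ymat).dot(zmat)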
def pose_vec2mat(vec):
"""Converts 6DoF parameters to transformation matrix
Args:
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
Returns:
A transformation matrix -- [B, 4, 4]
"""
batch_size, _ = vec.get_shape().as_list()
translation = tf.slice(vec, [0, 0], [-1, 3])
translation = tf.expand_dims(translation, -1)
rx = tf.slice(vec, [0, 3], [-1, 1])
ry = tf.slice(vec, [0, 4], [-1, 1])
rz = tf.slice(vec, [0, 5], [-1, 1])
rot_mat = euler2mat(rz, ry, rx)
rot_mat = tf.squeeze(rot_mat, axis=[1])
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch_size, 1, 1])
transform_mat = tf.concat([rot_mat, translation], axis=2)
transform_mat = tf.concat([transform_mat, filler], axis=1)
return transform_mat
def pixel2cam(depth, pixel_coords, intrinsics, is_homogeneous=True):
"""Transforms coordinates in the pixel frame to the camera frame.
Args:
depth: [batch, height, width]
pixel_coords: homogeneous pixel coordinates [batch, 3, height, width]
intrinsics: camera intrinsics [batch, 3, 3]
is_homogeneous: return in homogeneous coordinates
Returns:
Coords in the camera frame [batch, 3 (4 if homogeneous), height, width]
"""
batch, height, width = depth.get_shape().as_list()
depth = tf.reshape(depth, [batch, 1, -1])
pixel_coords = tf.reshape(pixel_coords, [batch, 3, -1])
cam_coords = tf.matmul(tf.matrix_inverse(intrinsics), pixel_coords) * depth
if is_homogeneous:
ones = tf.ones([batch, 1, height*width])
cam_coords = tf.concat([cam_coords, ones], axis=1)
cam_coords = tf.reshape(cam_coords, [batch, -1, height, width])
return cam_coords
def cam2pixel(cam_coords, proj):
"""Transforms coordinates in a camera frame to the pixel frame.
Args:
cam_coords: [batch, 4, height, width]
proj: [batch, 4, 4]
Returns:
Pixel coordinates projected from the camera frame [batch, height, width, 2]
"""
batch, _, height, width = cam_coords.get_shape().as_list()
cam_coords = tf.reshape(cam_coords, [batch, 4, -1])
unnormalized_pixel_coords = tf.matmul(proj, cam_coords)
x_u = tf.slice(unnormalized_pixel_coords, [0, 0, 0], [-1, 1, -1])
y_u = tf.slice(unnormalized_pixel_coords, [0, 1, 0], [-1, 1, -1])
z_u = tf.slice(unnormalized_pixel_coords, [0, 2, 0], [-1, 1, -1])
x_n = x_u / (z_u + 1e-10)
y_n = y_u / (z_u + 1e-10)
pixel_coords = tf.concat([x_n, y_n], axis=1)
pixel_coords = tf.reshape(pixel_coords, [batch, 2, height, width])
return tf.transpose(pixel_coords, perm=[0, 2, 3, 1])
def meshgrid(batch, height, width, is_homogeneous=True):
"""Construct a 2D meshgrid.
Args:
batch: batch size
height: height of the grid
width: width of the grid
is_homogeneous: whether to return in homogeneous coordinates
Returns:
x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
"""
x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
tf.transpose(tf.expand_dims(
tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
tf.ones(shape=tf.stack([1, width])))
x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
if is_homogeneous:
ones = tf.ones_like(x_t)
coords = tf.stack([x_t, y_t, ones], axis=0)
else:
coords = tf.stack([x_t, y_t], axis=0)
coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
return coords
def flow_warp(src_img, flow):
""" inverse warp a source image to the target image plane based on flow field
Args:
src_img: the source image [batch, height_s, width_s, 3]
flow: target image to source image flow [batch, height_t, width_t, 2]
Returns:
Source image inverse warped to the target image plane [batch, height_t, width_t, 3]
"""
batch, height, width, _ = src_img.get_shape().as_list()
tgt_pixel_coords = tf.transpose(meshgrid(batch, height, width, False),
[0, 2, 3, 1])
src_pixel_coords = tgt_pixel_coords + flow
output_img = bilinear_sampler(src_img, src_pixel_coords)
return output_img
def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False):
"""Compute the rigid flow from target image plane to source image
Args:
depth: depth map of the target image [batch, height_t, width_t]
pose: target to source (or source to target if reverse_pose=True)
camera transformation matrix [batch, 6], in the order of
tx, ty, tz, rx, ry, rz;
intrinsics: camera intrinsics [batch, 3, 3]
Returns:
Rigid flow from target image to source image [batch, height_t, width_t, 2]
"""
batch, height, width = depth.get_shape().as_list()
# Convert pose vector to matrix
pose = pose_vec2mat(pose)
if reverse_pose:
pose = tf.matrix_inverse(pose)
# Construct pixel grid coordinates
pixel_coords = meshgrid(batch, height, width)
tgt_pixel_coords = tf.transpose(pixel_coords[:,:2,:,:], [0, 2, 3, 1])
# Convert pixel coordinates to the camera frame
cam_coords = pixel2cam(depth, pixel_coords, intrinsics)
# Construct a 4x4 intrinsic matrix
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch, 1, 1])
intrinsics = tf.concat([intrinsics, tf.zeros([batch, 3, 1])], axis=2)
intrinsics = tf.concat([intrinsics, filler], axis=1)
# Get a 4x4 transformation matrix from 'target' camera frame to 'source'
# pixel frame.
proj_tgt_cam_to_src_pixel = tf.matmul(intrinsics, pose)
src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel)
rigid_flow = src_pixel_coords - tgt_pixel_coords
return rigid_flow
def bilinear_sampler(imgs, coords):
"""Construct a new image by bilinear sampling from the input image.
Points falling outside the source image boundary have value 0.
Args:
imgs: source image to be sampled from [batch, height_s, width_s, channels]
coords: coordinates of source pixels to sample from [batch, height_t,
width_t, 2]. height_t/width_t correspond to the dimensions of the output
image (don't need to be the same as height_s/width_s). The two channels
correspond to x and y coordinates respectively.
Returns:
A new sampled image [batch, height_t, width_t, channels]
"""
def _repeat(x, n_repeats):
rep = tf.transpose(
tf.expand_dims(tf.ones(shape=tf.stack([
n_repeats,
])), 1), [1, 0])
rep = tf.cast(rep, 'float32')
x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
return tf.reshape(x, [-1])
with tf.name_scope('image_sampling'):
coords_x, coords_y = tf.split(coords, [1, 1], axis=3)
inp_size = imgs.get_shape()
coord_size = coords.get_shape()
out_size = coords.get_shape().as_list()
out_size[3] = imgs.get_shape().as_list()[3]
coords_x = tf.cast(coords_x, 'float32')
coords_y = tf.cast(coords_y, 'float32')
x0 = tf.floor(coords_x)
x1 = x0 + 1
y0 = tf.floor(coords_y)
y1 = y0 + 1
y_max = tf.cast(tf.shape(imgs)[1] - 1, 'float32')
x_max = tf.cast(tf.shape(imgs)[2] - 1, 'float32')
zero = tf.zeros([1], dtype='float32')
x0_safe = tf.clip_by_value(x0, zero, x_max)
y0_safe = tf.clip_by_value(y0, zero, y_max)
x1_safe = tf.clip_by_value(x1, zero, x_max)
y1_safe = tf.clip_by_value(y1, zero, y_max)
## bilinear interp weights, with points outside the grid having weight 0
# wt_x0 = (x1 - coords_x) * tf.cast(tf.equal(x0, x0_safe), 'float32')
# wt_x1 = (coords_x - x0) * tf.cast(tf.equal(x1, x1_safe), 'float32')
# wt_y0 = (y1 - coords_y) * tf.cast(tf.equal(y0, y0_safe), 'float32')
# wt_y1 = (coords_y - y0) * tf.cast(tf.equal(y1, y1_safe), 'float32')
wt_x0 = x1_safe - coords_x
wt_x1 = coords_x - x0_safe
wt_y0 = y1_safe - coords_y
wt_y1 = coords_y - y0_safe
## indices in the flat image to sample from
dim2 = tf.cast(inp_size[2], 'float32')
dim1 = tf.cast(inp_size[2] * inp_size[1], 'float32')
base = tf.reshape(
_repeat(
tf.cast(tf.range(coord_size[0]), 'float32') * dim1,
coord_size[1] * coord_size[2]),
[out_size[0], out_size[1], out_size[2], 1])
base_y0 = base + y0_safe * dim2
base_y1 = base + y1_safe * dim2
idx00 = tf.reshape(x0_safe + base_y0, [-1])
idx01 = x0_safe + base_y1
idx10 = x1_safe + base_y0
idx11 = x1_safe + base_y1
## sample from imgs
imgs_flat = tf.reshape(imgs, tf.stack([-1, inp_size[3]]))
imgs_flat = tf.cast(imgs_flat, 'float32')
im00 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx00, 'int32')), out_size)
im01 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx01, 'int32')), out_size)
im10 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx10, 'int32')), out_size)
im11 = tf.reshape(tf.gather(imgs_flat, tf.cast(idx11, 'int32')), out_size)
w00 = wt_x0 * wt_y0
w01 = wt_x0 * wt_y1
w10 = wt_x1 * wt_y0
w11 = wt_x1 * wt_y1
output = tf.add_n([
w00 * im00, w01 * im01,
w10 * im10, w11 * im11
])
return output
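# Hedged NumPy reference for bilinear_sampler above, for a single (x, y)
# location of one image. It clamps to the image border rather than reproducing
# the graph version's exact boundary weighting, so it matches for interior
# points only; cross-check material, not used by the model.
def _bilinear_sample_numpy_reference(img, x, y):
    h, w = img.shape[:2]
    x0 = int(np.clip(np.floor(x), 0, w - 1))
    y0 = int(np.clip(np.floor(y), 0, h - 1))
    x1 = min(x0 + 1, w - 1)
    y1 = min(y0 + 1, h - 1)
    wx1, wy1 = x - x0, y - y0
    wx0, wy0 = 1.0 - wx1, 1.0 - wy1
    return (wx0 * wy0 * img[y0, x0] + wx1 * wy0 * img[y0, x1]
            + wx0 * wy1 * img[y1, x0] + wx1 * wy1 * img[y1, x1])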
|
the-stack_106_29885 | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module containing ART's exceptions.
"""
class ClassifierError(TypeError):
"""
Basic exception for errors raised by unexpected classifier types.
"""
def __init__(self, this_class, class_expected_list, classifier_given):
self.this_class = this_class
self.class_expected_list = class_expected_list
self.classifier_given = classifier_given
classes_expected_message = ""
for idx, class_expected in enumerate(class_expected_list):
if idx == 0:
classes_expected_message += "{0}".format(class_expected)
else:
classes_expected_message += " and {0}".format(class_expected)
self.message = (
"For {0} classifier must be an instance of {1}, "
"the provided classifier is instance of {2}.".format(
this_class.__name__, classes_expected_message, classifier_given
)
)
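# Hedged sketch of raising ClassifierError when an unsupported classifier type
# is supplied. `SomeAttack` and `ExpectedMixin` are placeholder names invented
# for this example, not real ART classes.
def _example_classifier_error():
    class ExpectedMixin:
        pass
    class SomeAttack:
        pass
    given = "not-a-classifier"
    try:
        raise ClassifierError(SomeAttack, [ExpectedMixin], type(given))
    except ClassifierError as err:
        return err.message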
|
the-stack_106_29887 | import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
import gym
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
import cv2
import random
from tqdm import tqdm
import time
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
img_height = 125
img_width = 400
img_transforms = [
transforms.Resize((img_height, img_width)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
img_trans = transforms.Compose(img_transforms)
class Memory(object):
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
self.waypoint = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
del self.waypoint[:]
class ActorCritic(nn.Module):
def __init__(self, critic_state_dim, actor_input_dim, action_dim, action_std):
super(ActorCritic, self).__init__()
# action mean range -1 to 1
self.actor_conv = nn.Sequential(
nn.Conv2d(actor_input_dim, 32, 5, stride=3, padding=2), nn.LeakyReLU(), nn.MaxPool2d(2, 2),
nn.Conv2d(32, 64, 5, stride=4, padding=2), nn.LeakyReLU(), nn.MaxPool2d(2, 2),
nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.LeakyReLU(), nn.MaxPool2d(2, 2),
nn.Conv2d(128, 256, 3, stride=2, padding=1), nn.LeakyReLU(),
)
self.actor_mlp = nn.Sequential(
nn.Linear(256, 128), nn.LeakyReLU(),
nn.Linear(128, 64), nn.LeakyReLU(),
nn.Linear(64, 32), nn.LeakyReLU(),
nn.Linear(32, 1), nn.Tanh()
)
# critic
self.critic = nn.Sequential(
nn.Linear(critic_state_dim, 64), nn.ReLU(),
nn.Linear(64, 128), nn.ReLU(),
nn.Linear(128, 32), nn.ReLU(),
nn.Linear(32, 1)
)
self.action_var = torch.full((action_dim,), action_std*action_std).to(device)
def forward(self):
raise NotImplementedError
def act(self, state, memory, waypoint, is_test):
state_cuda = state.to(device)
action_middle = self.actor_conv(state_cuda)
action_middle = action_middle.view(-1, 256)
action_mean = self.actor_mlp(action_middle)
# print(action_mean)
# action_mean = self.actor(state)
        if not is_test:
cov_mat = torch.diag(self.action_var).to(device)
dist = MultivariateNormal(action_mean, cov_mat)
action = dist.sample()
action_logprob = dist.log_prob(action)
# print(action_logprob)
memory.states.append(state.detach().cpu())
memory.actions.append(action.detach().cpu())
memory.logprobs.append(action_logprob.detach().cpu())
memory.waypoint.append(waypoint.detach().cpu())
        return action.detach() if not is_test else action_mean.detach()
def evaluate(self, state, action, waypoint):
# action_mean = self.actor(state)
action_middle = self.actor_conv(state)
action_middle = action_middle.view(-1, 256)
action_mean = self.actor_mlp(action_middle)
action_var = self.action_var.expand_as(action_mean)
cov_mat = torch.diag_embed(action_var).to(device)
dist = MultivariateNormal(action_mean, cov_mat)
action_logprobs = dist.log_prob(action)
dist_entropy = dist.entropy()
state_value = self.critic(waypoint)
return action_logprobs, torch.squeeze(state_value), dist_entropy
class PPO(object):
def __init__(self, state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip):
self.lr = lr
self.betas = betas
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
# self.policy = ActorCritic(state_dim, action_dim, action_std).to(device)
self.policy = ActorCritic(critic_state_dim=state_dim, action_std=action_std,actor_input_dim=4, action_dim=1).to(device)
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
# self.policy_old = ActorCritic(state_dim, action_dim, action_std).to(device)
self.policy_old = ActorCritic(critic_state_dim=state_dim, action_std=action_std,actor_input_dim=4, action_dim=1).to(device)
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def select_action(self, state, memory, waypoint, is_test=False):
# state = torch.FloatTensor(state.reshape(1, -1)).to(device)
# img = Image.fromarray(cv2.cvtColor(state,cv2.COLOR_BGR2RGB))
# state = img_trans(img).unsqueeze(0)
state = state.unsqueeze(0)
waypoint = torch.FloatTensor(waypoint.reshape(1, -1)).to(device)
return self.policy_old.act(state, memory, waypoint, is_test).cpu().data.numpy().flatten()
def update(self, memory):
# Monte Carlo estimate of rewards:
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
# Normalizing the rewards:
rewards_np = np.array(rewards)
# rewards = torch.tensor(rewards, dtype=torch.float32).to(device)
# rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# convert list to tensor
# old_states = torch.squeeze(torch.stack(memory.states).to(device), 1).detach()
# old_actions = torch.squeeze(torch.stack(memory.actions).to(device), 1).detach()
# old_logprobs = torch.squeeze(torch.stack(memory.logprobs), 1).to(device).detach()
# old_waypoint = torch.squeeze(torch.stack(memory.waypoint), 1).to(device).detach()
batch_size = 200
# Optimize policy for K epochs:
# for _ in range(self.K_epochs):
for i in tqdm(range(self.K_epochs), desc='Update policy'):
index = np.random.choice(len(memory.actions), size=batch_size)
states = np.array([t.squeeze(0).numpy() for t in memory.states])
actions = np.array(memory.actions)
logprobs = np.array(memory.logprobs)
waypoints = np.array([w.squeeze(0).numpy() for w in memory.waypoint])
batch_states = states[index]
batch_actions = actions[index]
batch_logprobs = logprobs[index]
batch_waypoints = waypoints[index]
batch_rewards = rewards_np[index]
old_states = torch.from_numpy(batch_states).to(device)
old_actions = torch.from_numpy(batch_actions).unsqueeze(1).to(device)
old_logprobs = torch.from_numpy(batch_logprobs).to(device)
old_waypoint = torch.from_numpy(batch_waypoints).to(device)
rewards = torch.tensor(batch_rewards, dtype=torch.float32).to(device)
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
# Evaluating old actions and values :
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions, old_waypoint)
# Finding the ratio (pi_theta / pi_theta__old):
ratios = torch.exp(logprobs - old_logprobs.detach())
# Finding Surrogate Loss:
advantages = rewards - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values, rewards) - 0.01*dist_entropy
# take gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
# Copy new weights into old policy:
self.policy_old.load_state_dict(self.policy.state_dict())
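# Hedged NumPy sketch of the clipped surrogate term computed inside update():
# L = -min(r * A, clip(r, 1 - eps, 1 + eps) * A), with r = exp(logpi - logpi_old).
# The inputs are made-up arrays; it only illustrates the arithmetic, detached
# from the networks and from the value-loss and entropy terms.
def _clipped_surrogate_reference(logprobs, old_logprobs, advantages, eps_clip=0.2):
    ratios = np.exp(np.asarray(logprobs) - np.asarray(old_logprobs))
    advantages = np.asarray(advantages)
    surr1 = ratios * advantages
    surr2 = np.clip(ratios, 1.0 - eps_clip, 1.0 + eps_clip) * advantages
    return -np.minimum(surr1, surr2).mean()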
def main():
############## Hyperparameters ##############
env_name = "BipedalWalker-v3"
render = False
solved_reward = 300 # stop training if avg_reward > solved_reward
log_interval = 2 # print avg reward in the interval
max_episodes = 10000 # max training episodes
max_timesteps = 1500 # max timesteps in one episode
update_timestep = 2000 # update policy every n timesteps
action_std = 0.5 # constant std for action distribution (Multivariate Normal)
K_epochs = 80 # update policy for K epochs
eps_clip = 0.2 # clip parameter for PPO
gamma = 0.99 # discount factor
lr = 0.0003 # parameters for Adam optimizer
betas = (0.9, 0.999)
random_seed = None
#############################################
# creating environment
env = gym.make(env_name)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
if random_seed:
print("Random Seed: {}".format(random_seed))
torch.manual_seed(random_seed)
env.seed(random_seed)
np.random.seed(random_seed)
memory = Memory()
ppo = PPO(state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip)
print(lr,betas)
# logging variables
running_reward = 0
avg_length = 0
time_step = 0
# training loop
for i_episode in range(1, max_episodes+1):
state = env.reset()
for t in range(max_timesteps):
time_step +=1
# Running policy_old:
action = ppo.select_action(state, memory)
state, reward, done, _ = env.step(action)
# Saving reward and is_terminals:
memory.rewards.append(reward)
memory.is_terminals.append(done)
# update if its time
if time_step % update_timestep == 0:
ppo.update(memory)
memory.clear_memory()
time_step = 0
running_reward += reward
if render:
env.render()
if done:
break
avg_length += t
# stop training if avg_reward > solved_reward
if running_reward > (log_interval*solved_reward):
print("########## Solved! ##########")
torch.save(ppo.policy.state_dict(), './PPO_continuous_solved_{}.pth'.format(env_name))
break
# save every 500 episodes
if i_episode % 500 == 0:
torch.save(ppo.policy.state_dict(), './PPO_continuous_{}.pth'.format(env_name))
# logging
if i_episode % log_interval == 0:
avg_length = int(avg_length/log_interval)
running_reward = int((running_reward/log_interval))
print('Episode {} \t Avg length: {} \t Avg reward: {}'.format(i_episode, avg_length, running_reward))
running_reward = 0
avg_length = 0
if __name__ == '__main__':
main()
# index = np.random.choice(len(memory.actions), size=batch_size)
# states = np.array(memory.states)
# actions = np.array(memory.actions)
# logprobs = np.array(memory.logprobs)
# waypoints = np.array(memory.waypoint)
# states_tensor = torch.FloatTensor(states[index])
# actions_tensor = torch.FloatTensor(actions[index])
# logprobs_tensor = torch.FloatTensor(logprobs[index])
# waypoints_tensor = torch.FloatTensor(waypoints[index])
# print(actions_tensor)
# old_states = torch.squeeze(torch.stack(states_tensor).to(device), 1).detach()
# old_actions = torch.squeeze(torch.stack(actions_tensor).to(device), 1).detach()
# old_logprobs = torch.squeeze(torch.stack(logprobs_tensor), 1).to(device).detach()
# old_waypoint = torch.squeeze(torch.stack(waypoints_tensor), 1).to(device).detach()
|
the-stack_106_29888 | # -*- coding: utf-8 -*-
import datetime
from functools import reduce
import json
import operator
import os
import uuid
try:
import configparser
except ImportError:
import ConfigParser as configparser
import arrow
import click
import requests
from .config import ConfigParser
from .frames import Frames
from .utils import deduplicate, make_json_writer, safe_save, sorted_groupby
from .version import version as __version__ # noqa
class WatsonError(RuntimeError):
pass
class ConfigurationError(configparser.Error, WatsonError):
pass
class Watson(object):
def __init__(self, **kwargs):
"""
:param frames: If given, should be a list representing the
frames.
If not given, the value is extracted
from the frames file.
:type frames: list
:param current: If given, should be a dict representing the
current frame.
If not given, the value is extracted
from the state file.
:type current: dict
:param config_dir: If given, the directory where the configuration
                           files will be read and written.
"""
self._current = None
self._old_state = None
self._frames = None
self._last_sync = None
self._config = None
self._config_changed = False
self._dir = (kwargs.pop('config_dir', None) or
click.get_app_dir('watson'))
self.config_file = os.path.join(self._dir, 'config')
self.frames_file = os.path.join(self._dir, 'frames')
self.state_file = os.path.join(self._dir, 'state')
self.last_sync_file = os.path.join(self._dir, 'last_sync')
if 'frames' in kwargs:
self.frames = kwargs['frames']
if 'current' in kwargs:
self.current = kwargs['current']
if 'last_sync' in kwargs:
self.last_sync = kwargs['last_sync']
def _load_json_file(self, filename, type=dict):
"""
Return the content of the the given JSON file.
If the file doesn't exist, return an empty instance of the
given type.
"""
try:
with open(filename) as f:
return json.load(f)
except IOError:
return type()
except ValueError as e:
# If we get an error because the file is empty, we ignore
# it and return an empty dict. Otherwise, we raise
# an exception in order to avoid corrupting the file.
if os.path.getsize(filename) == 0:
return type()
else:
raise WatsonError(
u"Invalid JSON file {}: {}".format(filename, e)
)
except Exception as e:
raise WatsonError(
u"Unexpected error while loading JSON file {}: {}".format(
filename, e
)
)
def _parse_date(self, date):
return arrow.Arrow.utcfromtimestamp(date).to('local')
def _format_date(self, date):
if not isinstance(date, arrow.Arrow):
date = arrow.get(date)
return date.timestamp
@property
def config(self):
"""
Return Watson's config as a ConfigParser object.
"""
if not self._config:
try:
config = ConfigParser()
config.read(self.config_file)
except configparser.Error as e:
raise ConfigurationError(
u"Cannot parse config file: {}".format(e))
self._config = config
return self._config
@config.setter
def config(self, value):
"""
Set a ConfigParser object as the current configuration.
"""
self._config = value
self._config_changed = True
def save(self):
"""
Save the state in the appropriate files. Create them if necessary.
"""
try:
if not os.path.isdir(self._dir):
os.makedirs(self._dir)
if self._current is not None and self._old_state != self._current:
if self.is_started:
current = {
'project': self.current['project'],
'start': self._format_date(self.current['start']),
'tags': self.current['tags'],
}
else:
current = {}
safe_save(self.state_file, make_json_writer(lambda: current))
self._old_state = current
if self._frames is not None and self._frames.changed:
safe_save(self.frames_file,
make_json_writer(self.frames.dump))
if self._config_changed:
safe_save(self.config_file, self.config.write)
if self._last_sync is not None:
safe_save(self.last_sync_file,
make_json_writer(self._format_date, self.last_sync))
except OSError as e:
raise WatsonError(
u"Impossible to write {}: {}".format(e.filename, e)
)
@property
def frames(self):
if self._frames is None:
self.frames = self._load_json_file(self.frames_file, type=list)
return self._frames
@frames.setter
def frames(self, frames):
self._frames = Frames(frames)
@property
def current(self):
if self._current is None:
self.current = self._load_json_file(self.state_file)
if self._old_state is None:
self._old_state = self._current
return dict(self._current)
@current.setter
def current(self, value):
if not value or 'project' not in value:
self._current = {}
if self._old_state is None:
self._old_state = {}
return
start = value.get('start', arrow.now())
if not isinstance(start, arrow.Arrow):
start = self._parse_date(start)
self._current = {
'project': value['project'],
'start': start,
'tags': value.get('tags') or []
}
if self._old_state is None:
self._old_state = self._current
@property
def last_sync(self):
if self._last_sync is None:
self.last_sync = self._load_json_file(
self.last_sync_file, type=int
)
return self._last_sync
@last_sync.setter
def last_sync(self, value):
if not value:
self._last_sync = arrow.get(0)
return
if not isinstance(value, arrow.Arrow):
value = self._parse_date(value)
self._last_sync = value
@property
def is_started(self):
return bool(self.current)
def add(self, project, from_date, to_date, tags):
if not project:
raise WatsonError("No project given.")
if from_date > to_date:
raise WatsonError("Task cannot end before it starts.")
default_tags = self.config.getlist('default_tags', project)
tags = (tags or []) + default_tags
frame = self.frames.add(project, from_date, to_date, tags=tags)
return frame
def start(self, project, tags=None, restart=False):
if not project:
raise WatsonError("No project given.")
if self.is_started:
raise WatsonError(
u"Project {} is already started.".format(
self.current['project']
)
)
default_tags = self.config.getlist('default_tags', project)
if not restart:
tags = (tags or []) + default_tags
self.current = {'project': project, 'tags': deduplicate(tags)}
return self.current
def stop(self):
if not self.is_started:
raise WatsonError("No project started.")
old = self.current
frame = self.frames.add(
old['project'], old['start'], arrow.now(), tags=old['tags']
)
self.current = None
return frame
def cancel(self):
if not self.is_started:
raise WatsonError("No project started.")
old_current = self.current
self.current = None
return old_current
@property
def projects(self):
"""
Return the list of all the existing projects, sorted by name.
"""
return sorted(set(self.frames['project']))
@property
def tags(self):
"""
Return the list of the tags, sorted by name.
"""
return sorted(set(tag for tags in self.frames['tags'] for tag in tags))
def _get_request_info(self, route):
config = self.config
dest = config.get('backend', 'url')
token = config.get('backend', 'token')
if dest and token:
dest = u"{}/{}/".format(
dest.rstrip('/'),
route.strip('/')
)
else:
raise ConfigurationError(
"You must specify a remote URL (backend.url) and a token "
"(backend.token) using the config command."
)
headers = {
'content-type': 'application/json',
'Authorization': "Token {}".format(token)
}
return dest, headers
def _get_remote_projects(self):
if not hasattr(self, '_remote_projects'):
dest, headers = self._get_request_info('projects')
try:
response = requests.get(dest, headers=headers)
assert response.status_code == 200
self._remote_projects = response.json()
except requests.ConnectionError:
raise WatsonError("Unable to reach the server.")
except AssertionError:
raise WatsonError(
u"An error occurred with the remote "
"server: {}".format(response.json())
)
return self._remote_projects['projects']
def pull(self):
dest, headers = self._get_request_info('frames')
try:
response = requests.get(
dest, params={'last_sync': self.last_sync}, headers=headers
)
assert response.status_code == 200
except requests.ConnectionError:
raise WatsonError("Unable to reach the server.")
except AssertionError:
raise WatsonError(
u"An error occurred with the remote "
"server: {}".format(response.json())
)
frames = response.json() or ()
for frame in frames:
frame_id = uuid.UUID(frame['id']).hex
self.frames[frame_id] = (
frame['project'],
frame['start_at'],
frame['end_at'],
frame['tags']
)
return frames
def push(self, last_pull):
dest, headers = self._get_request_info('frames/bulk')
frames = []
for frame in self.frames:
if last_pull > frame.updated_at > self.last_sync:
frames.append({
'id': uuid.UUID(frame.id).urn,
'start_at': str(frame.start.to('utc')),
'end_at': str(frame.stop.to('utc')),
'project': frame.project,
'tags': frame.tags
})
try:
response = requests.post(dest, json.dumps(frames), headers=headers)
assert response.status_code == 201
except requests.ConnectionError:
raise WatsonError("Unable to reach the server.")
except AssertionError:
raise WatsonError(
u"An error occurred with the remote server (status: {}). "
u"Response was:\n{}".format(
response.status_code,
response.text
)
)
return frames
def merge_report(self, frames_with_conflict):
conflict_file_frames = Frames(self._load_json_file(
frames_with_conflict, type=list))
conflicting = []
merging = []
for conflict_frame in conflict_file_frames:
try:
original_frame = self.frames[conflict_frame.id]
if original_frame != conflict_frame:
# frame from conflict frames file conflicts with frame
# from original frames file
conflicting.append(conflict_frame)
except KeyError:
# conflicting frame doesn't exist in original frame
merging.append(conflict_frame)
return conflicting, merging
def report(self, from_, to, current=None, projects=None, tags=None,
year=None, month=None, week=None, day=None, luna=None,
all=None):
for start_time in (_ for _ in [day, week, month, year, luna, all]
if _ is not None):
from_ = start_time
if from_ > to:
raise WatsonError("'from' must be anterior to 'to'")
if tags is None:
tags = []
if self.current:
if current or (current is None and
self.config.getboolean(
'options', 'report_current')):
cur = self.current
self.frames.add(cur['project'], cur['start'], arrow.utcnow(),
cur['tags'], id="current")
span = self.frames.span(from_, to)
frames_by_project = sorted_groupby(
self.frames.filter(
projects=projects or None, tags=tags or None, span=span
),
operator.attrgetter('project')
)
total = datetime.timedelta()
report = {
'timespan': {
'from': str(span.start),
'to': str(span.stop),
},
'projects': []
}
for project, frames in frames_by_project:
frames = tuple(frames)
delta = reduce(
operator.add,
(f.stop - f.start for f in frames),
datetime.timedelta()
)
total += delta
project_report = {
'name': project,
'time': delta.total_seconds(),
'tags': []
}
tags_to_print = sorted(
set(tag for frame in frames for tag in frame.tags
if tag in tags or not tags)
)
for tag in tags_to_print:
delta = reduce(
operator.add,
(f.stop - f.start for f in frames if tag in f.tags),
datetime.timedelta()
)
project_report['tags'].append({
'name': tag,
'time': delta.total_seconds()
})
report['projects'].append(project_report)
report['time'] = total.total_seconds()
return report
def rename_project(self, old_project, new_project):
"""Rename a project in all affected frames."""
if old_project not in self.projects:
raise ValueError(u'Project "%s" does not exist' % old_project)
updated_at = arrow.utcnow()
# rename project
for frame in self.frames:
if frame.project == old_project:
self.frames[frame.id] = frame._replace(
project=new_project,
updated_at=updated_at
)
self.frames.changed = True
self.save()
def rename_tag(self, old_tag, new_tag):
"""Rename a tag in all affected frames."""
if old_tag not in self.tags:
raise ValueError(u'Tag "%s" does not exist' % old_tag)
updated_at = arrow.utcnow()
# rename tag
for frame in self.frames:
if old_tag in frame.tags:
self.frames[frame.id] = frame._replace(
tags=[new_tag if t == old_tag else t for t in frame.tags],
updated_at=updated_at
)
self.frames.changed = True
self.save()
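# Hedged usage sketch for the Watson object above. Pass an empty, throwaway
# directory as `config_dir` (that assumption is mine) so the example does not
# touch a real ~/.config/watson; everything else uses only methods defined in
# this class.
def _example_watson_session(config_dir):
    watson = Watson(config_dir=config_dir, frames=[], current={}, last_sync=None)
    watson.start('website', tags=['deploy'])
    frame = watson.stop()
    watson.save()
    return frame.project, frame.tags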
|
the-stack_106_29889 | import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, lib
from pandas._typing import (
Axis,
Dtype,
FilePathOrBuffer,
FrameOrSeries,
JSONSerializable,
Label,
Level,
Renamer,
)
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender,
Substitution,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
validate_percentile,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_float,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.indexes.api import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.missing import find_valid_index
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
if TYPE_CHECKING:
from pandas.core.resample import Resampler
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs: Dict[str, str] = dict()
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
f"cannot replace {to_replace} with method {method} on a "
f"{type(self).__name__}"
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: List[str] = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
]
_internal_names_set: Set[str] = set(_internal_names)
_accessors: Set[str] = set()
_deprecations: FrozenSet[str] = frozenset(["get_values"])
_metadata: List[str] = []
_is_copy = None
_data: BlockManager
_attrs: Dict[Optional[Hashable], Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
axes: Optional[List[Index]] = None,
copy: bool = False,
dtype: Optional[Dtype] = None,
attrs: Optional[Mapping[Optional[Hashable], Any]] = None,
fastpath: bool = False,
):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=self._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def attrs(self) -> Dict[Optional[Hashable], Any]:
"""
Dictionary of global attributes on this object.
.. warning::
attrs is experimental and may change without warning.
"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:
self._attrs = dict(value)
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented "
f"in the {type(self).__name__} constructor"
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
_AXIS_ALIASES = {"rows": 0}
_AXIS_IALIASES = {0: "rows"}
_stat_axis_number = 0
_stat_axis_name = "index"
_ix = None
_AXIS_ORDERS: List[str]
_AXIS_NUMBERS: Dict[str, int]
_AXIS_NAMES: Dict[int, str]
_AXIS_REVERSED: bool
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(
self, args, kwargs, require_all: bool = False, sentinel=None
):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
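A minimal illustrative sketch (made-up data; DataFrame axis order is
['index', 'columns']):
>>> df = pd.DataFrame({"a": [1]})
>>> df._construct_axes_from_arguments((["x"],), {})
({'index': ['x'], 'columns': None}, {})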
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError(f"No axis named {axis} for object type {cls}")
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiindex
key = f"{prefix}level_{i}"
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
d: Dict[str, ABCSeries] = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}
def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
"""
Return the special character free column resolvers of a dataframe.
Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
return {clean_column_name(self.name): self}
return {
clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)
}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of axis dimensions
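Examples
--------
A minimal illustrative sketch:
>>> pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).shape
(3, 2)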
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self) -> List[Index]:
"""
Return index label(s) of the internal NDFrame
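Examples
--------
A minimal illustrative sketch:
>>> pd.Series([1, 2, 3]).axes
[RangeIndex(start=0, stop=3, step=1)]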
"""
# we do it this way because if we have reversed axes, then
# the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self) -> int:
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : %(axes_single_arg)s, default 0
The axis to update. The value 0 identifies the rows%(axis_description_sub)s.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of type %(klass)s if inplace=False, None otherwise.
See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
"""
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels) -> None:
self._data.set_axis(axis, labels)
self._clear_item_cache()
def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
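Examples
--------
A minimal illustrative sketch (equivalent to a transpose for a DataFrame):
>>> df = pd.DataFrame({"a": [1, 2]})
>>> df.swapaxes(0, 1)
   0  1
a  1  2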
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self: FrameOrSeries, item) -> FrameOrSeries:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
# ----------------------------------------------------------------------
# Rename
def rename(
self: FrameOrSeries,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[FrameOrSeries]:
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
if mapper is None and index is None and columns is None:
raise TypeError("must pass an index to rename")
if index is not None or columns is not None:
if axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'"
)
elif mapper is not None:
raise TypeError(
"Cannot specify both 'mapper' and any of 'index' or 'columns'"
)
else:
# use the mapper argument
if axis and self._get_axis_number(axis) == 1:
columns = mapper
else:
index = mapper
result = self if inplace else self.copy(deep=copy)
for axis_no, replacements in enumerate((index, columns)):
if replacements is None:
continue
ax = self._get_axis(axis_no)
baxis = self._get_block_manager_axis(axis_no)
f = com.get_rename_function(replacements)
if level is not None:
level = ax._get_level_number(level)
# GH 13473
if not callable(replacements):
indexer = ax.get_indexer_for(replacements)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label
for index, label in enumerate(replacements)
if indexer[index] == -1
]
raise KeyError(f"{missing_labels} not found in axis")
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
return None
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=lib.no_default, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or function transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=lib.no_default
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not lib.no_default:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is lib.no_default:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other) -> bool:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
right are not equal. Provides an easy interface to ignore
inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = com.values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(f"Unary negative expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __pos__(self):
values = com.values_from_object(self)
if is_bool_dtype(values) or is_period_arraylike(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}")
return self.__array_wrap__(arr)
def __invert__(self):
if not self.size:
# inv fails with 0 len
return self
new_data = self._data.apply(operator.invert)
result = self._constructor(new_data).__finalize__(self)
return result
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if that
element is not boolean.
Returns
-------
bool
Same single boolean value converted to bool type.
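Examples
--------
A minimal illustrative sketch:
>>> pd.Series([True]).bool()
True
>>> pd.DataFrame({"col": [False]}).bool()
False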
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
f"{type(self).__name__}"
)
self.__nonzero__()
def __abs__(self: FrameOrSeries) -> FrameOrSeries:
return self.abs()
def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0) -> bool_t:
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
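Examples
--------
A minimal illustrative sketch with made-up data:
>>> df = pd.DataFrame({"a": [1]}, index=pd.Index([0], name="idx"))
>>> df._is_label_or_level_reference("a")
True
>>> df._is_label_or_level_reference("idx")
True
>>> df._is_label_or_level_reference("missing")
False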
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
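Examples
--------
A minimal illustrative sketch with made-up data:
>>> df = pd.DataFrame({"a": [1, 2]}, index=pd.Index([3, 4], name="idx"))
>>> df._get_label_or_level_values("a")
array([1, 2])
>>> df._get_label_or_level_values("idx")
array([3, 4])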
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
f"The {label_axis_name} label '{key}' "
f"is not unique.{multi_message}"
)
)
return values
def _drop_labels_or_levels(self, keys, axis: int = 0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
f"levels for axis {axis}: {invalid_keys}"
)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
f"{repr(type(self).__name__)} objects are mutable, "
f"thus they cannot be hashed"
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
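Examples
--------
A minimal illustrative sketch:
>>> pd.DataFrame({"a": [1], "b": [2]}).keys()
Index(['a', 'b'], dtype='object')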
"""
return self._info_axis
def items(self):
"""Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
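Examples
--------
A minimal illustrative sketch:
>>> for label, values in pd.DataFrame({"a": [1, 2]}).items():
...     print(label, list(values))
a [1, 2]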
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self) -> int:
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key) -> bool_t:
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self) -> bool_t:
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None) -> np.ndarray:
return com.values_from_object(self)
def __array_wrap__(self, result, context=None):
result = lib.item_from_zerodim(result)
if is_scalar(result):
# e.g. we get here with np.ptp(series)
# ptp also requires the item_from_zerodim
return result
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self) -> Dict[str, Any]:
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(
_data=self._data,
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta,
)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
attrs = state.get("_attrs", {})
object.__setattr__(self, "_attrs", attrs)
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
elif len(state) == 2:
raise NotImplementedError("Pre-0.12 pickles are no longer supported")
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = f"[{','.join(map(pprint_thing, self))}]"
return f"{type(self).__name__}({prepr})"
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_markdown"
] = """
Print %(klass)s in Markdown-friendly format.
.. versionadded:: 1.0.0
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
%(klass)s in Markdown-friendly format.
"""
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
) -> None:
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
orient: Optional[str] = None,
date_format: Optional[str] = None,
double_precision: int = 10,
force_ascii: bool_t = True,
date_unit: str = "ms",
default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
lines: bool_t = False,
compression: Optional[str] = "infer",
index: bool_t = True,
indent: Optional[int] = None,
) -> Optional[str]:
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series:
- default is 'index'
- allowed values are: {'split','records','index','table'}.
* DataFrame:
- default is 'columns'
- allowed values are: {'split', 'records', 'index', 'columns',
'values', 'table'}.
* The format of the JSON string:
- 'split' : dict like {'index' -> [index], 'columns' -> [columns],
'data' -> [values]}
- 'records' : list like [{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
Describing the data, where data component is like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited json format. Will
throw ValueError if the 'orient' is incorrect, since the other orients
are not list-like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
indent : int, optional
Length of whitespace used to indent each record.
.. versionadded:: 1.0.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json
Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
config.is_nonnegative_int(indent)
indent = indent or 0
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
indent=indent,
)
def to_hdf(
self,
path_or_buf,
key: str,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool_t = False,
format: Optional[str] = None,
index: bool_t = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool_t] = None,
data_columns: Optional[List[str]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
) -> None:
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
append : bool, default False
For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
- If None, pd.get_option('io.hdf.default_format') is checked,
followed by fallback to "fixed"
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
encoding : str, default "UTF-8"
min_itemsize : dict or int, optional
Map column names to minimum string sizes for columns.
nan_rep : Any, optional
How to represent null values as str.
Not allowed with append=True.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(
path_or_buf,
key,
self,
mode=mode,
complevel=complevel,
complib=complib,
append=append,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists: str = "fail",
index: bool_t = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
connectable. See `here \
<https://docs.sqlalchemy.org/en/13/core/connections.html>`_
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
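A custom ``method`` callable receives ``(pd_table, conn, keys, data_iter)``.
The sketch below is illustrative only (the function name is arbitrary and it
mirrors an executemany-style insert); it is not the canonical pandas
implementation.
>>> def executemany_insert(pd_table, conn, keys, data_iter):  # doctest: +SKIP
...     # pd_table wraps a SQLAlchemy Table; build one dict per row and
...     # hand the whole batch to the connection in a single execute call.
...     rows = [dict(zip(keys, row)) for row in data_iter]
...     conn.execute(pd_table.table.insert(), rows)
>>> df1.to_sql('users', con=engine, if_exists='append',
...            method=executemany_insert)  # doctest: +SKIP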
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(
self,
path,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
) -> None:
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
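A sketch of writing with explicit compression (the '.gz' file name here is
illustrative; by default the compression is inferred from the extension):
>>> original_df.to_pickle("./dummy.pkl.gz", compression="gzip")  # doctest: +SKIP
>>> pd.read_pickle("./dummy.pkl.gz", compression="gzip")  # doctest: +SKIP
>>> os.remove("./dummy.pkl.gz")  # doctest: +SKIP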
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(
self, excel: bool_t = True, sep: Optional[str] = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot',
... 'falcon', 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
caption=None,
label=None,
):
r"""
Render object to a LaTeX tabular, longtable, or nested table/tabular.
Requires ``\usepackage{booktabs}``. The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{table.tex}``.
.. versionchanged:: 0.20.2
Added to Series.
.. versionchanged:: 1.0.0
Added caption and label arguments.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
caption : str, optional
The LaTeX caption to be placed inside ``\caption{}`` in the output.
.. versionadded:: 1.0.0
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
This is used with ``\ref{}`` in the main ``.tex`` file.
.. versionadded:: 1.0.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
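The ``caption`` and ``label`` arguments added in 1.0.0 wrap the tabular in a
``table`` environment containing ``\caption{}`` and ``\label{}``. A sketch
(the caption and label strings are arbitrary; output omitted):
>>> print(df.to_latex(index=False, caption='Turtle gear',
...                   label='tab:gear'))  # doctest: +SKIP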
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Label]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Mapping[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
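When passing an open file handle instead of a path, open it with
``newline=''`` as noted above (sketch; the file name is illustrative):
>>> with open('out.csv', 'w', newline='') as f:  # doctest: +SKIP
...     df.to_csv(f, index=False)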
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher) -> None:
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self) -> None:
"""Reset the cacher."""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value) -> None:
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self) -> bool_t:
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(
self, clear: bool_t = False, verify_is_copy: bool_t = True
) -> None:
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
# Note: we need to call ref._maybe_cache_changed even in the
# case where it will raise (the reason is not fully understood,
# but the AssertionError is swallowed below).
try:
ref._maybe_cache_changed(cacher[0], self)
except AssertionError:
# ref._data.setitem can raise
# AssertionError because of shape mismatch
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self) -> None:
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(
self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs
) -> FrameOrSeries:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool
Before pandas 1.0, ``is_copy=False`` can be specified to ensure
that the return value is an actual copy. Starting with pandas 1.0,
``take`` always returns a copy, and the keyword is therefore
deprecated.
.. deprecated:: 1.0.0
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if is_copy is not None:
warnings.warn(
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this.",
FutureWarning,
stacklevel=2,
)
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
return self._constructor(new_data).__finalize__(self)
def _take_with_is_copy(
self: FrameOrSeries, indices, axis=0, **kwargs
) -> FrameOrSeries:
"""
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (used in indexing
for the SettingWithCopyWarning).
See the docstring of `take` for full explanation of the parameters.
"""
result = self.take(indices=indices, axis=axis, **kwargs)
# Maybe set copy if we didn't actually change the index.
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level: bool_t = True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
_indexer = [slice(None)] * self.ndim
_indexer[axis] = loc
indexer = tuple(_indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
(inds,) = loc.nonzero()
return self._take_with_is_copy(inds, axis=axis)
else:
return self._take_with_is_copy(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
# In this case loc should be an integer
if self.ndim == 1:
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
return self._values[loc]
new_values = self._data.fast_xs(loc)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs: Callable = xs
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take_with_is_copy(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:
"""
Construct a slice of this container.
Slicing with this method is *always* positional.
"""
assert isinstance(slobj, slice), type(slobj)
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value) -> None:
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref, copy: bool_t = True) -> None:
if not copy:
self._is_copy = None
else:
assert ref is not None
self._is_copy = weakref.ref(ref)
def _check_is_chained_assignment_possible(self) -> bool_t:
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Returns True if we are a view and are cached (meaning the cacher should
be updated after setting); False otherwise.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Check whether we are doing a setitem on a chained copy and, if so, warn or
raise depending on the ``mode.chained_assignment`` option.
Parameters
----------
stacklevel : int, default 4
the level to show of the stack when the error is output
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
Notes
-----
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*.
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-block objects to avoid having to handle this case.
df = DataFrame(np.arange(0, 9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"https://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key) -> None:
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
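Examples
--------
A minimal illustration (the frame below is hypothetical):
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.get("A")
0    1
1    2
Name: A, dtype: int64
>>> df.get("B", default="missing")
'missing'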
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(
self: FrameOrSeries,
other,
method: Optional[str] = None,
copy: bool_t = True,
limit=None,
tolerance=None,
) -> FrameOrSeries:
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace: bool_t = False,
errors: str = "raise",
):
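# Accept either ``labels`` (+ ``axis``) or the ``index``/``columns`` keywords,
# then delegate the actual removal to _drop_axis for each requested axis.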
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(
self: FrameOrSeries, labels, axis, level=None, errors: str = "raise"
) -> FrameOrSeries:
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError(f"{labels} not found in axis")
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError(f"{labels} not found in axis")
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries:
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) # type: ignore
def sort_values(
self,
axis=0,
ascending=True,
inplace: bool_t = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool_t = False,
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:
"""
Conform %(klass)s to new index with optional filling logic.
Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data.
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: Propagate last valid observation forward to next
valid.
* backfill / bfill: Use next valid observation to fill gap.
* nearest: Use nearest valid observations to fill gap.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to propagate the next valid value backwards to fill the
``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
f'argument "{list(kwargs.keys())[0]}"'
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
return self._reindex_multi(axes, copy, fill_value)
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(
self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy
) -> FrameOrSeries:
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level) -> bool_t:
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
raise AbstractMethodError(self)
def _reindex_with_indexers(
self: FrameOrSeries,
reindexers,
fill_value=None,
copy: bool_t = False,
allow_dups: bool_t = False,
) -> FrameOrSeries:
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(
self: FrameOrSeries,
items=None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis=None,
) -> FrameOrSeries:
"""
Subset the dataframe rows or columns according to the specified index labels.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
The axis to filter on, expressed either as an index (int)
or axis name (str). By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
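# f closes over ``matcher``, which is bound below before labels.map(f) runs.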
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n]
def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3)
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self: FrameOrSeries,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
) -> FrameOrSeries:
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Notes
-----
If `frac` > 1, `replace` must be set to `True`.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
An upsample sample of the ``DataFrame`` with replacement:
Note that `replace` parameter has to be `True` for `frac` parameter > 1.
>>> df.sample(frac=2, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
falcon 2 2 10
falcon 2 2 10
fish 0 0 8
dog 4 0 2
fish 0 0 8
dog 4 0 2
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError(
"String passed to weights not a valid column"
)
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif frac is not None and frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis)
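# Illustrative sketch of the argument checks above (not executed here; assumes
# ``import pandas as pd`` and a hypothetical 3-row frame):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"x": [1, 2, 3]})
#   >>> df.sample(n=2, random_state=0)      # deterministic via random_state
#   >>> df.sample(frac=2, replace=True)     # frac > 1 requires replace=True
#   >>> df.sample(n=2, frac=0.5)            # raises ValueError: n and frac are exclusive
#
# Weights such as ``[1, 1, 2]`` are renormalized to sum to 1, so the last row
# would be drawn twice as often as either of the others.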
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
Function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply
DataFrame.applymap
Series.map
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
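# Minimal usage sketch of ``pipe`` chaining (``add_constant`` is a hypothetical
# helper defined only for illustration; assumes ``import pandas as pd``):
#
#   >>> import pandas as pd
#   >>> def add_constant(df, value=0):
#   ...     return df + value
#   >>> pd.DataFrame({"a": [1, 2]}).pipe(add_constant, value=10)
#       a
#   0  11
#   1  12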
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values.
Produced %(klass)s will have same axis length as self.
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(
self: FrameOrSeries, other, method=None, **kwargs
) -> FrameOrSeries:
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name; possibly used to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in other.attrs:
self.attrs[name] = other.attrs[name]
# For subclasses using _metadata.
for name in self._metadata:
assert isinstance(name, str)
object.__setattr__(self, name, getattr(other, name, None))
return self
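# Minimal sketch of the metadata propagation above (assumes ``import pandas as
# pd``; the ``"source"`` key is hypothetical). ``copy`` calls ``__finalize__``
# explicitly, so ``attrs`` survives the round trip:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> df.attrs["source"] = "sensor-1"
#   >>> df.copy().attrs
#   {'source': 'sensor-1'}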
def __getattr__(self, name: str):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value) -> None:
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
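# Illustrative sketch of the attribute fallbacks above (column names are
# hypothetical; assumes ``import pandas as pd``):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"price": [1.0, 2.0]})
#   >>> df.price                      # __getattr__ falls back to df["price"]
#   >>> df.price = [3.0, 4.0]         # existing column: routed to df["price"] = ...
#   >>> df.volume = [10, 20]          # new attribute: warns and does NOT create a column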
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self) -> None:
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace: bool_t = False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
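# Minimal sketch of what consolidation does (internal detail; assumes
# ``import pandas as pd``). Inserting columns one by one can leave several
# blocks of the same dtype, which ``_consolidate`` merges:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": [1, 2]})
#   >>> df["b"] = [3, 4]                       # typically adds a second int64 block
#   >>> len(df._data.blocks)                   # usually 2 before consolidation
#   >>> len(df._consolidate()._data.blocks)    # 1 after grouping by dtype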
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value) -> bool_t:
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
if is_float(value) and np.isnan(value):
return True
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self) -> np.ndarray:
"""internal implementation"""
return self.values
def _internal_get_values(self) -> np.ndarray:
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
"""
return self.values
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
def _to_dict_of_blocks(self, copy: bool_t = True):
"""
Return a dict of dtype -> Constructor Types, each of which
holds a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()
}
def astype(
self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = "raise"
) -> FrameOrSeries:
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1, 2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(dtype=dtype[col_name], copy=copy, errors=errors)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = [
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
]
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
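# Illustrative note on the extension-dtype branch above (assumes
# ``import pandas as pd``): a single extension dtype such as ``"category"``
# is applied column by column via ``iloc``, which also handles duplicate
# column labels:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"a": ["x", "y"], "b": ["y", "z"]})
#   >>> df.astype("category").dtypes
#   a    category
#   b    category
#   dtype: object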
def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:
return self.copy(deep=deep)
def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
def _convert(
self: FrameOrSeries,
datetime: bool_t = False,
numeric: bool_t = False,
timedelta: bool_t = False,
coerce: bool_t = False,
copy: bool_t = True,
) -> FrameOrSeries:
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self: FrameOrSeries) -> FrameOrSeries:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
def convert_dtypes(
self: FrameOrSeries,
infer_objects: bool_t = True,
convert_string: bool_t = True,
convert_integer: bool_t = True,
convert_boolean: bool_t = True,
) -> FrameOrSeries:
"""
Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.
.. versionadded:: 1.0.0
Parameters
----------
infer_objects : bool, default True
Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtype()``.
Returns
-------
Series or DataFrame
Copy of input object with new dtype.
See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, and ``convert_boolean``, it is
possible to turn off individual conversions to ``StringDtype``, the integer
extension types or ``BooleanDtype``, respectively.
For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction. Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension
type, otherwise leave as ``object``.
If the dtype is integer, convert to an appropriate integer extension type.
If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type.
In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.
Examples
--------
>>> df = pd.DataFrame(
... {
... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
... }
... )
Start with a DataFrame with default dtypes.
>>> df
a b c d e f
0 1 x True h 10.0 NaN
1 2 y False i NaN 100.5
2 3 z NaN NaN 20.0 200.0
>>> df.dtypes
a int32
b object
c object
d object
e float64
f float64
dtype: object
Convert the DataFrame to use best possible dtypes.
>>> dfn = df.convert_dtypes()
>>> dfn
a b c d e f
0 1 x True h 10 NaN
1 2 y False i <NA> 100.5
2 3 z <NA> <NA> 20 200.0
>>> dfn.dtypes
a Int32
b string
c boolean
d string
e Int64
f float64
dtype: object
Start with a Series of strings and missing data represented by ``np.nan``.
>>> s = pd.Series(["a", "b", np.nan])
>>> s
0 a
1 b
2 NaN
dtype: object
Obtain a Series with dtype ``StringDtype``.
>>> s.convert_dtypes()
0 a
1 b
2 <NA>
dtype: string
"""
if self.ndim == 1:
return self._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
else:
results = [
col._convert_dtypes(
infer_objects, convert_string, convert_integer, convert_boolean
)
for col_name, col in self.items()
]
result = pd.concat(results, axis=1, copy=False)
return result
# ----------------------------------------------------------------------
# Filling NA's
@doc(**_shared_doc_kwargs)
def fillna(
self: FrameOrSeries,
value=None,
method=None,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : {axes_single_arg}
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
{klass} or None
Object with missing values filled or None if ``inplace=True``.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in columns 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
value = create_series_with_explicit_dtype(
value, dtype_if_empty=object
)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
f'"{type(value).__name__}"'
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError(f"invalid fill value with a {type(value)}")
if inplace:
self._update_inplace(new_data)
return None
else:
return self._constructor(new_data).__finalize__(self)
def ffill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(
self: FrameOrSeries,
axis=None,
inplace: bool_t = False,
limit=None,
downcast=None,
) -> Optional[FrameOrSeries]:
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s or None
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
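# Illustrative sketch for the two synonyms above (assumes ``import numpy as np``
# and ``import pandas as pd``):
#
#   >>> import numpy as np, pandas as pd
#   >>> s = pd.Series([np.nan, 2.0, np.nan, 4.0])
#   >>> s.ffill().tolist()
#   [nan, 2.0, 2.0, 4.0]
#   >>> s.bfill().tolist()
#   [2.0, 2.0, 4.0, 4.0]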
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` do not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None '
'and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values "
"of the top level mapping must be mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
f"'regex' must be a string or a compiled regular expression "
f"or a list or dict of strings or regular expressions, "
f"you passed a {repr(type(regex).__name__)}"
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
raise TypeError(
f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}'
)
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
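# Illustrative sketch of the nested-dict branch above: a nested mapping such as
# ``{'A': {0: 100}}`` is split into per-column ``to_replace``/``value`` dicts
# before recursing (assumes ``import pandas as pd``):
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({"A": [0, 1], "B": [0, 1]})
#   >>> df.replace({"A": {0: 100}})
#        A  B
#   0  100  0
#   1    1  1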
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
f"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating."
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs,
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
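# Illustrative note on the axis handling above: ``axis=1`` is implemented by
# interpolating the transposed frame and transposing back (assumes
# ``import numpy as np`` and ``import pandas as pd``):
#
#   >>> import numpy as np, pandas as pd
#   >>> df = pd.DataFrame([[1.0, np.nan, 3.0]])
#   >>> df.interpolate(axis=1)
#        0    1    2
#   0  1.0  2.0  3.0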
# ----------------------------------------------------------------------
# Timeseries methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`) is taken.
If there is no good value, NaN is returned for a Series, or
a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq)
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where, dtype=np.float64)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
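# Minimal sketch of the Series fast path above: ``searchsorted`` finds the last
# position at or before ``where`` and the loop then steps back over NaNs
# (assumes ``import numpy as np`` and ``import pandas as pd``):
#
#   >>> import numpy as np, pandas as pd
#   >>> s = pd.Series([1.0, np.nan], index=[10, 30])
#   >>> s.asof(35)          # lands on index 30, steps back past the NaN
#   1.0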
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self: FrameOrSeries) -> FrameOrSeries:
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self: FrameOrSeries) -> FrameOrSeries:
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
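# Fast path used by ``clip`` when both bounds are scalars: clip through
# ``where`` on the numpy values, then restore any NaNs that were present
# in the original data.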
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
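# Clip against a single bound. Scalar thresholds take the scalar fast
# path; list-like thresholds are aligned to ``self`` and applied through
# ``where``. NA entries are kept as-is (``| isna(self)``).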
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(
self: FrameOrSeries,
lower=None,
upper=None,
axis=None,
inplace: bool_t = False,
*args,
**kwargs,
) -> FrameOrSeries:
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array-like, and in the latter case
the clipping is performed element-wise along the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
_shared_docs[
"groupby"
] = """
Group %(klass)s using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
Returns
-------
%(klass)sGroupBy
Returns a groupby object that contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
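Examples
--------
Minimal illustration (a Series example is shown here; the exact repr
formatting may vary slightly between pandas versions):
>>> ser = pd.Series([390., 350., 30., 20.],
...                 index=['Falcon', 'Falcon', 'Parrot', 'Parrot'],
...                 name='Max Speed')
>>> ser.groupby(level=0).mean()
Falcon    370.0
Parrot     25.0
Name: Max Speed, dtype: float64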
"""
def asfreq(
self: FrameOrSeries,
freq,
method=None,
how: Optional[str] = None,
normalize: bool_t = False,
fill_value=None,
) -> FrameOrSeries:
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {'start', 'end'}, default 'end'
For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
Whether to reset output index to midnight.
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(
self: FrameOrSeries, time, asof: bool_t = False, axis=None
) -> FrameOrSeries:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self._take_with_is_copy(indexer, axis=axis)
def between_time(
self: FrameOrSeries,
start_time,
end_time,
include_start: bool_t = True,
include_end: bool_t = True,
axis=None,
) -> FrameOrSeries:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
end_time : datetime.time or str
include_start : bool, default True
include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self._take_with_is_copy(indexer, axis=axis)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: int = 0,
on=None,
level=None,
) -> "Resampler":
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket
it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import get_resampler
axis = self._get_axis_number(axis)
return get_resampler(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
def first(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select initial periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days were returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.is_anchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self: FrameOrSeries, offset) -> FrameOrSeries:
"""
Select final periods of time series data based on a date offset.
Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days were returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self: FrameOrSeries,
axis=0,
method: str = "average",
numeric_only: Optional[bool_t] = None,
na_option: str = "keep",
ascending: bool_t = True,
pct: bool_t = False,
) -> FrameOrSeries:
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series:
- pad / ffill: propagate last valid observation forward to next valid.
- backfill / bfill: use NEXT valid observation to fill gap.
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit.
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions.
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
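Examples
--------
A minimal sketch of aligning two Series on an outer-joined index
(repr formatting may differ slightly between pandas versions):
>>> s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
>>> s2 = pd.Series([10, 20, 30], index=['b', 'c', 'd'])
>>> left, right = s1.align(s2, join='outer')
>>> left
a    1.0
b    2.0
c    3.0
d    NaN
dtype: float64
>>> right
a     NaN
b    10.0
c    20.0
d    30.0
dtype: float64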
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
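# Align ``self`` (Series or DataFrame) against a DataFrame ``other``:
# join the row index (and the columns when both sides are frames),
# reindex both objects, then optionally fill the holes introduced by
# the reindex.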
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = self._ensure_type(
left.fillna(method=method, axis=fill_axis, limit=limit)
)
right = right.fillna(method=method, axis=fill_axis, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy: bool_t = True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
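# Align ``self`` against a Series ``other``. Series/Series alignment
# joins the two indexes directly; DataFrame/Series alignment joins the
# requested frame axis with the series index and reindexes the frame's
# internal blocks.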
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : bool %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(
self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None
) -> FrameOrSeries:
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:
"""
Equivalent to `shift` without copying data.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
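Examples
--------
Illustrative example (output shown as produced by a recent pandas
version):
>>> s = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
>>> s.slice_shift(1)
b    1
c    2
d    3
dtype: int64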
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If `freq` is not specified, the index's ``freq`` or ``inferred_freq``
attribute is used. If neither attribute is set, a ValueError is
raised.
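Examples
--------
Illustrative example (output shown as produced by a recent pandas
version):
>>> idx = pd.date_range('2020-01-01', periods=3, freq='D')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s.tshift(1)
2020-01-02    1
2020-01-03    2
2020-01-04    3
Freq: D, dtype: int64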
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
raise ValueError(
f"Given freq {freq.rule_code} does not match "
f"PeriodIndex freq {orig_freq.rule_code}"
)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(
self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(
self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True
) -> FrameOrSeries:
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
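Examples
--------
Illustrative example (output shown as produced by a recent pandas
version):
>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_convert('Asia/Shanghai')
2018-09-15 07:30:00+08:00    1
dtype: int64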
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self: FrameOrSeries,
tz,
axis=0,
level=None,
copy: bool_t = True,
ambiguous="raise",
nonexistent: str = "raise",
) -> FrameOrSeries:
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7),
... index=pd.DatetimeIndex(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ``ambiguous`` parameter to set the DST explicitly.
>>> s = pd.Series(range(3),
... index=pd.DatetimeIndex(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2),
... index=pd.DatetimeIndex(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError(f"The level {level} is not valid")
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self: FrameOrSeries) -> FrameOrSeries:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs (e.g. ``1.2 + 1j``), the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(
self: FrameOrSeries, percentiles=None, include=None, exclude=None
) -> FrameOrSeries:
"""
Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
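        # Per-dtype helpers: numeric (and timedelta) columns are summarized with
        # count/mean/std/min/percentiles/max, categorical/object/bool columns
        # with count/unique/top/freq, and datetime columns with
        # count/mean/min/percentiles/max (GH-30164).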
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_timestamp_1d(data):
# GH-30164
stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"]
d = (
[data.count(), data.mean(), data.min()]
+ data.quantile(percentiles).tolist()
+ [data.max()]
)
return pd.Series(d, index=stat_index, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_datetime64_any_dtype(data):
return describe_timestamp_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names: List[Optional[Hashable]] = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or str, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(
self: FrameOrSeries,
periods=1,
fill_method="pad",
limit=None,
freq=None,
**kwargs,
) -> FrameOrSeries:
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self._ensure_type(
self.fillna(method=fill_method, axis=axis, limit=limit)
)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
if freq is not None:
# Shift method is implemented differently when freq is not None
# We want to restore the original index
rs = rs.loc[~rs.index.duplicated()]
rs = rs.reindex_like(data)
return rs
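    # Apply the named reduction across a MultiIndex level by grouping on that
    # level; the groupby method is used directly when available and skipna is
    # True, otherwise the reduction is applied group-wise via ``aggregate``.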
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name,
name2,
axis_descr,
_any_desc,
nanops.nanany,
_any_see_also,
_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name,
name2,
axis_descr,
_all_desc,
nanops.nanall,
_all_see_also,
_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc_mad)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
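            # Mean absolute deviation: subtract the mean along the requested
            # axis, then average the absolute residuals along that same axis.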
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name,
name2,
axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name,
name2,
axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name,
name2,
axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd,
)
cls.cummin = _make_cum_function(
cls,
"cummin",
name,
name2,
axis_descr,
"minimum",
np.minimum.accumulate,
"min",
np.inf,
np.nan,
_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name,
name2,
axis_descr,
"sum",
np.cumsum,
"sum",
0.0,
np.nan,
_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name,
name2,
axis_descr,
"product",
np.cumprod,
"prod",
1.0,
np.nan,
_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name,
name2,
axis_descr,
"maximum",
np.maximum.accumulate,
"max",
-np.inf,
np.nan,
_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name,
name2,
axis_descr,
"""Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
nanops.nansum,
_stat_func_see_also,
_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name,
name2,
axis_descr,
"Return the mean of the values for the requested axis.",
nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name,
name2,
axis_descr,
"Return unbiased skew over requested axis.\n\nNormalized by N-1.",
nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name,
name2,
axis_descr,
"Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name,
name2,
axis_descr,
"Return the product of the values for the requested axis.",
nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name,
name2,
axis_descr,
"Return the median of the values for the requested axis.",
nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name,
name2,
axis_descr,
"""Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax,
_stat_func_see_also,
_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name,
name2,
axis_descr,
"""Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin,
_stat_func_see_also,
_min_examples,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how: str):
"""
        Retrieves the index of the first or last valid value, depending on `how`.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
idxpos = find_valid_index(self._values, how)
if idxpos is None:
return None
return self.index[idxpos]
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = (
f"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}"
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_doc_mad = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default None
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also: str = "", examples: str = ""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name,
name1,
name2,
axis_descr,
desc,
accum_func,
accum_func_name,
mask_a,
mask_b,
examples,
):
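    # ``mask_a`` is the neutral fill substituted for NaNs before accumulating
    # (e.g. ``np.inf`` for cummin, ``0.0`` for cumsum); ``mask_b`` (NaN) is
    # written back over those positions afterwards on the skipna float path.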
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
if axis == 1:
return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
def na_accum_func(blk_values):
# We will be applying this function to block values
if blk_values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
                # numpy 1.18 started sorting NaTs at the end instead of the
                # beginning, so we need a workaround to maintain
                # backwards-compatibility.
orig_dtype = blk_values.dtype
# We need to define mask before masking NaTs
mask = isna(blk_values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = blk_values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = blk_values
changed = False
result = accum_func(y.view("i8"), axis)
if skipna:
np.putmask(result, mask, iNaT)
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(blk_values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(blk_values)._from_sequence(result, dtype=orig_dtype)
elif skipna and not issubclass(
blk_values.dtype.type, (np.integer, np.bool_)
):
vals = blk_values.copy().T
mask = isna(vals)
np.putmask(vals, mask, mask_a)
result = accum_func(vals, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(blk_values.T, axis)
# transpose back for ndarray, not for EA
return result.T if hasattr(result, "T") else result
result = self._data.apply(na_accum_func)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
|
the-stack_106_29890 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
from oslo_utils.fixture import uuidsentinel
from nova.api.openstack.compute import availability_zone as az_v21
from nova.api.openstack.compute import servers as servers_v21
from nova import availability_zones
from nova.compute import api as compute_api
from nova import context
from nova.db import api as db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, filters=None, **kwargs):
disabled = filters.get('disabled') if filters else None
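    # Build a canned ServiceList: the 'disabled' filter picks between two fixed
    # sets of services spread across the zone-1/zone-2/internal availability
    # zones.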
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
db_s = dict(test_service.fake_service,
binary=binary,
availability_zone=availability_zone,
available_zones=availability_zone,
created_at=created_at,
updated_at=updated_at,
host=host,
disabled=disabled)
# The version field is immutable so remove that before creating the obj
db_s.pop('version', None)
return objects.Service(context, **db_s)
if disabled:
svcs = [__fake_service("nova-compute", "zone-2",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
svcs = [__fake_service("nova-compute", "zone-1",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
# nova-conductor is in the same zone and host as nova-sched
# and is here to make sure /detail filters out duplicates.
__fake_service("nova-conductor", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
return objects.ServiceList(objects=svcs)
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
availability_zone = az_v21
def setUp(self):
super(AvailabilityZoneApiTestV21, self).setUp()
availability_zones.reset_cache()
fakes.stub_out_nw_api(self)
self.stub_out('nova.availability_zones.set_availability_zones',
lambda c, services: services)
self.stub_out('nova.servicegroup.API.service_is_up',
lambda s, service: service['binary'] != u"nova-network")
self.controller = self.availability_zone.AvailabilityZoneController()
self.mock_service_get_all = mock.patch.object(
self.controller.host_api, 'service_get_all',
side_effect=fake_service_get_all).start()
self.addCleanup(self.mock_service_get_all.stop)
self.req = fakes.HTTPRequest.blank('')
def test_filtered_availability_zones(self):
zones = ['zone1', 'internal']
expected = [{'zoneName': 'zone1',
'zoneState': {'available': True},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones, True)
self.assertEqual(result, expected)
expected = [{'zoneName': 'zone1',
'zoneState': {'available': False},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones,
False)
self.assertEqual(result, expected)
def test_availability_zone_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 2)
self.assertEqual(zones[0]['zoneName'], u'zone-1')
self.assertTrue(zones[0]['zoneState']['available'])
self.assertIsNone(zones[0]['hosts'])
self.assertEqual(zones[1]['zoneName'], u'zone-2')
self.assertFalse(zones[1]['zoneState']['available'])
self.assertIsNone(zones[1]['hosts'])
def test_availability_zone_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 3)
timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
expected = [
{
'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {
'fake_host-1': {
'nova-compute': {
'active': True,
'available': True,
'updated_at': timestamp
}
}
}
},
{
'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {
'fake_host-1': {
'nova-sched': {
'active': True,
'available': True,
'updated_at': timestamp
},
'nova-conductor': {
'active': True,
'available': True,
'updated_at': timestamp
}
},
'fake_host-2': {
'nova-network': {
'active': True,
'available': False,
'updated_at': nova_network_timestamp
}
}
}
},
{
'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None
}
]
self.assertEqual(expected, zones)
# We get both enabled and disabled services per cell (just one in this
# test case) so we'll query the services table twice.
self.assertEqual(2, self.mock_service_get_all.call_count,
self.mock_service_get_all.call_args_list)
@mock.patch.object(availability_zones, 'get_availability_zones',
return_value=[['nova'], []])
def test_availability_zone_detail_no_services(self, mock_get_az):
expected_response = {'availabilityZoneInfo':
[{'zoneState': {'available': True},
'hosts': {},
'zoneName': 'nova'}]}
resp_dict = self.controller.detail(self.req)
self.assertThat(resp_dict,
matchers.DictMatches(expected_response))
class ServersControllerCreateTestV21(test.TestCase):
base_url = '/v2/%s/' % fakes.FAKE_PROJECT_ID
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
# Neutron security groups are tested in test_neutron_security_groups.py
self.flags(use_neutron=False)
fakes.stub_out_nw_api(self)
self._set_up_controller()
def create_db_entry_for_new_instance(*args, **kwargs):
instance = args[4]
instance.uuid = FAKE_UUID
return instance
fake.stub_out_image_service(self)
self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
create_db_entry_for_new_instance)
self.req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
self.controller = servers_v21.ServersController()
def _create_instance_with_availability_zone(self, zone_name):
def create(*args, **kwargs):
self.assertIn('availability_zone', kwargs)
self.assertEqual('nova', kwargs['availability_zone'])
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stub_out('nova.compute.api.API.create', create)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'availability_zone': zone_name,
},
}
admin_context = context.get_admin_context()
db.service_create(admin_context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = objects.Aggregate(admin_context,
name='agg1',
uuid=uuidsentinel.agg_uuid,
metadata={'availability_zone': 'nova'})
agg.create()
agg.add_host('host1_zones')
return self.req, body
def test_create_instance_with_availability_zone(self):
zone_name = 'nova'
req, body = self._create_instance_with_availability_zone(zone_name)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
zone_name = 'a' * 256
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_too_short(self):
zone_name = ''
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_not_str(self):
zone_name = 111
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_without_availability_zone(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
res = self.controller.create(self.req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
|
the-stack_106_29892 | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome, PhantomJS
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
TIMEOUT = 20
TIMESLP = 3
my_sender = '[email protected]' # sender email account
my_pass = 'fujkixpkjiyhcaji' # sender email password (SMTP authorization code)
my_user = '[email protected]' # recipient email account
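# Email the screenshot saved as result.png (attached inline via a Content-ID
# reference) through QQ Mail's SMTP-over-SSL endpoint; returns True on success,
# False if sending failed.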
def mail():
ret = True
try:
cur_time = time.strftime("%d/%m/%Y")
msgRoot = MIMEMultipart('related')
msgRoot['From'] = Header('PKU-AutoSubmit', 'utf-8')
msgRoot['To'] = Header('student', 'utf-8')
subject = cur_time + ' 报备成功!'
msgRoot['Subject'] = Header(subject, 'utf-8')
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
mail_msg = """
<p>自动报备成功!</p>
<p>截图:</p>
<p><img src="cid:image1"></p>
"""
msgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
        # attach the screenshot image from the current directory
fp = open('result.png', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
        # define the image ID that is referenced in the HTML body
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
        server = smtplib.SMTP_SSL("smtp.qq.com", 465) # sender's SMTP server, SSL on port 465
        server.login(my_sender, my_pass) # sender email account and password
        server.sendmail(my_sender, [my_user, ], msgRoot.as_string()) # sender, recipient list, message
        server.quit() # close the connection
    except Exception: # if anything in the try block failed, fall through to ret = False
ret = False
return ret
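# Log in to the PKU portal through IAAA; retries on failure and raises after
# three failed attempts.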
def login(driver, username, password, failed=0):
if failed == 3:
raise Exception('门户登录失败')
iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
appName = quote('北京大学校内信息门户新版')
redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
driver.get('https://portal.pku.edu.cn/portal2017/')
driver.get(
f'{iaaaUrl}?appID=portal2017&appName={appName}&redirectUrl={redirectUrl}'
)
print('门户登陆中...')
driver.find_element_by_id('user_name').send_keys(username)
time.sleep(TIMESLP)
driver.find_element_by_id('password').send_keys(password)
time.sleep(TIMESLP)
driver.find_element_by_id('logon_button').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.LINK_TEXT, '我知道了')))
except:
pass
else:
driver.find_element_by_link_text('我知道了').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
except:
login(driver, username, password, failed + 1)
else:
print('门户登录成功!')
def go_to_application_out(driver):
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_in_out(driver, way):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{way}"]').click()
def select_campus(driver, campus):
driver.find_elements_by_class_name('el-select')[1].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{campus}"]').click()
def select_destination(driver, destination):
driver.find_elements_by_class_name('el-select')[2].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{destination}"]').click()
def select_district(driver, district):
driver.find_elements_by_class_name('el-select')[3].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{district}"]').click()
def write_reason(driver, reason):
driver.find_element_by_class_name('el-textarea__inner').send_keys(
f'{reason}')
time.sleep(TIMESLP)
def write_track(driver, track):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{track}')
time.sleep(TIMESLP)
def write_street(driver, street):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{street}')
time.sleep(TIMESLP)
def click_check(driver):
driver.find_element_by_class_name('el-checkbox__label').click()
time.sleep(TIMESLP)
def click_inPeking(driver):
driver.find_element_by_class_name('el-radio__inner').click()
time.sleep(TIMESLP)
def submit(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"保存")]').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located(
(By.XPATH, '(//button/span[contains(text(),"提交")])[3]')))
driver.find_element_by_xpath(
'(//button/span[contains(text(),"提交")])[3]').click()
time.sleep(TIMESLP)
def screen_capture(driver):
driver.back()
driver.back()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
driver.find_elements_by_class_name('el-card__body')[1].click()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located(
(By.XPATH, '//button/span[contains(text(),"加载更多")]')))
driver.maximize_window()
time.sleep(0.1)
driver.save_screenshot('result.png')
print('备案历史截图已保存')
def fill_out(driver, campus, reason, destination, track):
print('开始填报出校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '出校')
print('Done')
print('选择校区 ', end='')
select_campus(driver, campus)
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
print('选择出校目的地 ', end='')
select_destination(driver, destination)
print('Done')
print('填写出校行动轨迹 ', end='')
write_track(driver, track)
print('Done')
click_check(driver)
submit(driver)
print('出校备案填报完毕!')
def fill_in(driver, campus, reason, habitation, district, street):
print('开始填报入校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '入校')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
if habitation != '北京':
raise Exception('暂不支持京外入校备案,请手动填写')
print('选择居住地所在区 ', end='')
select_district(driver, district)
print('Done')
print('填写居住地所在街道 ', end='')
write_street(driver, street)
print('Done')
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案填报完毕!')
def run(driver, username, password, campus, reason, destination, track,
habitation, district, street):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
fill_out(driver, campus, reason, destination, track)
print('=================================')
go_to_application_in(driver)
fill_in(driver, campus, reason, habitation, district, street)
print('=================================')
screen_capture(driver)
print('=================================')
ret = mail()
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
print('可以愉快的玩耍啦!')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--username', '-u', type=str, help='用户名')
parser.add_argument('--password', '-p', type=str, help='密码')
parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
parser.add_argument('--reason', type=str, help='出校原因, eg. 吃饭', default='上课')
parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='东南门-理教-勺园')
parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='燕园街道')
args = parser.parse_args()
args_public = copy.deepcopy(args)
args_public.password = 'xxxxxxxx'
print('Arguments: {}'.format(args_public))
print('Driver Launching...')
# driver = Firefox()
# driver = Chrome()
if sys.platform == 'darwin': # macOS
phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
elif sys.platform == 'linux': # linux
phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
else: # windows
phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
driver = PhantomJS(executable_path=phantomjs_path)
run(driver, args.username, args.password, args.campus, args.reason,
args.destination, args.track, args.habitation, args.district,
args.street)
driver.close()
#brian2
|
the-stack_106_29894 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for testing Kubeflow-based orchestrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import re
import subprocess
import tarfile
import time
from typing import Any, Dict, List, Text
from absl import logging
import kfp
from kfp_server_api import rest
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import InfraValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.dsl.components.common import resolver
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.dsl.io import fileio
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import test_utils
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto import infra_validator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.types.standard_artifacts import Model
from tfx.utils import kube_utils
from tfx.utils import retry
from tfx.utils import test_case_utils
# TODO(jiyongjung): Merge with kube_utils.PodStatus
# Various execution status of a KFP pipeline.
KFP_RUNNING_STATUS = 'running'
KFP_SUCCESS_STATUS = 'succeeded'
KFP_FAIL_STATUS = 'failed'
KFP_SKIPPED_STATUS = 'skipped'
KFP_ERROR_STATUS = 'error'
KFP_FINAL_STATUS = frozenset(
(KFP_SUCCESS_STATUS, KFP_FAIL_STATUS, KFP_SKIPPED_STATUS, KFP_ERROR_STATUS))
def poll_kfp_with_retry(host: Text, run_id: Text, retry_limit: int,
timeout: datetime.timedelta,
polling_interval: int) -> Text:
"""Gets the pipeline execution status by polling KFP at the specified host.
Args:
host: address of the KFP deployment.
run_id: id of the execution of the pipeline.
    retry_limit: number of retries that will be performed before raising an error.
timeout: timeout of this long-running operation, in timedelta.
polling_interval: interval between two consecutive polls, in seconds.
Returns:
The final status of the execution. Possible value can be found at
https://github.com/kubeflow/pipelines/blob/master/backend/api/run.proto#L254
Raises:
RuntimeError: if polling failed for retry_limit times consecutively.
"""
start_time = datetime.datetime.now()
retry_count = 0
while True:
# TODO(jxzheng): workaround for 1hr timeout limit in kfp.Client().
# This should be changed after
# https://github.com/kubeflow/pipelines/issues/3630 is fixed.
# Currently gcloud authentication token has a 1-hour expiration by default
# but kfp.Client() does not have a refreshing mechanism in place. This
# causes failure when attempting to get running status for a long pipeline
# execution (> 1 hour).
# Instead of implementing a whole authentication refreshing mechanism
# here, we chose re-creating kfp.Client() frequently to make sure the
# authentication does not expire. This is based on the fact that
# kfp.Client() is very light-weight.
# See more details at
# https://github.com/kubeflow/pipelines/issues/3630
client = kfp.Client(host=host)
# TODO(b/156784019): workaround the known issue at b/156784019 and
# https://github.com/kubeflow/pipelines/issues/3669
# by wait-and-retry when ApiException is hit.
try:
get_run_response = client.get_run(run_id=run_id)
except rest.ApiException as api_err:
# If get_run failed with ApiException, wait _POLLING_INTERVAL and retry.
if retry_count < retry_limit:
retry_count += 1
logging.info('API error %s was hit. Retrying: %s / %s.', api_err,
retry_count, retry_limit)
time.sleep(polling_interval)
continue
raise RuntimeError('Still hit remote error after %s retries: %s' %
(retry_limit, api_err))
else:
# If get_run succeeded, reset retry_count.
retry_count = 0
if (get_run_response and get_run_response.run and
get_run_response.run.status and
get_run_response.run.status.lower() in KFP_FINAL_STATUS):
# Return because final status is reached.
return get_run_response.run.status
if datetime.datetime.now() - start_time > timeout:
# Timeout.
raise RuntimeError('Waiting for run timeout at %s' %
datetime.datetime.now().strftime('%H:%M:%S'))
logging.info('Waiting for the job to complete...')
time.sleep(polling_interval)
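# Illustrative call (a sketch; the host URL and run id below are hypothetical):
#
#   final_status = poll_kfp_with_retry(
#       host='https://my-kfp-endpoint.example.com',
#       run_id='my-run-id',
#       retry_limit=5,
#       timeout=datetime.timedelta(hours=2),
#       polling_interval=60)
#   assert final_status.lower() == KFP_SUCCESS_STATUS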
def print_failure_log_for_run(host: Text, run_id: Text, namespace: Text):
"""Prints logs of failed components of a run.
  Prints execution logs for failed components using `logging.info`.
  This resembles the behavior of `argo logs` but uses the K8s API directly.
  Doesn't print anything if the run was successful.
Args:
host: address of the KFP deployment.
run_id: id of the execution of the pipeline.
namespace: namespace of K8s cluster.
"""
client = kfp.Client(host=host)
run = client.get_run(run_id=run_id)
workflow_manifest = json.loads(run.pipeline_runtime.workflow_manifest)
if kube_utils.PodPhase(
workflow_manifest['status']['phase']) != kube_utils.PodPhase.FAILED:
return
k8s_client = kube_utils.make_core_v1_api()
  # `status.nodes` maps Argo node ids to node status dicts, so iterate values.
  pods = [
      i for i in workflow_manifest['status']['nodes'].values()
      if i['type'] == 'Pod'
  ]
for pod in pods:
if kube_utils.PodPhase(pod['phase']) != kube_utils.PodPhase.FAILED:
continue
display_name = pod['displayName']
pod_id = pod['id']
log = k8s_client.read_namespaced_pod_log(
pod_id, namespace=namespace, container='main')
for line in log.splitlines():
logging.info('%s:%s', display_name, line)
# Custom component definitions for testing purposes.
class _HelloWorldSpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {
'greeting':
component_spec.ChannelParameter(type=standard_artifacts.String)
}
PARAMETERS = {
'word': component_spec.ExecutionParameter(type=str),
}
class _ByeWorldSpec(component_spec.ComponentSpec):
INPUTS = {
'hearing': component_spec.ChannelParameter(type=standard_artifacts.String)
}
OUTPUTS = {}
PARAMETERS = {}
class HelloWorldComponent(BaseComponent):
"""Producer component."""
SPEC_CLASS = _HelloWorldSpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
# TODO(b/143965964): move the image to private repo if the test is flaky
# due to docker hub.
image='google/cloud-sdk:latest',
command=['sh', '-c'],
args=[
'echo "hello ' +
ph.exec_property('word') +
'" | gsutil cp - ' +
ph.output('greeting')[0].uri
])
def __init__(self, word, greeting=None):
if not greeting:
artifact = standard_artifacts.String()
greeting = channel_utils.as_channel([artifact])
super(HelloWorldComponent,
self).__init__(_HelloWorldSpec(word=word, greeting=greeting))
class ByeWorldComponent(BaseComponent):
"""Consumer component."""
SPEC_CLASS = _ByeWorldSpec
EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
image='bash:latest',
command=['echo'],
args=['received ' + ph.input('hearing')[0].value])
def __init__(self, hearing):
super(ByeWorldComponent, self).__init__(_ByeWorldSpec(hearing=hearing))
def create_primitive_type_components(
pipeline_name: Text) -> List[BaseComponent]:
"""Creates components for testing primitive type artifact passing.
Args:
pipeline_name: Name of this pipeline.
Returns:
A list of TFX custom container components.
"""
hello_world = HelloWorldComponent(word=pipeline_name)
bye_world = ByeWorldComponent(hearing=hello_world.outputs['greeting'])
return [hello_world, bye_world]
def create_e2e_components(
pipeline_root: Text,
csv_input_location: Text,
transform_module: Text,
trainer_module: Text,
) -> List[BaseComponent]:
"""Creates components for a simple Chicago Taxi TFX pipeline for testing.
Args:
pipeline_root: The root of the pipeline output.
csv_input_location: The location of the input data directory.
transform_module: The location of the transform module file.
trainer_module: The location of the trainer module file.
Returns:
A list of TFX components that constitutes an end-to-end test pipeline.
"""
example_gen = CsvExampleGen(input_base=csv_input_location)
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=transform_module)
latest_model_resolver = resolver.Resolver(
strategy_class=latest_artifact_strategy.LatestArtifactStrategy,
latest_model=Channel(type=Model)).with_id('latest_model_resolver')
trainer = Trainer(
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
base_model=latest_model_resolver.outputs['latest_model'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10),
eval_args=trainer_pb2.EvalArgs(num_steps=5),
module_file=trainer_module,
)
# Set the TFMA config for Model Evaluation and Validation.
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
metrics_specs=[
tfma.MetricsSpec(
metrics=[tfma.MetricConfig(class_name='ExampleCount')],
thresholds={
'accuracy':
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
eval_config=eval_config)
infra_validator = InfraValidator(
model=trainer.outputs['model'],
examples=example_gen.outputs['examples'],
serving_spec=infra_validator_pb2.ServingSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServing(
tags=['latest']),
kubernetes=infra_validator_pb2.KubernetesConfig()),
request_spec=infra_validator_pb2.RequestSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServingRequestSpec())
)
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=os.path.join(pipeline_root, 'model_serving'))))
return [
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
latest_model_resolver,
trainer,
evaluator,
infra_validator,
pusher,
]
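# Illustrative wiring of these components into a pipeline (a sketch; the GCS
# paths and the pipeline name are hypothetical):
#
#   components = create_e2e_components(
#       pipeline_root='gs://my-bucket/pipeline_root',
#       csv_input_location='gs://my-bucket/data/csv',
#       transform_module='gs://my-bucket/modules/transform_module.py',
#       trainer_module='gs://my-bucket/modules/trainer_module.py')
#   pipeline = tfx_pipeline.Pipeline(
#       pipeline_name='taxi-e2e-test',
#       pipeline_root='gs://my-bucket/pipeline_root',
#       components=components)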
@retry.retry(ignore_eventual_failure=True)
def delete_ai_platform_model(model_name):
"""Delete pushed model with the given name in AI Platform."""
# In order to delete model, all versions in the model must be deleted first.
versions_command = ('gcloud', 'ai-platform', 'versions', 'list',
'--model={}'.format(model_name), '--region=global')
# The return code of the following subprocess call will be explicitly checked
# using the logic below, so we don't need to call check_output().
versions = subprocess.run(versions_command, stdout=subprocess.PIPE) # pylint: disable=subprocess-run-check
if versions.returncode == 0:
logging.info('Model %s has versions %s', model_name, versions.stdout)
# The first stdout line is headers, ignore. The columns are
# [NAME] [DEPLOYMENT_URI] [STATE]
#
# By specification of test case, the last version in the output list is the
# default version, which will be deleted last in the for loop, so there's no
    # special handling needed here.
# The operation setting default version is at
# https://github.com/tensorflow/tfx/blob/65633c772f6446189e8be7c6332d32ea221ff836/tfx/extensions/google_cloud_ai_platform/runner.py#L309
for version in versions.stdout.decode('utf-8').strip('\n').split('\n')[1:]:
version = version.split()[0]
logging.info('Deleting version %s of model %s', version, model_name)
version_delete_command = ('gcloud', '--quiet', 'ai-platform', 'versions',
'delete', version,
'--model={}'.format(model_name),
'--region=global')
subprocess.run(version_delete_command, check=True)
logging.info('Deleting model %s', model_name)
subprocess.run(('gcloud', '--quiet', 'ai-platform', 'models', 'delete',
model_name, '--region=global'),
check=True)
class BaseKubeflowTest(test_case_utils.TfxTest):
"""Base class that defines testing harness for pipeline on KubeflowRunner."""
_POLLING_INTERVAL_IN_SECONDS = 10
# The following environment variables need to be set prior to calling the test
# in this file. All variables are required and do not have a default.
# The base container image name to use when building the image used in tests.
_BASE_CONTAINER_IMAGE = os.environ['KFP_E2E_BASE_CONTAINER_IMAGE']
# The src path to use to build docker image
_REPO_BASE = os.environ['KFP_E2E_SRC']
# The project id to use to run tests.
_GCP_PROJECT_ID = os.environ['KFP_E2E_GCP_PROJECT_ID']
# The GCP region in which the end-to-end test is run.
_GCP_REGION = os.environ['KFP_E2E_GCP_REGION']
# The GCP bucket to use to write output artifacts.
_BUCKET_NAME = os.environ['KFP_E2E_BUCKET_NAME']
# The location of test data. The input files are copied to a test-local
# location for each invocation, and cleaned up at the end of test.
_TEST_DATA_ROOT = os.environ['KFP_E2E_TEST_DATA_ROOT']
# The location of test user module. Will be packaged and copied to under the
# pipeline root before pipeline execution.
_MODULE_ROOT = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'components/testdata/module_file')
@classmethod
def setUpClass(cls):
super(BaseKubeflowTest, cls).setUpClass()
if ':' not in cls._BASE_CONTAINER_IMAGE:
# Generate base container image for the test if tag is not specified.
cls.container_image = '{}:{}'.format(cls._BASE_CONTAINER_IMAGE,
test_utils.random_id())
# Create a container image for use by test pipelines.
test_utils.build_and_push_docker_image(cls.container_image,
cls._REPO_BASE)
else: # Use the given image as a base image.
cls.container_image = cls._BASE_CONTAINER_IMAGE
@classmethod
def tearDownClass(cls):
super(BaseKubeflowTest, cls).tearDownClass()
if cls.container_image != cls._BASE_CONTAINER_IMAGE:
# Delete container image used in tests.
logging.info('Deleting image %s', cls.container_image)
subprocess.run(
['gcloud', 'container', 'images', 'delete', cls.container_image],
check=True)
@classmethod
def _get_mysql_pod_name(cls):
"""Returns MySQL pod name in the cluster."""
pod_name = subprocess.check_output([
'kubectl',
'-n',
'kubeflow',
'get',
'pods',
'-l',
'app=mysql',
'--no-headers',
'-o',
'custom-columns=:metadata.name',
]).decode('utf-8').strip('\n')
logging.info('MySQL pod name is: %s', pod_name)
return pod_name
@classmethod
def _get_mlmd_db_name(cls, pipeline_name: Text):
# MySQL DB names must not contain '-' while k8s names must not contain '_'.
# So we replace the dashes here for the DB name.
valid_mysql_name = pipeline_name.replace('-', '_')
# MySQL database name cannot exceed 64 characters.
return 'mlmd_{}'.format(valid_mysql_name[-59:])
def setUp(self):
super(BaseKubeflowTest, self).setUp()
self._test_dir = self.tmp_dir
self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
self._test_output_dir = 'gs://{}/test_output'.format(self._BUCKET_NAME)
test_id = test_utils.random_id()
self._testdata_root = 'gs://{}/test_data/{}'.format(self._BUCKET_NAME,
test_id)
subprocess.run(
['gsutil', 'cp', '-r', self._TEST_DATA_ROOT, self._testdata_root],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
self._data_root = os.path.join(self._testdata_root, 'external', 'csv')
self._transform_module = os.path.join(self._MODULE_ROOT,
'transform_module.py')
self._trainer_module = os.path.join(self._MODULE_ROOT, 'trainer_module.py')
self.addCleanup(self._delete_test_dir, test_id)
def _delete_test_dir(self, test_id: Text):
"""Deletes files for this test including the module file and data files.
Args:
test_id: Randomly generated id of the test.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_data/{}'.format(test_id))
def _delete_workflow(self, workflow_name: Text):
"""Deletes the specified Argo workflow."""
logging.info('Deleting workflow %s', workflow_name)
subprocess.run(['argo', '--namespace', 'kubeflow', 'delete', workflow_name],
check=True)
def _run_workflow(self,
workflow_file: Text,
workflow_name: Text,
parameter: Dict[Text, Text] = None):
"""Runs the specified workflow with Argo.
Blocks until the workflow has run (successfully or not) to completion.
Args:
workflow_file: YAML file with Argo workflow spec for the pipeline.
workflow_name: Name to use for the workflow.
parameter: mapping from pipeline parameter name to its runtime value.
"""
# TODO(ajaygopinathan): Consider using KFP cli instead.
def _format_parameter(parameter: Dict[Text, Any]) -> List[Text]:
"""Format the pipeline parameter section of argo workflow."""
if parameter:
result = []
for k, v in parameter.items():
result.append('-p')
result.append('{}={}'.format(k, v))
return result
else:
return []
run_command = [
'argo',
'submit',
'--name',
workflow_name,
'--namespace',
'kubeflow',
'--serviceaccount',
'pipeline-runner',
workflow_file,
]
run_command += _format_parameter(parameter)
logging.info('Launching workflow %s with parameter %s', workflow_name,
_format_parameter(parameter))
with test_utils.Timer('RunningPipelineToCompletion'):
subprocess.run(run_command, check=True)
# Wait in the loop while pipeline is pending or running state.
status = 'Pending'
while status in ('Pending', 'Running'):
time.sleep(self._POLLING_INTERVAL_IN_SECONDS)
status = self._get_argo_pipeline_status(workflow_name)
def _delete_pipeline_output(self, pipeline_name: Text):
"""Deletes output produced by the named pipeline.
Args:
pipeline_name: The name of the pipeline.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_output/{}'.format(pipeline_name))
def _delete_pipeline_metadata(self, pipeline_name: Text):
"""Drops the database containing metadata produced by the pipeline.
Args:
pipeline_name: The name of the pipeline owning the database.
"""
pod_name = self._get_mysql_pod_name()
db_name = self._get_mlmd_db_name(pipeline_name)
command = [
'kubectl',
'-n',
'kubeflow',
'exec',
'-it',
pod_name,
'--',
'mysql',
'--user',
'root',
'--execute',
'drop database if exists {};'.format(db_name),
]
logging.info('Dropping MLMD DB with name: %s', db_name)
with test_utils.Timer('DeletingMLMDDatabase'):
subprocess.run(command, check=True)
def _pipeline_root(self, pipeline_name: Text):
return os.path.join(self._test_output_dir, pipeline_name)
def _create_pipeline(self, pipeline_name: Text,
components: List[BaseComponent]):
"""Creates a pipeline given name and list of components."""
return tfx_pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=self._pipeline_root(pipeline_name),
components=components,
enable_cache=True,
)
def _create_dataflow_pipeline(self,
pipeline_name: Text,
components: List[BaseComponent],
wait_until_finish_ms: int = 1000 * 60 * 20):
"""Creates a pipeline with Beam DataflowRunner."""
pipeline = self._create_pipeline(pipeline_name, components)
pipeline.beam_pipeline_args = [
'--runner=TestDataflowRunner',
'--wait_until_finish_duration=%d' % wait_until_finish_ms,
'--project=' + self._GCP_PROJECT_ID,
'--temp_location=' +
os.path.join(self._pipeline_root(pipeline_name), 'tmp'),
'--region=' + self._GCP_REGION,
# TODO(b/171733562): Remove `use_runner_v2` once it is the default for
# Dataflow.
'--experiments=use_runner_v2',
]
return pipeline
def _get_kubeflow_metadata_config(
self) -> kubeflow_pb2.KubeflowMetadataConfig:
config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
return config
def _get_argo_pipeline_status(self, workflow_name: Text) -> Text:
"""Get Pipeline status.
Args:
workflow_name: The name of the workflow.
Returns:
Simple status string which is returned from `argo get` command.
"""
get_workflow_command = [
'argo', '--namespace', 'kubeflow', 'get', workflow_name
]
output = subprocess.check_output(get_workflow_command).decode('utf-8')
logging.info('Argo output ----\n%s', output)
match = re.search(r'^Status:\s+(.+)$', output, flags=re.MULTILINE)
self.assertIsNotNone(match)
return match.group(1)
def _compile_and_run_pipeline(self,
pipeline: tfx_pipeline.Pipeline,
workflow_name: Text = None,
parameters: Dict[Text, Any] = None):
"""Compiles and runs a KFP pipeline.
Args:
pipeline: The logical pipeline to run.
      workflow_name: The argo workflow name; defaults to the pipeline name.
      parameters: Values of the runtime parameters of the pipeline.
"""
pipeline_name = pipeline.pipeline_info.pipeline_name
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
kubeflow_metadata_config=self._get_kubeflow_metadata_config(),
tfx_image=self.container_image)
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(pipeline)
file_path = os.path.join(self._test_dir, '{}.tar.gz'.format(pipeline_name))
self.assertTrue(fileio.exists(file_path))
tarfile.TarFile.open(file_path).extract('pipeline.yaml')
pipeline_file = os.path.join(self._test_dir, 'pipeline.yaml')
self.assertIsNotNone(pipeline_file)
workflow_name = workflow_name or pipeline_name
# Ensure cleanup regardless of whether pipeline succeeds or fails.
self.addCleanup(self._delete_workflow, workflow_name)
self.addCleanup(self._delete_pipeline_metadata, pipeline_name)
self.addCleanup(self._delete_pipeline_output, pipeline_name)
# Run the pipeline to completion.
self._run_workflow(pipeline_file, workflow_name, parameters)
# Obtain workflow logs.
get_logs_command = [
'argo', '--namespace', 'kubeflow', 'logs', '-w', workflow_name
]
logs_output = subprocess.check_output(get_logs_command).decode('utf-8')
# Check if pipeline completed successfully.
status = self._get_argo_pipeline_status(workflow_name)
self.assertEqual(
'Succeeded', status, 'Pipeline {} failed to complete successfully: {}'
'\nFailed workflow logs:\n{}'.format(pipeline_name, status,
logs_output))
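# Illustrative test case built on the harness above (a sketch; the test class
# and pipeline name are hypothetical):
#
#   class MyKubeflowE2ETest(BaseKubeflowTest):
#
#     def testPrimitiveArtifactPassing(self):
#       pipeline_name = 'primitive-artifacts-{}'.format(test_utils.random_id())
#       components = create_primitive_type_components(pipeline_name)
#       pipeline = self._create_pipeline(pipeline_name, components)
#       self._compile_and_run_pipeline(pipeline)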
|
the-stack_106_29895 | """
Source code for the datetime types that Fourth provides.
"""
from __future__ import annotations
__all__ = ("BaseDatetime", "LocalDatetime", "UTCDatetime")
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta, timezone
from operator import ge, gt, le, lt
from typing import Any, Callable, ClassVar, NoReturn, Union
from ._internal import contains_timezone
class BaseDatetime(metaclass=ABCMeta):
"""
Abstract base class for Fourth datetime types.
    Contains a single real attribute `_at`, a datetime.datetime instance
    that the Datetime is "at".
Implements __setattr__ and __delattr__ to make instances pseudo-immutable.
"""
# Instance Attributes
_at: datetime
__slots__ = ("_at",)
# Special Methods
@abstractmethod
def __init__(self, from_datetime: datetime) -> None:
"""
Set the _at attribute to the datetime we are initialising from.
Subclasses should implement some validation of from_datetime before
passing it here.
:param from_datetime: The datetime to initialise from.
"""
# use object.__setattr__ to get around pseudo immutability.
object.__setattr__(self, "_at", from_datetime)
def __setattr__(self, name: str, value: Any) -> NoReturn:
"""
Setting attributes is disallowed for pseudo-immutability.
:param name: The name of the attribute being set.
:param value: The value to set the attribute to.
:raises AttributeError: Always raised.
"""
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
def __delattr__(self, name: str) -> NoReturn:
"""
Deleting attributes is disallowed for pseudo-immutability.
:param name: The name of the attribute being deleted.
:raises AttributeError: Always raised.
"""
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{name}'"
)
def __repr__(self) -> str:
"""
Construct a command-line representation of the Datetime.
Should be able to eval() this and get back an identical instance.
:return: The representation of the Datetime.
"""
return "{}.at({}, {}, {}, {}, {}, {}, {})".format(
self.__class__.__name__,
self._at.year,
self._at.month,
self._at.day,
self._at.hour,
self._at.minute,
self._at.second,
self._at.microsecond,
)
def __str__(self) -> str:
"""
Construct a string representation of the Datetime.
:return: An ISO format string representation of the Datetime.
"""
return self.iso_format(sep="T", timespec="microseconds")
def __format__(self, format_spec: str) -> str:
"""
Called by the format() built-in function to build a formatted representation.
:param format_spec: The formatting style required.
        :return: The formatted string representation of the Datetime.
"""
if format_spec == "":
return str(self)
else:
return self.strftime(format_spec)
def __getstate__(self) -> datetime:
"""
Called when the object is pickled.
:return: The content of the instance to pickle.
"""
return self._at
def __setstate__(self, state: datetime) -> None:
"""
Called with the result of self.__getstate__() when unpickling.
:param state: The self._at datetime instance that was pickled.
"""
# use object.__setattr__ to get around pseudo immutability.
object.__setattr__(self, "_at", state)
# Constructors
@classmethod
@abstractmethod
def at(
cls,
year: int,
month: int,
day: int,
hour: int = 0,
minute: int = 0,
second: int = 0,
microsecond: int = 0,
) -> BaseDatetime:
"""
Return a new instance at the specified date and time.
Subclasses must implement this method.
"""
raise NotImplementedError(f"{cls.__name__} does not implement at()")
# Instance Properties
@property
def year(self) -> int:
return self._at.year
@property
def month(self) -> int:
return self._at.month
@property
def day(self) -> int:
return self._at.day
@property
def hour(self) -> int:
return self._at.hour
@property
def minute(self) -> int:
return self._at.minute
@property
def second(self) -> int:
return self._at.second
@property
def microsecond(self) -> int:
return self._at.microsecond
# Instance Methods
def as_datetime(self) -> datetime:
"""
Return a python standard library datetime.datetime instance
corresponding to this Datetime.
:return: A datetime.datetime instance.
"""
return self._at
def iso_format(self, *, sep: str = "T", timespec: str = "microseconds") -> str:
"""
Construct an ISO 8601 format string of the Datetime.
:param sep: Character to separate the date and time components.
:param timespec: How to format the time component.
Has the same meaning and available values as datetime.isoformat().
Defaults to `"microseconds"` since that gives the most information
and is the most consistent.
:return: The ISO 8601 format string representation of the Datetime.
"""
return self._at.isoformat(sep=sep, timespec=timespec)
@abstractmethod
def strftime(self, format_string: str) -> str:
"""
Return a string representation of the Datetime, controlled by the format
string. See datetime.datetime.strftime() for a list of the formatting options.
:param format_string: The format string the representation will match.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement strftime()"
)
class LocalDatetime(BaseDatetime):
"""
A local Datetime with no timezone.
The internal datetime always has `tzinfo=None`
"""
# Class Attributes
min: ClassVar[LocalDatetime]
max: ClassVar[LocalDatetime]
# Instance Attributes
__slots__ = ()
# Special Methods
def __init__(self, at: datetime) -> None:
"""
Initialise a LocalDatetime from a naive datetime.datetime instance.
:param at: A naive datetime.datetime instance for this LocalDatetime.
"""
if at.tzinfo is not None:
raise ValueError(
"LocalDatetime can't be initialised with an aware datetime",
)
super().__init__(at)
def __eq__(self, other: Any) -> bool:
"""
A LocalDateTime can be equal to other LocalDateTime instances and
datetime.datetime instances that are naive.
Explicitly not equal to aware datetime.datetime instances.
:param other: The object to check if equal to.
:return: True if equal. False if not. NotImplemented otherwise.
"""
if isinstance(other, LocalDatetime):
return other._at == self._at
elif isinstance(other, datetime):
return other.tzinfo is None and other == self._at
else:
return NotImplemented
def __hash__(self) -> int:
"""
The hash is the same as the internal datetime's hash. This satisfies the
property that objects which compare equal have the same hash value.
:return: The hash as an integer.
"""
return hash(self._at)
# Rich Comparison Methods
def _rich_compare(self, other: Any, compare: Callable[[Any, Any], bool]) -> bool:
"""
Do a rich comparison with other. This method contains the common logic for all
the rich comparisons.
Instances of LocalDatetime can be compared with other LocalDatetime instances,
and naive datetime.datetime instances.
:param other: The other object to compare to.
:param compare: A function to compare objects once we know we can.
:return: True/False if determined. Otherwise NotImplemented.
"""
if isinstance(other, LocalDatetime):
return compare(self._at, other._at)
elif isinstance(other, datetime) and other.tzinfo is None:
return compare(self._at, other)
else:
return NotImplemented
def __lt__(self, other: Any) -> bool:
return self._rich_compare(other, lt)
def __le__(self, other: Any) -> bool:
return self._rich_compare(other, le)
def __gt__(self, other: Any) -> bool:
return self._rich_compare(other, gt)
def __ge__(self, other: Any) -> bool:
return self._rich_compare(other, ge)
# Numeric Methods
def __add__(self, other: Any) -> LocalDatetime:
"""
Add a LocalDatetime and a timedelta.
:param other: The timedelta to add to.
:return: A LocalDatetime which is the result.
"""
if isinstance(other, timedelta):
return LocalDatetime(self._at + other)
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other: Any) -> Union[LocalDatetime, timedelta]:
"""
Subtract a LocalDatetime instance, or a naive datetime, or a timedelta,
from this LocalDatetime.
:param other: The object being subtracted from this.
:return: Either a timedelta of the difference between two datetimes,
or a LocalDatetime.
"""
if isinstance(other, LocalDatetime):
return self._at - other._at
elif isinstance(other, datetime) and other.tzinfo is None:
return self._at - other
elif isinstance(other, timedelta):
return LocalDatetime(self._at - other)
else:
return NotImplemented
def __rsub__(self, other: Any) -> timedelta:
"""
Subtract this LocalDatetime from a naive datetime.
:param other: The naive datetime.
:return: A timedelta of the difference between the datetimes.
"""
if isinstance(other, datetime) and other.tzinfo is None:
return other - self._at
else:
return NotImplemented
# Constructors
@classmethod
def at(
cls,
year: int,
month: int,
day: int,
hour: int = 0,
minute: int = 0,
second: int = 0,
microsecond: int = 0,
) -> LocalDatetime:
"""
Return a new LocalDatetime at the specified time.
The year, month and day arguments are required.
All arguments must be integers.
:param year:
:param month:
:param day:
:param hour:
:param minute:
:param second:
:param microsecond:
:return: A LocalDatetime instance at the specified time.
"""
return cls(
datetime(
year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=second,
microsecond=microsecond,
tzinfo=None,
)
)
@classmethod
def now(cls) -> LocalDatetime:
"""
Return a new LocalDatetime instance for the current date and time.
:return: A LocalDatetime instance for the current date and time.
"""
return cls(datetime.now())
@classmethod
def from_iso_format(cls, date_string: str) -> LocalDatetime:
"""
Return a new LocalDatetime instance corresponding to the ISO 8601
formatted datetime string.
The datetime string must not contain timezone information.
This is intended to be the inverse of LocalDatetime.iso_format().
        Parsing arbitrary ISO 8601 strings is not supported.
:param date_string: The ISO 8601 formatted datetime string.
:return: The corresponding LocalDatetime instance.
:raises ValueError: When the datetime string contains tz info.
"""
datetime_obj = datetime.fromisoformat(date_string)
if datetime_obj.tzinfo is not None:
raise ValueError("fromisoformat: date_string contained tz info")
return cls(datetime_obj)
@classmethod
def strptime(cls, date_string: str, format_string: str) -> LocalDatetime:
"""
Returns a new LocalDatetime instance corresponding to the datetime
string after being parsed according to the format string.
Uses datetime.datetime.strptime to parse the strings.
The datetime and format strings must not have a timezone component.
:param date_string: The datetime string.
:param format_string: The format string.
:return: The corresponding LocalDatetime instance.
:raises ValueError: When the strings have a timezone component.
"""
datetime_obj = datetime.strptime(date_string, format_string)
if datetime_obj.tzinfo is not None:
raise ValueError("strptime: date_string contained tz info")
return cls(datetime_obj)
# Instance Methods
def strftime(self, format_string: str) -> str:
"""
Return a string representation of the date and time, controlled by the format
string. See datetime.datetime.strftime() for a list of the formatting options.
The format string must not contain timezone directive (%z, %Z), since
LocalDatetime has no timezone information.
:param format_string: The format string the representation will match.
:return: The string representation of the date and time.
:raises ValueError: When the format string contains timezone directives.
"""
if contains_timezone(format_string):
raise ValueError(
"format string for LocalDatetime.strftime() must not contain timezone "
"directives ('%z', '%Z')"
)
return self._at.strftime(format_string)
LocalDatetime.min = LocalDatetime(datetime.min)
LocalDatetime.max = LocalDatetime(datetime.max)
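# Illustrative usage of LocalDatetime (a sketch; the values are arbitrary):
#
#   dt = LocalDatetime.at(2021, 6, 1, hour=12, minute=30)
#   dt.iso_format()                               # '2021-06-01T12:30:00.000000'
#   dt + timedelta(days=1) > dt                   # True
#   LocalDatetime.from_iso_format('2021-06-01T12:30:00') == dt   # True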
class UTCDatetime(BaseDatetime):
"""
A Datetime in the UTC timezone.
The internal datetime always has `tzinfo=timezone.utc`
"""
# Class Attributes
min: ClassVar[UTCDatetime]
max: ClassVar[UTCDatetime]
# Instance Attributes
__slots__ = ()
# Special Methods
def __init__(self, at: datetime) -> None:
"""
Initialise a UTCDatetime from an aware datetime.datetime instance.
:param at: An aware datetime.datetime instance for this UTCDatetime.
:raises ValueError: When the `at` argument is naive.
"""
if at.tzinfo is None:
raise ValueError("UTCDatetime can't be initialised with a naive datetime")
at = at.astimezone(timezone.utc)
super().__init__(at)
def __eq__(self, other: Any) -> bool:
"""
A UTCDateTime can be equal to other UTCDateTime instances and
datetime.datetime instances that are aware.
Explicitly not equal to naive datetime.datetime instances.
:param other: The object to check if equal to.
:return: True if equal. False if not. NotImplemented otherwise.
"""
if isinstance(other, UTCDatetime):
return other._at == self._at
elif isinstance(other, datetime):
return other.tzinfo is not None and other == self._at
else:
return NotImplemented
def __hash__(self) -> int:
"""
The hash is the same as the internal datetime's hash. This satisfies the
property that objects which compare equal have the same hash value.
:return: The hash as an integer.
"""
return hash(self._at)
# Rich Comparison Methods
def _rich_compare(self, other: Any, compare: Callable[[Any, Any], bool]) -> bool:
"""
Do a rich comparison with other. This method contains the common logic for all
the rich comparisons.
Instances of UTCDatetime can be compared with other UTCDatetime instances,
and aware datetime.datetime instances.
:param other: The other object to compare to.
:param compare: A function to compare objects once we know we can.
:return: True/False if determined. Otherwise NotImplemented.
"""
if isinstance(other, UTCDatetime):
return compare(self._at, other._at)
elif isinstance(other, datetime) and other.tzinfo is not None:
return compare(self._at, other)
else:
return NotImplemented
def __lt__(self, other: Any) -> bool:
return self._rich_compare(other, lt)
def __le__(self, other: Any) -> bool:
return self._rich_compare(other, le)
def __gt__(self, other: Any) -> bool:
return self._rich_compare(other, gt)
def __ge__(self, other: Any) -> bool:
return self._rich_compare(other, ge)
# Numeric Methods
def __add__(self, other: Any) -> UTCDatetime:
"""
Add a UTCDatetime and a timedelta.
:param other: The timedelta to add to.
:return: A UTCDatetime which is the result.
"""
if isinstance(other, timedelta):
return UTCDatetime(self._at + other)
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other: Any) -> Union[UTCDatetime, timedelta]:
"""
Subtract a UTCDatetime instance, or an aware datetime, or a timedelta,
from this UTCDatetime.
:param other: The object being subtracted from this.
:return: Either a timedelta of the difference between two datetimes,
or a UTCDatetime.
"""
if isinstance(other, UTCDatetime):
return self._at - other._at
elif isinstance(other, datetime) and other.tzinfo is not None:
return self._at - other
elif isinstance(other, timedelta):
return UTCDatetime(self._at - other)
else:
return NotImplemented
def __rsub__(self, other: Any) -> timedelta:
"""
Subtract this UTCDatetime from an aware datetime.
:param other: The aware datetime.
:return: A timedelta of the difference between the datetimes.
"""
if isinstance(other, datetime) and other.tzinfo is not None:
return other - self._at
else:
return NotImplemented
# Constructors
@classmethod
def at(
cls,
year: int,
month: int,
day: int,
hour: int = 0,
minute: int = 0,
second: int = 0,
microsecond: int = 0,
) -> UTCDatetime:
"""
Return a new UTCDatetime at the specified time.
The year, month and day arguments are required.
All arguments must be integers.
:param year:
:param month:
:param day:
:param hour:
:param minute:
:param second:
:param microsecond:
:return: A UTCDatetime instance at the specified time.
"""
return cls(
datetime(
year=year,
month=month,
day=day,
hour=hour,
minute=minute,
second=second,
microsecond=microsecond,
tzinfo=timezone.utc,
)
)
@classmethod
def now(cls) -> UTCDatetime:
"""
Return a new UTCDatetime instance for the current UTC date and time.
:return: A UTCDatetime instance for the current UTC date and time.
"""
return cls(datetime.now(timezone.utc))
@classmethod
def from_timestamp(cls, timestamp: Union[int, float]) -> UTCDatetime:
"""
Return a new UTCDatetime instance corresponding to the POSIX timestamp.
This method is only available on UTCDatetime since POSIX timestamps are
inherently 'in' the UTC timezone.
Constructing a LocalDatetime from a POSIX timestamp would result in a
loss of data/context.
:param timestamp: The POSIX timestamp.
:return: The corresponding UTCDatetime instance.
"""
return cls(datetime.fromtimestamp(timestamp, timezone.utc))
@classmethod
def from_iso_format(cls, date_string: str) -> UTCDatetime:
"""
Return a new UTCDatetime instance corresponding to the ISO 8601
formatted datetime string.
The datetime string must contain some timezone information, so the date
and time can be converted to UTC.
This is intended to be the inverse of UTCDatetime.iso_format().
        Parsing arbitrary ISO 8601 strings is not supported.
:param date_string: The ISO 8601 formatted datetime string.
:return: The corresponding UTCDatetime instance.
:raises ValueError: When the datetime string doesn't contain tz info.
"""
datetime_obj = datetime.fromisoformat(date_string)
if datetime_obj.tzinfo is None:
raise ValueError("fromisoformat: date_string didn't contain tz info")
return cls(datetime_obj)
@classmethod
def strptime(cls, date_string: str, format_string: str) -> UTCDatetime:
"""
Returns a new UTCDatetime instance corresponding to the datetime string
after being parsed according to the format string.
Uses datetime.datetime.strptime to parse the strings.
The datetime and format strings must have a timezone component so that
the date and time can be converted to UTC.
:param date_string: The datetime string.
:param format_string: The format string.
:return: The corresponding UTCDatetime instance.
:raises ValueError: When the strings don't have a timezone component.
"""
datetime_obj = datetime.strptime(date_string, format_string)
if datetime_obj.tzinfo is None:
raise ValueError("strptime: date_string didn't contain tz info")
return cls(datetime_obj)
# Instance Methods
def strftime(self, format_string: str) -> str:
"""
Return a string representation of the date and time, controlled by the format
string. See datetime.datetime.strftime() for a list of the formatting options.
:param format_string: The format string the representation will match.
:return: The string representation of the date and time.
"""
return self._at.strftime(format_string)
UTCDatetime.min = UTCDatetime(datetime.min.replace(tzinfo=timezone.utc))
UTCDatetime.max = UTCDatetime(datetime.max.replace(tzinfo=timezone.utc))
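# Illustrative usage of UTCDatetime (a sketch; the values are arbitrary):
#
#   dt = UTCDatetime.from_timestamp(0)
#   dt.iso_format()                   # '1970-01-01T00:00:00.000000+00:00'
#   UTCDatetime.now() - dt            # timedelta since the Unix epoch
#   UTCDatetime.at(2021, 1, 1) == datetime(2021, 1, 1, tzinfo=timezone.utc)   # True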
|
the-stack_106_29897 | load("//dart:dart_proto_compile.bzl", "dart_proto_compile")
load("@io_bazel_rules_dart//dart/build_rules:core.bzl", "dart_library")
def dart_proto_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
dart_proto_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
dart_library(
name = name,
srcs = [name_pb],
deps = [
str(Label("@vendor_protobuf//:protobuf")),
],
pub_pkg_name = name,
visibility = visibility,
)
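# Illustrative BUILD usage (a sketch; the load path and target names are
# hypothetical and depend on where this macro lives in the repository):
#
#   load("//dart:dart_proto_library.bzl", "dart_proto_library")
#
#   dart_proto_library(
#       name = "greeter_dart_proto",
#       deps = ["//proto:greeter_proto"],
#   )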
|
the-stack_106_29901 | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import accuracy, AverageMeter
from config2 import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
def parse_option():
parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
# easy config modification
parser.add_argument('--batch-size', type=int, help="batch size for single GPU")
parser.add_argument('--data-path', type=str, help='path to dataset')
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--output', default='output', type=str, metavar='PATH',
help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
# distributed training
parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel')
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
def main(config):
dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(config)
logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}")
model = build_model(config)
model.cuda()
logger.info(str(model))
optimizer = build_optimizer(config, model)
if config.AMP_OPT_LEVEL != "O0":
model, optimizer = amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, 'flops'):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
if config.AUG.MIXUP > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = torch.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}")
config.defrost()
config.MODEL.RESUME = resume_file
config.freeze()
logger.info(f'auto resuming from {resume_file}')
else:
logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
if config.EVAL_MODE:
return
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler)
if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
save_checkpoint(config, epoch, model_without_ddp, max_accuracy, optimizer, lr_scheduler, logger)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
max_accuracy = max(max_accuracy, acc1)
logger.info(f'Max accuracy: {max_accuracy:.2f}%')
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time {}'.format(total_time_str))
def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler):
model.train()
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
samples = samples.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
outputs = model(samples)
if config.TRAIN.ACCUMULATION_STEPS > 1:
loss = criterion(outputs, targets)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
if config.AMP_OPT_LEVEL != "O0":
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
else:
grad_norm = get_grad_norm(amp.master_params(optimizer))
else:
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
else:
grad_norm = get_grad_norm(model.parameters())
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step_update(epoch * num_steps + idx)
else:
loss = criterion(outputs, targets)
optimizer.zero_grad()
if config.AMP_OPT_LEVEL != "O0":
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD)
else:
grad_norm = get_grad_norm(amp.master_params(optimizer))
else:
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD)
else:
grad_norm = get_grad_norm(model.parameters())
optimizer.step()
lr_scheduler.step_update(epoch * num_steps + idx)
torch.cuda.synchronize()
loss_meter.update(loss.item(), targets.size(0))
norm_meter.update(grad_norm)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]['lr']
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
etas = batch_time.avg * (num_steps - idx)
logger.info(
f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
f'mem {memory_used:.0f}MB')
epoch_time = time.time() - start
logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(config, data_loader, model):
criterion = torch.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(images)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
logger.info(
f'Test: [{idx}/{len(data_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
f'Mem {memory_used:.0f}MB')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for idx, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
if __name__ == '__main__':
_, config = parse_option()
if config.AMP_OPT_LEVEL != "O0":
assert amp is not None, "amp not installed!"
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
rank = int(os.environ["RANK"])
world_size = int(os.environ['WORLD_SIZE'])
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
torch.cuda.set_device(config.LOCAL_RANK)
torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
torch.distributed.barrier()
seed = config.SEED + dist.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linear scale the learning rate according to total batch size, may not be optimal
linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0
# gradient accumulation also need to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}")
if dist.get_rank() == 0:
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
# print config
logger.info(config.dump())
main(config)
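# Illustrative distributed launch (a sketch; the script, config and data paths
# are hypothetical, and torch.distributed.launch supplies --local_rank itself):
#
#   python -m torch.distributed.launch --nproc_per_node 8 --master_port 12345 \
#       this_script.py --cfg configs/swin_tiny_patch4_window7_224.yaml \
#       --data-path /path/to/imagenet --batch-size 128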
|
the-stack_106_29904 | #!/usr/bin/env python3
import os
import subprocess
from typing import List, Optional
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
def run_cmd(cmd: List[str]) -> str:
return subprocess.check_output(cmd, encoding='utf8').strip()
def run_cmd_default(cmd: List[str], default: Optional[str] = None) -> Optional[str]:
try:
return run_cmd(cmd)
except subprocess.CalledProcessError:
return default
def get_git_commit(branch: str = "HEAD", default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", branch], default=default)
def get_git_branch(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "HEAD"], default=default)
def get_git_full_branchname(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], default=default)
def get_git_remote(default: Optional[str] = None) -> Optional[str]:
try:
local_branch = run_cmd(["git", "name-rev", "--name-only", "HEAD"])
tracking_remote = run_cmd(["git", "config", "branch." + local_branch + ".remote"])
return run_cmd(["git", "config", "remote." + tracking_remote + ".url"])
except subprocess.CalledProcessError: # Not on a branch, fallback
return run_cmd_default(["git", "config", "--get", "remote.origin.url"], default=default)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "common", "version.h")) as _versionf:
version = _versionf.read().split('"')[1]
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
training_version: bytes = b"0.2.0"
terms_version: bytes = b"2"
dirty: bool = True
comma_remote: bool = False
tested_branch: bool = False
origin = get_git_remote()
branch = get_git_full_branchname()
if (origin is not None) and (branch is not None):
try:
    comma_remote = origin.startswith('git@github.com:commaai') or origin.startswith('https://github.com/commaai')
tested_branch = get_git_branch() in ['devel', 'release2-staging', 'dashcam-staging', 'release2', 'dashcam']
dirty = False
# Actually check dirty files
if not prebuilt:
# This is needed otherwise touched files might show up as modified
try:
subprocess.check_call(["git", "update-index", "--refresh"])
except subprocess.CalledProcessError:
pass
dirty = (subprocess.call(["git", "diff-index", "--quiet", branch, "--"]) != 0)
# Log dirty files
if dirty and comma_remote:
try:
dirty_files = ""#run_cmd(["git", "diff-index", branch, "--"])
cloudlog.event("dirty comma branch", version=version, dirty=dirty, origin=origin, branch=branch,
dirty_files=dirty_files, commit=get_git_commit(), origin_commit=get_git_commit(branch))
except subprocess.CalledProcessError:
pass
dirty = dirty or (not comma_remote)
dirty = dirty or ('master' in branch)
except subprocess.CalledProcessError:
dirty = True
cloudlog.exception("git subprocess failed while checking dirty")
if __name__ == "__main__":
print("Dirty: %s" % dirty)
print("Version: %s" % version)
print("Remote: %s" % origin)
print("Branch: %s" % branch)
print("Prebuilt: %s" % prebuilt)
|
the-stack_106_29906 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pytype: skip-file
"""
Implements the NumPy API, using the primitives in :mod:`jax.lax`.
NumPy operations are implemented in Python in terms of the primitive operations
in :mod:`jax.lax`. Since NumPy operations are not primitive and instead are
implemented in terms of :mod:`jax.lax` operations, we do not need to define
transformation rules such as gradient or batching rules. Instead,
transformations for NumPy primitives can be derived from the transformation
rules for the underlying :code:`lax` primitives.
"""
import builtins
import collections
from collections.abc import Sequence
import itertools
import os
import re
import string
import types
from typing import Callable
import warnings
import numpy as onp
import opt_einsum
from jax import jit, device_put
from .. import core
from .. import dtypes
from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray
from ..config import flags
from ..interpreters.xla import DeviceArray
from .. import lax
from ..util import partial, get_module_functions, unzip2, prod as _prod, subvals
from ..lib import pytree
from ..lib import xla_client
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),
enum_values=['allow', 'warn', 'raise'],
help=
'Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").')
newaxis = None
# We replace some builtin names to follow Numpy's API, so we capture here.
_abs = builtins.abs
_all = builtins.all
_any = builtins.any
_max = builtins.max
_min = builtins.min
_sum = builtins.sum
_divmod = builtins.divmod
# NumPy constants
pi = onp.pi
e = onp.e
euler_gamma = onp.euler_gamma
inf = onp.inf
NINF = onp.NINF
PZERO = onp.PZERO
NZERO = onp.NZERO
nan = onp.nan
# And some numpy utility functions
set_printoptions = onp.set_printoptions
# We want isinstance(x, np.ndarray) checks in user code to work with the our
# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract
# array base class). We can override the isinstance behavior directly, without
# having the complexity of multiple inheritance on those classes, by defining
# the ndarray class to have a metaclass with special __instancecheck__ behavior.
_arraylike_types = (onp.ndarray, UnshapedArray, DeviceArray)
class _ArrayMeta(type(onp.ndarray)): # type: ignore
"""Metaclass for overriding ndarray isinstance checks."""
def __instancecheck__(self, instance):
try:
return isinstance(instance.aval, _arraylike_types)
except AttributeError:
return isinstance(instance, _arraylike_types)
class ndarray(onp.ndarray, metaclass=_ArrayMeta):
def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,
order=None):
raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
" Use jax.numpy.array, or jax.numpy.zeros instead.")
iscomplexobj = onp.iscomplexobj
shape = _shape = onp.shape
ndim = _ndim = onp.ndim
size = onp.size
_dtype = dtypes.result_type
# At present JAX doesn't have a reason to distinguish between scalars and arrays
# in its object system. Further, we want JAX scalars to have the same type
# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX
# scalar object with JAX promotion behaviors, instead we make the JAX scalar
# types return JAX arrays when instantiated.
class _ScalarMeta(type):
def __hash__(self):
return hash(self.dtype.type)
def __eq__(self, other):
return id(self) == id(other) or self.dtype == other
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
return array(self.dtype.type(x), dtype=self.dtype)
def _make_scalar_type(onp_scalar_type):
return _ScalarMeta(onp_scalar_type.__name__, (object,),
{"dtype": onp.dtype(onp_scalar_type)})
bool_ = _make_scalar_type(onp.bool_)
uint8 = _make_scalar_type(onp.uint8)
uint16 = _make_scalar_type(onp.uint16)
uint32 = _make_scalar_type(onp.uint32)
uint64 = _make_scalar_type(onp.uint64)
int8 = _make_scalar_type(onp.int8)
int16 = _make_scalar_type(onp.int16)
int32 = _make_scalar_type(onp.int32)
int64 = _make_scalar_type(onp.int64)
bfloat16 = _make_scalar_type(dtypes.bfloat16)
float16 = _make_scalar_type(onp.float16)
float32 = single = _make_scalar_type(onp.float32)
float64 = double = _make_scalar_type(onp.float64)
complex64 = csingle = _make_scalar_type(onp.complex64)
complex128 = cdouble = _make_scalar_type(onp.complex128)
int_ = int32 if dtypes.int_ == onp.int32 else int64
float_ = float32 if dtypes.float_ == onp.float32 else float64
complex_ = complex64 if dtypes.complex_ == onp.complex64 else complex128
number = onp.number
inexact = onp.inexact
complexfloating = onp.complexfloating
floating = onp.floating
integer = onp.integer
signedinteger = onp.signedinteger
unsignedinteger = onp.unsignedinteger
flexible = onp.flexible
character = onp.character
object_ = onp.object_
iinfo = dtypes.iinfo
dtype = onp.dtype
can_cast = dtypes.can_cast
issubsctype = dtypes.issubsctype
promote_types = dtypes.promote_types
ComplexWarning = onp.ComplexWarning
array_str = onp.array_str
array_repr = onp.array_repr
save = onp.save
savez = onp.savez
load = onp.load
### utility functions
def _promote_shapes(fun_name, *args):
"""Prepend implicit leading singleton dimensions for Numpy broadcasting."""
if len(args) < 2:
return args
else:
shapes = [shape(arg) for arg in args]
nonscalar_ranks = [len(shp) for shp in shapes if shp]
if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:
return args
else:
if FLAGS.jax_numpy_rank_promotion != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
result_rank = len(lax.broadcast_shapes(*shapes))
return [lax.reshape(arg, (1,) * (result_rank - len(shp)) + shp)
if shp and len(shp) != result_rank else arg
for arg, shp in zip(args, shapes)]
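# Worked example (illustrative, assuming the default 'allow' rank promotion):
# promoting arguments of shape (3,) and (2, 3) reshapes the first to (1, 3) so
# that lax-level broadcasting matches NumPy semantics; values are unchanged.
#
#   >>> [shape(x) for x in _promote_shapes("example", onp.ones(3), onp.ones((2, 3)))]
#   [(1, 3), (2, 3)]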
def _rank_promotion_warning_or_error(fun_name, shapes):
if FLAGS.jax_numpy_rank_promotion == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif FLAGS.jax_numpy_rank_promotion == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
def _promote_dtypes(*args):
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return args
else:
to_dtype = result_type(*args)
return [lax.convert_element_type(x, to_dtype) for x in args]
def _promote_dtypes_inexact(*args):
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype = _to_inexact_dtype(result_type(*args))
return [lax.convert_element_type(x, to_dtype) for x in args]
def _to_inexact_dtype(dtype):
"""Promotes a dtype into an inexact dtype, if it is not already one."""
return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)
def _complex_elem_type(dtype):
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return onp.abs(onp.zeros((), dtype)).dtype
def _result_dtype(op, *args):
"""Compute result dtype of applying op to arguments with given dtypes."""
args = [onp.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]
return _dtype(op(*args))
def _arraylike(x): return isinstance(x, ndarray) or isscalar(x)
def _check_arraylike(fun_name, *args):
"""Check if all args fit JAX's definition of arraylike (ndarray or scalar)."""
if _any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = "{} requires ndarray or scalar arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def _promote_args(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_inexact(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))
def _constant_like(x, const):
return onp.array(const, dtype=_dtype(x))
def update_numpydoc(docstr, fun, op):
  '''Transforms the numpy docstring to remove references to
  parameters that are supported by the numpy version but not the JAX version.'''
#Some numpy functions have an extra tab at the beginning of each line,
#If this function is one of those we remove this extra tab from all the lines
if not hasattr(op, '__code__'):
return docstr
if docstr[:4] == ' ':
lines = docstr.split('\n')
for idx, line in enumerate(lines):
lines[idx] = line.replace(' ', '', 1)
docstr = '\n'.join(lines)
begin_idx = docstr.find("Parameters")
begin_idx = docstr.find("--\n", begin_idx) + 2
end_idx = docstr.find("Returns", begin_idx)
parameters = docstr[begin_idx:end_idx]
param_list = parameters.replace('\n ', '@@').split('\n')
for idx, p in enumerate(param_list):
param = p[:p.find(' : ')].split(", ")[0]
if param not in op.__code__.co_varnames:
param_list[idx] = ''
param_list = [param for param in param_list if param != '']
parameters = '\n'.join(param_list).replace('@@', '\n ')
return docstr[:begin_idx + 1] + parameters + docstr[end_idx - 2:]
_numpy_signature_re = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\([\w\W]*\)$')
def _wraps(fun, update_doc=True, lax_description=""):
"""Like functools.wraps but works with numpy.ufuncs.
  It is important that when wrapping numpy functions the parameter names
  in the original function and in the JAX version are the same.
Parameters:
fun: The function being wrapped
update_doc: whether to transform the numpy docstring to remove references of
parameters that are supported by the numpy version but not the JAX version.
If False, include the numpy docstring verbatim.
"""
def wrap(op):
if not hasattr(fun, '__doc__') or fun.__doc__ is None:
return op
try:
# Numpy doc comments have the form:
# fn(x, y, z) (optional)
#
# A one-line summary
#
# ... everything else ...
# We (a) move the summary to the top, since it is what the Sphinx
# autosummary extension expects, and (b) add a comment below the summary
# to the effect that this is a LAX wrapper of a Numpy function.
sections = fun.__doc__.split("\n\n")
signatures = []
summary = None
for i in range(len(sections)):
if _numpy_signature_re.match(sections[i]):
signatures.append(sections[i])
else:
summary = sections[i].strip()
break
body = "\n\n".join(signatures + sections[i + 1:])
if update_doc:
body = update_numpydoc(body, fun, op)
desc = lax_description + "\n" if lax_description else ""
docstr = (
"{summary}\n\nLAX-backend implementation of :func:`{fun}`.\n"
"{lax_description}Original docstring below.\n\n{body}"
.format(summary=summary, lax_description=desc,
fun=fun.__name__, body=body))
op.__name__ = fun.__name__
op.__doc__ = docstr
finally:
return op
return wrap
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims)."""
axis = int(axis)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
return axis
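# Examples (illustrative):
#
#   >>> _canonicalize_axis(-1, 3)
#   2
#   >>> _canonicalize_axis(1, 3)
#   1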
### implementations of numpy functions in terms of lax
@_wraps(onp.finfo)
def finfo(dtype): return dtypes.finfo(dtype)
@_wraps(onp.issubdtype)
def issubdtype(arg1, arg2): return dtypes.issubdtype(arg1, arg2)
@_wraps(onp.isscalar)
def isscalar(num): return dtypes.is_python_scalar(num) or onp.isscalar(num)
iterable = onp.iterable
@_wraps(onp.result_type)
def result_type(*args):
return dtypes.result_type(*args)
def _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
def fn(x):
x = lax.convert_element_type(x, _to_inexact_dtype(_dtype(x)))
return lax_fn(x)
else:
fn = lambda x: lax_fn(x)
return _wraps(numpy_fn)(fn)
def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
    fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))
else:
fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
return _wraps(numpy_fn)(fn)
def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
absolute = abs = _one_to_one_unop(onp.absolute, lax.abs)
fabs = _one_to_one_unop(onp.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not)
negative = _one_to_one_unop(onp.negative, lax.neg)
positive = _one_to_one_unop(onp.positive, lambda x: x)
floor = _one_to_one_unop(onp.floor, lax.floor, True)
ceil = _one_to_one_unop(onp.ceil, lax.ceil, True)
exp = _one_to_one_unop(onp.exp, lax.exp, True)
log = _one_to_one_unop(onp.log, lax.log, True)
expm1 = _one_to_one_unop(onp.expm1, lax.expm1, True)
log1p = _one_to_one_unop(onp.log1p, lax.log1p, True)
sin = _one_to_one_unop(onp.sin, lax.sin, True)
cos = _one_to_one_unop(onp.cos, lax.cos, True)
tan = _one_to_one_unop(onp.tan, lax.tan, True)
arcsin = _one_to_one_unop(onp.arcsin, lax.asin, True)
arccos = _one_to_one_unop(onp.arccos, lax.acos, True)
arctan = _one_to_one_unop(onp.arctan, lax.atan, True)
sinh = _one_to_one_unop(onp.sinh, lax.sinh, True)
cosh = _one_to_one_unop(onp.cosh, lax.cosh, True)
arcsinh = _one_to_one_unop(onp.arcsinh, lax.asinh, True)
tanh = _one_to_one_unop(onp.tanh, lax.tanh, True)
arcsinh = _one_to_one_unop(onp.arcsinh, lax.asinh, True)
arccosh = _one_to_one_unop(onp.arccosh, lax.acosh, True)
arctanh = _one_to_one_unop(onp.arctanh, lax.atanh, True)
sqrt = _one_to_one_unop(onp.sqrt, lax.sqrt, True)
add = _maybe_bool_binop(onp.add, lax.add, lax.bitwise_or)
bitwise_and = _one_to_one_binop(onp.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(onp.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(onp.bitwise_xor, lax.bitwise_xor)
right_shift = _one_to_one_binop(onp.right_shift, lax.shift_right_arithmetic)
left_shift = _one_to_one_binop(onp.left_shift, lax.shift_left)
equal = _one_to_one_binop(onp.equal, lax.eq)
multiply = _maybe_bool_binop(onp.multiply, lax.mul, lax.bitwise_and)
not_equal = _one_to_one_binop(onp.not_equal, lax.ne)
subtract = _one_to_one_binop(onp.subtract, lax.sub)
arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(onp.minimum, lax.min)
maximum = _one_to_one_binop(onp.maximum, lax.max)
float_power = _one_to_one_binop(onp.float_power, lax.pow, True)
nextafter = _one_to_one_binop(onp.nextafter, lax.nextafter, True)
def _comparison_op(numpy_fn, lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
    # Comparisons on complex types are defined as a lexicographic ordering on
# the (real, imag) pair.
if issubdtype(_dtype(x1), complexfloating):
rx = lax.real(x1)
ry = lax.real(x2)
return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),
lax_fn(rx, ry))
return lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
greater_equal = _comparison_op(onp.greater_equal, lax.ge)
greater = _comparison_op(onp.greater, lax.gt)
less_equal = _comparison_op(onp.less_equal, lax.le)
less = _comparison_op(onp.less, lax.lt)
def _logical_op(np_op, bitwise_op):
@_wraps(np_op, update_doc=False)
def op(*args):
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))
for x in args)
return bitwise_op(*_promote_args(np_op.__name__, *args))
return op
logical_and = _logical_op(onp.logical_and, lax.bitwise_and)
logical_not = _logical_op(onp.logical_not, lax.bitwise_not)
logical_or = _logical_op(onp.logical_or, lax.bitwise_or)
logical_xor = _logical_op(onp.logical_xor, lax.bitwise_xor)
@_wraps(onp.sign)
def sign(x):
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
re = lax.real(x)
return lax.complex(
lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))
return lax.sign(x)
@_wraps(onp.copysign)
def copysign(x1, x2):
if issubdtype(_dtype(x1), complexfloating) or issubdtype(_dtype(x2), complexfloating):
raise TypeError("copysign does not support complex-valued inputs")
x1, x2 = _promote_shapes("copysign", x1, x2)
return where(signbit(x2), -lax.abs(x1), lax.abs(x1))
@_wraps(onp.true_divide)
def true_divide(x1, x2):
x1, x2 = _promote_args_inexact("true_divide", x1, x2)
return lax.div(x1, x2)
@_wraps(onp.divide)
def divide(x1, x2):
# decide whether to perform integer division based on Numpy result dtype, as a
# way to check whether Python 3 style division is active in Numpy
result_dtype = _result_dtype(onp.divide, x1, x2)
if issubdtype(result_dtype, integer):
return floor_divide(x1, x2)
else:
return true_divide(x1, x2)
@_wraps(onp.floor_divide)
def floor_divide(x1, x2):
x1, x2 = _promote_args("floor_divide", x1, x2)
dtype = _dtype(x1)
if issubdtype(dtype, integer):
quotient = lax.div(x1, x2)
select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
# TODO(mattjj): investigate why subtracting a scalar was causing promotion
return where(select, quotient - onp.array(1, _dtype(quotient)), quotient)
elif issubdtype(dtype, complexfloating):
x1r = lax.real(x1)
x1i = lax.imag(x1)
x2r = lax.real(x2)
x2i = lax.imag(x2)
which = lax.ge(lax.abs(x2r), lax.abs(x2i))
rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))
rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))
out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),
lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))
return lax.convert_element_type(out, dtype)
else:
return _float_divmod(x1, x2)[0]
@_wraps(onp.divmod)
def divmod(x1, x2):
x1, x2 = _promote_args("divmod", x1, x2)
if issubdtype(_dtype(x1), integer):
return floor_divide(x1, x2), remainder(x1, x2)
else:
return _float_divmod(x1, x2)
def _float_divmod(x1, x2):
# see float_divmod in floatobject.c of CPython
mod = lax.rem(x1, x2)
div = lax.div(lax.sub(x1, mod), x2)
ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))
mod = lax.select(ind, mod + x2, mod)
div = lax.select(ind, div - _constant_like(div, 1), div)
return lax.round(div), mod
@_wraps(onp.power)
def power(x1, x2):
  x1, x2 = _promote_args("power", x1, x2)
dtype = _dtype(x1)
if not issubdtype(dtype, integer):
return lax.pow(x1, x2)
# Integer power => use binary exponentiation.
# TODO(phawkins): add integer pow support to XLA.
bits = 6 # Anything more would overflow for any x1 > 1
acc = ones(shape(x1), dtype=dtype)
for _ in range(bits):
acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),
lax.mul(acc, x1), acc)
x1 = lax.mul(x1, x1)
x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))
return acc
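# Worked example of the integer branch above (illustrative): for x1 = 3 and
# x2 = 5 (binary 101), the accumulator is multiplied by the running square of
# x1 at bits 0 and 2, giving 3 * 81 = 243:
#
#   >>> int(power(3, 5))
#   243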
@_wraps(onp.logaddexp)
def logaddexp(x1, x2):
x1, x2 = _promote_shapes("logaddexp", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))
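# The formulation above computes log(exp(x1) + exp(x2)) stably as
# max(x1, x2) + log1p(exp(-|x1 - x2|)), which avoids overflow for large
# inputs. Example (illustrative): logaddexp(0., 0.) == log(2) ~= 0.6931.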
@_wraps(onp.logaddexp2)
def logaddexp2(x1, x2):
x1, x2 = _promote_shapes("logaddexp2", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),
_constant_like(x1, onp.log(2)))))
@_wraps(onp.log2)
def log2(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))
@_wraps(onp.log10)
def log10(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))
@_wraps(onp.exp2)
def exp2(x):
x, = _promote_dtypes_inexact(x)
return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))
@_wraps(onp.signbit)
def signbit(x):
x, = _promote_shapes("signbit", x)
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.lt(x, _constant_like(x, 0))
elif issubdtype(dtype, bool_):
return full_like(x, False, dtype=bool_)
elif not issubdtype(dtype, floating):
raise ValueError(
"jax.numpy.signbit is not well defined for %s" % dtype)
# TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to
# F32.
if dtype == bfloat16:
dtype = float32
x = lax.convert_element_type(x, float32)
info = finfo(dtype)
if info.bits == 16:
int_type = onp.int16
elif info.bits == 32:
int_type = onp.int32
elif info.bits == 64:
int_type = onp.int64
else:
raise NotImplementedError(
"jax.numpy.signbit only supports 16, 32, and 64-bit types.")
x = lax.bitcast_convert_type(x, int_type)
  return lax.convert_element_type(x >> (info.nexp + info.nmant), onp.bool_)
@_wraps(onp.trunc)
def trunc(x):
return where(lax.lt(x, lax._const(x, 0)), lax.ceil(x), lax.floor(x))
def _conv(x, y, mode, op):
if issubdtype(x.dtype, complexfloating) or issubdtype(y.dtype, complexfloating):
raise NotImplementedError(f"{op}() does not support complex inputs")
if ndim(x) != 1 or ndim(y) != 1:
raise ValueError(f"{op}() only support 1-dimensional inputs.")
x, y = _promote_dtypes_inexact(x, y)
out_order = slice(None)
if len(x) < len(y):
x, y = y, x
if op == "correlate":
out_order = slice(None, None, -1)
if op == 'convolve':
y = y[::-1]
if mode == 'valid':
padding = [(0, 0)]
elif mode == 'same':
padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]
elif mode == 'full':
padding = [(y.shape[0] - 1, y.shape[0] - 1)]
else:
raise ValueError("mode must be one of ['full', 'same', 'valid']")
result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,), padding)
return result[0, 0, out_order]
@_wraps(onp.convolve)
def convolve(x, y, mode='full'):
return _conv(x, y, mode, 'convolve')
@_wraps(onp.correlate)
def correlate(x, y, mode='valid'):
return _conv(x, y, mode, 'correlate')
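# Example (illustrative): in 'full' mode the output length is
# len(x) + len(y) - 1, matching NumPy:
#
#   >>> convolve(onp.array([1., 2., 3.]), onp.array([0., 1., 0.5]))
#   # ~ [0., 1., 2.5, 4., 1.5]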
def _normalize_float(x):
info = finfo(_dtype(x))
cond = lax.abs(x) < info.tiny
x1 = where(cond, x * (1 << info.nmant), x)
x2 = where(cond,
full_like(x, -info.nmant, dtype=onp.int32),
zeros_like(x, dtype=onp.int32))
return lax.convert_element_type(x1, _dtype(x)), x2
_INT_DTYPES = {
16: onp.int16,
32: onp.int32,
64: onp.int64,
}
@_wraps(onp.ldexp)
@jit
def ldexp(x1, x2):
dtype = _result_dtype(onp.ldexp, x1, x2)
x1, x2 = _promote_shapes("ldexp", x1, x2)
x1 = lax.convert_element_type(x1, dtype)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
int_type = _INT_DTYPES[info.bits]
x, e = _normalize_float(x1)
x2 += lax.convert_element_type(e, onp.int32)
x = lax.bitcast_convert_type(x, int_type)
x2 += ((x >> info.nmant) & mask) - bias
# find underflow/overflow before denormalization
underflow_cond = x2 < -(bias + info.nmant)
overflow_cond = x2 > bias
m = ones_like(x, dtype=dtype)
# denormals
cond = x2 < -bias + 1
x2 = where(cond, x2 + info.nmant, x2)
m = where(cond, m / (1 << info.nmant), m)
x2 = lax.convert_element_type(x2, onp.int32)
x &= ~(mask << info.nmant)
x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)
x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)
# underflow
x = where(underflow_cond, zeros_like(x, dtype=dtype), x)
# overflow
x = where(overflow_cond, lax.sign(x1) * full_like(x, onp.inf), x)
# ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0
return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)
@_wraps(onp.frexp)
@jit
def frexp(x):
x = asarray(x)
if issubdtype(x.dtype, complexfloating):
raise TypeError("frexp does not support complex-valued inputs")
elif not issubdtype(x.dtype, floating):
x = lax.convert_element_type(x, float_)
dtype = _dtype(x)
info = finfo(dtype)
mask = (1 << info.nexp) - 1
bias = ((1 << info.nexp) - 1) >> 1
int_type = _INT_DTYPES[info.bits]
x1, x2 = _normalize_float(x)
x1 = lax.bitcast_convert_type(x1, int_type)
x2 += ((x1 >> info.nmant) & mask) - bias + 1
x1 &= ~(mask << info.nmant)
x1 |= (bias - 1) << info.nmant
x1 = lax.bitcast_convert_type(x1, dtype)
cond = isinf(x) | isnan(x) | (x == 0)
x2 = where(cond, zeros_like(x2), x2)
return where(cond, x, x1), lax.convert_element_type(x2, int32)
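# Example (illustrative): frexp and ldexp follow NumPy semantics, decomposing
# x into a mantissa m with 0.5 <= |m| < 1 and an integer exponent e such that
# x == m * 2**e; frexp(8.0) gives (0.5, 4) and ldexp(0.5, 4) recovers 8.0.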
@_wraps(onp.remainder)
def remainder(x1, x2):
x1, x2 = _promote_args("remainder", x1, x2)
zero = _constant_like(x1, 0)
trunc_mod = lax.rem(x1, x2)
trunc_mod_not_zero = lax.ne(trunc_mod, zero)
do_plus = lax.bitwise_and(
lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)
return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)
mod = remainder
fmod = _wraps(onp.fmod)(lambda x1, x2: lax.rem(x1, x2))
@_wraps(onp.cbrt)
def cbrt(x):
x, = _promote_dtypes_inexact(x)
return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))
@_wraps(onp.square)
def square(x): return lax.mul(x, x)
@_wraps(onp.deg2rad)
def deg2rad(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, pi / 180))
@_wraps(onp.rad2deg)
def rad2deg(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, 180 / pi))
degrees = rad2deg
radians = deg2rad
@_wraps(onp.heaviside)
def heaviside(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
zero = lax._const(x1, 0)
return where(lax.lt(x1, zero), zero,
where(lax.gt(x1, zero), lax._const(x1, 1), x2))
@_wraps(onp.hypot)
def hypot(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
return lax.sqrt(x1*x1 + x2*x2)
@_wraps(onp.reciprocal)
def reciprocal(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax._const(x, 1), x)
@_wraps(onp.sinc, update_doc=False)
def sinc(x):
x, = _promote_dtypes_inexact(x)
eq_zero = lax.eq(x, lax._const(x, 0))
safe_x = where(eq_zero, lax._const(x, 0), x)
pi_x = lax.mul(lax._const(x, pi), safe_x)
return where(eq_zero,
lax._const(x, 1), lax.div(lax.sin(pi_x), pi_x))
@_wraps(onp.transpose)
def transpose(a, axes=None):
axes = onp.arange(ndim(a))[::-1] if axes is None else axes
return lax.transpose(a, axes)
@_wraps(onp.rot90)
def rot90(m, k=1, axes=(0, 1)):
ax1, ax2 = axes
ax1 = _canonicalize_axis(ax1, m.ndim)
ax2 = _canonicalize_axis(ax2, m.ndim)
if ax1 == ax2:
raise ValueError("Axes must be different") # same as numpy error
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = list(range(m.ndim))
perm[ax1], perm[ax2] = perm[ax2], perm[ax1]
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@_wraps(onp.flip)
def flip(m, axis=None):
if axis is None:
return lax.rev(m, list(range(len(m.shape))))
return lax.rev(m, [_canonicalize_axis(axis, len(m.shape))])
@_wraps(onp.fliplr)
def fliplr(m):
return flip(m, 1)
@_wraps(onp.flipud)
def flipud(m):
return flip(m, 0)
@_wraps(onp.conjugate)
def conjugate(x):
return lax.conj(x) if iscomplexobj(x) else x
conj = conjugate
@_wraps(onp.imag)
def imag(val):
return lax.imag(val) if iscomplexobj(val) else zeros_like(val)
@_wraps(onp.real)
def real(val):
return lax.real(val) if iscomplexobj(val) else val
@_wraps(onp.iscomplex)
def iscomplex(x):
i = imag(x)
return lax.ne(i, lax._const(i, 0))
@_wraps(onp.isreal)
def isreal(x):
i = imag(x)
return lax.eq(i, lax._const(i, 0))
@_wraps(onp.angle)
def angle(z):
re = real(z)
im = imag(z)
dtype = _dtype(re)
if not issubdtype(dtype, inexact) or (
issubdtype(_dtype(z), floating) and ndim(z) == 0):
dtype = dtypes.canonicalize_dtype(float_)
re = lax.convert_element_type(re, dtype)
im = lax.convert_element_type(im, dtype)
return lax.atan2(im, re)
@_wraps(onp.diff)
def diff(a, n=1, axis=-1,):
if not isinstance(a, ndarray) or a.ndim == 0:
return a
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = a.ndim
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
op = not_equal if a.dtype == onp.bool_ else subtract
for _ in range(n):
a = op(a[slice1], a[slice2])
return a
@partial(jit, static_argnums=1)
def _gradient(a, axis):
def gradient_along_axis(a, axis):
sliced = partial(lax.slice_in_dim, a, axis=axis)
a_grad = concatenate((
sliced(1, 2) - sliced(0, 1),
(sliced(2, None) - sliced(0, -2)) * 0.5,
sliced(-1, None) - sliced(-2, -1),
), axis)
return a_grad
if axis is None:
axis = range(a.ndim)
else:
if isinstance(axis, int):
axis = (axis,)
if not isinstance(axis, tuple) and not isinstance(axis, list):
raise ValueError("Give `axis` either as int or iterable")
axis = [_canonicalize_axis(i, a.ndim) for i in axis]
if min([s for i, s in enumerate(a.shape) if i in axis]) < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient")
# TODO: use jax.lax loop tools if possible
a_grad = [gradient_along_axis(a, ax) for ax in axis]
if len(axis) == 1:
a_grad = a_grad[0]
return a_grad
@_wraps(onp.gradient)
def gradient(a, *args, **kwargs):
axis = kwargs.pop("axis", None)
if not len(args) == 0:
raise ValueError("*args (sample distances) not implemented")
if not len(kwargs) == 0:
raise ValueError("Only `axis` keyword is implemented")
return _gradient(a, axis)
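# Example (illustrative): second-order central differences are used in the
# interior and one-sided differences at the boundaries, matching NumPy:
#
#   >>> gradient(array([1., 2., 4., 7.]))
#   # ~ [1., 1.5, 2.5, 3.]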
@_wraps(onp.isrealobj)
def isrealobj(x):
return not iscomplexobj(x)
@_wraps(onp.reshape)
def reshape(a, newshape, order="C"):
try:
return a.reshape(newshape, order=order) # forward to method for ndarrays
except AttributeError:
return _reshape(a, newshape, order=order)
def _compute_newshape(a, newshape):
"""Fixes a -1 value in newshape, if present."""
# other errors, like having more than one -1, are caught downstream
newsize = _prod(newshape)
if newsize < 0:
fix = a.size // -newsize
return [d if d != -1 else fix for d in newshape]
else:
return newshape
def _reshape(a, newshape, order="C"):
computed_newshape = _compute_newshape(a, newshape)
if order == "C":
return lax.reshape(a, computed_newshape, None)
elif order == "F":
dims = onp.arange(ndim(a))[::-1]
return lax.reshape(a, computed_newshape[::-1], dims).T
elif order == "A":
raise NotImplementedError("np.reshape order=A is not implemented.")
else:
raise ValueError("Unexpected value for 'order' argument: {}.".format(order))
def _reshape_method(a, *newshape, **kwargs):
order = kwargs.pop("order", "C")
if len(kwargs) == 1:
invalid_kwarg, = kwargs
msg = "'{}' is an invalid keyword argument for this function"
raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error
elif kwargs:
invalid_kwargs = "'{}'".format("'".join(kwargs))
msg = "{} are invalid keyword arguments for this function"
raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error
if len(newshape) == 1 and not isinstance(newshape[0], int):
newshape = newshape[0]
return _reshape(a, newshape, order=order)
@_wraps(onp.ravel)
def ravel(a, order="C"):
if order == "K":
raise NotImplementedError("Ravel not implemented for order='K'.")
return reshape(a, (size(a),), order)
@_wraps(onp.squeeze)
def squeeze(a, axis=None):
shape_a = shape(a)
if axis is None:
if 1 not in shape_a:
return a
newshape = [d for d in shape_a if d != 1]
else:
if isinstance(axis, int):
axis = (axis,)
axis = frozenset(_canonicalize_axis(i, ndim(a)) for i in axis)
if _any(shape_a[a] != 1 for a in axis):
raise ValueError("cannot select an axis to squeeze out which has size "
"not equal to one")
newshape = [d for i, d in enumerate(shape_a)
if d != 1 or i not in axis]
return lax.reshape(a, newshape)
@_wraps(onp.expand_dims)
def expand_dims(a, axis):
shape = _shape(a)
axis = _canonicalize_axis(axis, ndim(a) + 1)
return lax.reshape(a, shape[:axis] + (1,) + shape[axis:])
@_wraps(onp.swapaxes)
def swapaxes(a, axis1, axis2):
perm = onp.arange(ndim(a))
perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
return lax.transpose(a, perm)
@_wraps(onp.moveaxis)
def moveaxis(a, source, destination):
if isinstance(source, int):
source = (source,)
if isinstance(destination, int):
destination = (destination,)
source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)
destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)
if len(source) != len(destination):
raise ValueError("Inconsistent number of elements: {} vs {}"
.format(len(source), len(destination)))
perm = [i for i in range(ndim(a)) if i not in source]
for dest, src in sorted(zip(destination, source)):
perm.insert(dest, src)
return lax.transpose(a, perm)
@_wraps(onp.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
a, b = _promote_args("isclose", asarray(a), asarray(b))
dtype = _dtype(a)
if issubdtype(dtype, inexact):
if issubdtype(dtype, complexfloating):
dtype = _complex_elem_type(dtype)
rtol = lax.convert_element_type(rtol, dtype)
atol = lax.convert_element_type(atol, dtype)
out = lax.le(
lax.abs(lax.sub(a, b)),
lax.add(atol, lax.mul(rtol, lax.abs(b))))
# This corrects the comparisons for infinite and nan values
a_inf = isinf(a)
b_inf = isinf(b)
any_inf = logical_or(a_inf, b_inf)
both_inf = logical_and(a_inf, b_inf)
# Make all elements where either a or b are infinite to False
out = logical_and(out, logical_not(any_inf))
# Make all elements where both a or b are the same inf to True
same_value = lax.eq(a, b)
same_inf = logical_and(both_inf, same_value)
out = logical_or(out, same_inf)
# Make all elements where either a or b is NaN to False
a_nan = isnan(a)
b_nan = isnan(b)
any_nan = logical_or(a_nan, b_nan)
out = logical_and(out, logical_not(any_nan))
if equal_nan:
# Make all elements where both a and b is NaN to True
both_nan = logical_and(a_nan, b_nan)
out = logical_or(out, both_nan)
return _maybe_numpy_1_13_isclose_behavior(a, out)
else:
return lax.eq(a, b)
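# The core test above is abs(a - b) <= atol + rtol * abs(b), with extra
# handling so that infinities only match infinities of the same sign and NaNs
# never match unless equal_nan=True. Example (illustrative): with the default
# tolerances, isclose(1.0, 1.0 + 1e-6) is True while isclose(1.0, 1.0001) is
# False.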
numpy_version = tuple(map(int, onp.version.version.split('.')[:2]))
if numpy_version < (1, 14):
# see discussion at https://github.com/numpy/numpy/pull/9720
def _maybe_numpy_1_13_isclose_behavior(a, out):
if size(out) == 1 and issubdtype(_dtype(a), complexfloating):
return lax.reshape(out, (1,))
else:
return out
else:
def _maybe_numpy_1_13_isclose_behavior(a, out):
return out
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@jit
def _where(condition, x=None, y=None):
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not issubdtype(_dtype(condition), bool_):
condition = lax.ne(condition, zeros_like(condition))
x, y = _promote_dtypes(x, y)
condition, x, y = broadcast_arrays(condition, x, y)
return lax.select(condition, x, y) if onp.size(x) else x
_WHERE_DOC = """\
At present, JAX does not support JIT-compilation of the single-argument form
of :py:func:`jax.numpy.where` because its output shape is data-dependent. The
three-argument form does not have a data-dependent shape and can be JIT-compiled
successfully.
"""
@_wraps(onp.where, update_doc=False, lax_description=_WHERE_DOC)
def where(condition, x=None, y=None):
if x is None and y is None:
return nonzero(asarray(condition))
else:
return _where(condition, x, y)
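# Example (illustrative): the three-argument form selects elementwise and can
# be jit-compiled, while the one-argument form reduces to nonzero(condition)
# and cannot:
#
#   >>> where(array([1, 0, 3]) > 0, 1, -1)
#   # ~ [1, -1, 1]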
@_wraps(onp.select)
def select(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
msg = "condlist must have length equal to choicelist ({} vs {})"
raise ValueError(msg.format(len(condlist), len(choicelist)))
if len(condlist) == 0:
raise ValueError("condlist must be non-empty")
choices = _promote_dtypes(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
def broadcast_arrays(*args):
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [shape(arg) for arg in args]
if len(set(shapes)) == 1:
return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)
for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [broadcast_to(arg, result_shape) for arg in args]
def broadcast_to(arr, shape):
"""Like Numpy's broadcast_to but doesn't necessarily return views."""
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = tuple(map(int, shape)) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1)
if nlead < 0 or not onp.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
diff, = onp.where(onp.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims)
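# Example (illustrative): broadcast_to(arange(3), (2, 3)) returns a (2, 3)
# array whose rows are both [0, 1, 2]; unlike NumPy, the result is a new
# array rather than a zero-stride view.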
@_wraps(onp.split)
def split(ary, indices_or_sections, axis=0):
dummy_val = onp.broadcast_to(0, ary.shape) # zero strides
subarrays = onp.split(dummy_val, indices_or_sections, axis) # shapes
split_indices = onp.cumsum([0] + [onp.shape(sub)[axis] for sub in subarrays])
starts, ends = [0] * ndim(ary), shape(ary)
_subval = lambda x, i, v: subvals(x, [(i, v)])
return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
for start, end in zip(split_indices[:-1], split_indices[1:])]
def _split_on_axis(onp_fun, axis):
@_wraps(onp_fun, update_doc=False)
def f(ary, indices_or_sections):
return split(ary, indices_or_sections, axis=axis)
return f
vsplit = _split_on_axis(onp.vsplit, axis=0)
hsplit = _split_on_axis(onp.hsplit, axis=1)
dsplit = _split_on_axis(onp.dsplit, axis=2)
@_wraps(onp.clip)
def clip(a, a_min=None, a_max=None):
if a_min is None and a_max is None:
raise ValueError("At most one of a_min and a_max may be None")
if a_min is not None:
if _dtype(a_min) != _dtype(a):
a_min = lax.convert_element_type(a_min, _dtype(a))
a = maximum(a_min, a)
if a_max is not None:
if _dtype(a_max) != _dtype(a):
a_max = lax.convert_element_type(a_max, _dtype(a))
a = minimum(a_max, a)
return a
def _dtype_info(dtype):
"""Helper function for to get dtype info needed for clipping."""
if issubdtype(dtype, integer):
return iinfo(dtype)
return finfo(dtype)
def _round_to_nearest_even(x):
half = lax._const(x, 0.5)
one = lax._const(x, 1)
round_val = lax.floor(x)
fraction = x - round_val
nearest_even_int = lax.sub(
round_val, lax.mul(lax._const(x, 2), lax.floor(lax.mul(half, x))))
is_odd = lax.eq(nearest_even_int, one)
return lax.select(
lax.bitwise_or(lax.gt(fraction, half),
lax.bitwise_and(lax.eq(fraction, half), is_odd)),
lax.add(round_val, one), round_val)
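# Example (illustrative): ties are rounded to the nearest even value
# ("banker's rounding"), matching NumPy, so round(array([0.5, 1.5, 2.5]))
# is approximately [0., 2., 2.].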
@_wraps(onp.round, update_doc=False)
def round(a, decimals=0):
dtype = _dtype(a)
if issubdtype(dtype, integer):
if decimals < 0:
raise NotImplementedError(
"integer np.round not implemented for decimals < 0")
return a # no-op on integer types
def _round_float(x):
if decimals == 0:
return _round_to_nearest_even(x)
# TODO(phawkins): the strategy of rescaling the value isn't necessarily a
# good one since we may be left with an incorrectly rounded value at the
# end due to precision problems. As a workaround for float16, convert to
# float32,
x = lax.convert_element_type(x, onp.float32) if dtype == onp.float16 else x
factor = _constant_like(x, 10 ** decimals)
out = lax.div(_round_to_nearest_even(lax.mul(x, factor)), factor)
return lax.convert_element_type(out, dtype) if dtype == onp.float16 else out
if issubdtype(dtype, complexfloating):
return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))
else:
return _round_float(a)
around = round
@_wraps(onp.fix)
def fix(x, out=None):
if out is not None:
raise ValueError("fix does not support the `out` argument.")
zero = lax._const(x, 0)
return where(lax.ge(x, zero), lax.floor(x), lax.ceil(x))
@_wraps(onp.isfinite)
def isfinite(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.is_finite(x)
elif issubdtype(dtype, complexfloating):
return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))
else:
return full_like(x, True, dtype=bool_)
@_wraps(onp.isinf)
def isinf(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(lax.abs(x), _constant_like(x, inf))
elif issubdtype(dtype, complexfloating):
re = lax.real(x)
im = lax.imag(x)
return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),
lax.eq(lax.abs(im), _constant_like(im, inf)))
else:
return full_like(x, False, dtype=bool_)
def _isposneginf(infinity, x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(x, _constant_like(x, infinity))
elif issubdtype(dtype, complexfloating):
raise ValueError("isposinf/isneginf are not well defined for complex types")
else:
return full_like(x, False, dtype=bool_)
isposinf = _wraps(onp.isposinf)(partial(_isposneginf, inf))
isneginf = _wraps(onp.isneginf)(partial(_isposneginf, -inf))
@_wraps(onp.isnan)
def isnan(x):
return lax.bitwise_and(lax.bitwise_not(isfinite(x)),
lax.bitwise_not(isinf(x)))
@_wraps(onp.nan_to_num)
def nan_to_num(x, copy=True):
del copy
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
return lax.complex(nan_to_num(lax.real(x)), nan_to_num(lax.imag(x)))
info = finfo(dtypes.canonicalize_dtype(dtype))
x = where(isnan(x), _constant_like(x, 0), x)
x = where(isposinf(x), _constant_like(x, info.max), x)
x = where(isneginf(x), _constant_like(x, info.min), x)
return x
### Reducers
def _make_reduction(np_fun, op, init_val, preproc=None, bool_op=None,
upcast_f16_for_computation=False):
"""Creates reduction function given a binary operation and monoid identity."""
bool_op = bool_op or op
@_wraps(np_fun)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support the `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = dtype or _dtype(np_fun(onp.ones((), dtype=_dtype(a))))
if upcast_f16_for_computation and issubdtype(result_dtype, inexact):
computation_dtype = promote_types(result_dtype, float32)
else:
computation_dtype = result_dtype
a = lax.convert_element_type(a, computation_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val),
op if computation_dtype != onp.bool_ else bool_op, dims)
if keepdims:
shape_with_singletons = subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
return lax.convert_element_type(result, dtype or result_dtype)
return reduction
def _reduction_dims(a, axis):
if axis is None:
return onp.arange(ndim(a))
elif isinstance(axis, (onp.ndarray, tuple, list)):
return tuple(_canonicalize_axis(x, ndim(a)) for x in axis)
elif isinstance(axis, int):
return (_canonicalize_axis(axis, ndim(a)),)
else:
raise TypeError("Unexpected type of axis argument: {}".format(type(axis)))
def _reduction_init_val(a, init_val):
a_dtype = dtypes.canonicalize_dtype(_dtype(a))
if a_dtype == 'bool':
return onp.array(init_val > 0, dtype=a_dtype)
try:
return onp.array(init_val, dtype=a_dtype)
except OverflowError:
assert issubdtype(a_dtype, integer)
sign, info = onp.sign(init_val), iinfo(a_dtype)
return onp.array(info.min if sign < 0 else info.max, dtype=a_dtype)
_cast_to_bool = partial(lax.convert_element_type, new_dtype=bool_)
sum = _make_reduction(onp.sum, lax.add, 0, upcast_f16_for_computation=True,
bool_op=lax.bitwise_or)
product = prod = _make_reduction(onp.prod, lax.mul, 1, bool_op=lax.bitwise_and,
upcast_f16_for_computation=True)
amax = max = _make_reduction(onp.max, lax.max, -onp.inf)
amin = min = _make_reduction(onp.min, lax.min, onp.inf)
all = alltrue = _make_reduction(onp.all, lax.bitwise_and, True, _cast_to_bool)
any = sometrue = _make_reduction(onp.any, lax.bitwise_or, False, _cast_to_bool)
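# Example (illustrative): each reduction above folds its monoid identity over
# the reduced axes; e.g. sum(arange(6).reshape(2, 3), axis=1) is [3, 12], and
# keepdims=True would retain the reduced axis with size 1, giving shape (2, 1).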
@_wraps(onp.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("mean does not support the `out` argument.")
if axis is None:
normalizer = size(a)
else:
normalizer = onp.prod(onp.take(shape(a), axis))
if dtype is None:
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
dtype = float_
else:
dtype = _dtype(a)
return lax.div(
sum(a, axis, dtype=dtype, keepdims=keepdims),
lax.convert_element_type(normalizer, dtype))
@_wraps(onp.average)
def average(a, axis=None, weights=None, returned=False):
a = asarray(a)
if weights is None: # Treat all weights as 1
avg = mean(a, axis=axis)
if axis is None:
weights_sum = full((), size(a), dtype=avg.dtype)
else:
weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)
else:
weights = asarray(weights)
if issubdtype(a.dtype, inexact):
out_dtype = result_type(a.dtype, weights.dtype)
else:
out_dtype = result_type(a.dtype, weights.dtype, float_)
out_dtype = dtypes.canonicalize_dtype(out_dtype)
a_shape = shape(a)
a_ndim = len(a_shape)
weights_shape = shape(weights)
axis = None if axis is None else _canonicalize_axis(axis, a_ndim)
if a_shape != weights_shape:
# Make sure the dimensions work out
if axis is None:
raise ValueError("Axis must be specified when shapes of a and "
"weights differ.")
if len(weights_shape) != 1:
raise ValueError("1D weights expected when shapes of a and "
"weights differ.")
if weights_shape[0] != a_shape[axis]:
raise ValueError("Length of weights not "
"compatible with specified axis.")
weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)
weights = moveaxis(weights, -1, axis)
weights_sum = sum(weights, axis=axis, dtype=out_dtype)
avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum
if returned:
if avg.shape != weights_sum.shape:
weights_sum = broadcast_to(weights_sum, avg.shape)
return avg, weights_sum
return avg
@_wraps(onp.var)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("var does not support the `out` argument.")
a_dtype = _dtype(a)
if dtype:
a_dtype = promote_types(a_dtype, dtype)
else:
if not issubdtype(a_dtype, inexact):
dtype = a_dtype = float_
else:
dtype = _complex_elem_type(a_dtype)
a_dtype = promote_types(a_dtype, float32)
a_mean = mean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
if axis is None:
normalizer = size(a)
else:
normalizer = onp.prod(onp.take(shape(a), axis))
normalizer = normalizer - ddof
result = sum(centered, axis, keepdims=keepdims)
out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))
return lax.convert_element_type(out, dtype)
@_wraps(onp.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("std does not support the `out` argument.")
return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
@_wraps(onp.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
if out is not None:
raise ValueError("ptp does not support the `out` argument.")
x = amax(a, axis=axis, keepdims=keepdims)
y = amin(a, axis=axis, keepdims=keepdims)
return lax.sub(x, y)
@_wraps(onp.allclose)
def allclose(a, b, rtol=1e-05, atol=1e-08):
return all(isclose(a, b, rtol, atol))
@_wraps(onp.count_nonzero)
def count_nonzero(a, axis=None):
return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,
dtype=dtypes.canonicalize_dtype(onp.int_))
_NONZERO_DOC = """\
At present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero`
because its output shape is data-dependent.
"""
@_wraps(onp.nonzero, lax_description=_NONZERO_DOC)
def nonzero(a):
# Note: this function cannot be jitted because its output has a dynamic
# shape.
a = atleast_1d(a)
dims = shape(a)
ndims = len(dims)
ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)]
d = concatenate(ds, axis=-1)
indexes = d[a != 0]
return tuple(indexes[..., i] for i in range(ndims))
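# Example (illustrative): nonzero(array([0, 2, 0, 3])) returns a tuple of
# index arrays, here (array([1, 3]),); the output length depends on the
# values, which is why this function cannot be jit-compiled.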
def _make_nan_reduction(onp_reduction, np_reduction, init_val, nan_if_all_nan):
@_wraps(onp_reduction)
def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):
out = np_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),
axis=axis, out=out, keepdims=keepdims, **kwargs)
if nan_if_all_nan:
return where(all(isnan(a), axis=axis, keepdims=keepdims),
_constant_like(a, nan), out)
else:
return out
return nan_reduction
nanmin = _make_nan_reduction(onp.nanmin, min, inf, nan_if_all_nan=True)
nanmax = _make_nan_reduction(onp.nanmax, max, -inf, nan_if_all_nan=True)
nansum = _make_nan_reduction(onp.nansum, sum, 0, nan_if_all_nan=False)
nanprod = _make_nan_reduction(onp.nanprod, prod, 1, nan_if_all_nan=False)
@_wraps(onp.nanmean)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("nanmean does not support the `out` argument.")
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
return mean(a, axis, dtype, out, keepdims)
if dtype is None:
dtype = _dtype(a)
nan_mask = logical_not(isnan(a))
normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)
normalizer = lax.convert_element_type(normalizer, dtype)
td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)
return td
def _make_cumulative_reduction(onp_reduction, reduction, squash_nan=False, unit=None):
# We want to allow XLA to fuse the pad and reduce-window operators to
# avoid materializing the padded output.
# Consider removing `jit` once again if reduce-window is generalized to
# support arbitrary padding.
@partial(jit, static_argnums=(1, 2))
def _cumulative_reduction(a, axis, dtype):
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if squash_nan:
a = where(isnan(a), _constant_like(a, unit), a)
if not dtype and _dtype(a) == bool_:
dtype = int_
if dtype:
a = lax.convert_element_type(a, dtype)
return reduction(a, axis)
@_wraps(onp_reduction)
def cumulative_reduction(a, axis=None, dtype=None):
# jit doesn't support kwargs as static_args.
return _cumulative_reduction(a, axis, dtype)
return cumulative_reduction
cumsum = _make_cumulative_reduction(onp.cumsum, lax.cumsum, squash_nan=False)
cumprod = _make_cumulative_reduction(onp.cumprod, lax.cumprod, squash_nan=False)
cumproduct = cumprod
nancumsum = _make_cumulative_reduction(onp.nancumsum, lax.cumsum, squash_nan=True, unit=0)
nancumprod = _make_cumulative_reduction(onp.nancumprod, lax.cumprod, squash_nan=True, unit=1)
### Array-creation functions
def _check_no_padding(axis_padding, mode):
if (axis_padding[0] > 0 or axis_padding[1] > 0):
msg = "Cannot apply '{}' padding to empty axis"
raise ValueError(msg.format(mode))
def _pad_constant(array, pad_width, constant_values):
nd = ndim(array)
constant_values = broadcast_to(asarray(constant_values), (nd, 2))
constant_values = lax.convert_element_type(constant_values, array.dtype)
for i in range(nd):
widths = [(0, 0, 0)] * nd
widths[i] = (pad_width[i, 0], 0, 0)
array = lax.pad(array, constant_values[i, 0], widths)
widths[i] = (0, pad_width[i, 1], 0)
array = lax.pad(array, constant_values[i, 1], widths)
return array
def _pad_wrap(array, pad_width):
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "wrap")
continue
size = array.shape[i]
repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)
total_repeats = repeats.sum() + 1
parts = []
if left_remainder:
parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]
parts += total_repeats * [array]
if right_remainder:
parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]
array = lax.concatenate(parts, dimension=i)
return array
def _pad_symmetric_or_reflect(array, pad_width, mode):
assert mode in ("symmetric", "reflect")
for i in range(ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], mode)
continue
n = array.shape[i]
rarray = lax.rev(array, dimensions=(i,))
offset = 1 if (mode == "reflect" and n > 1) else 0
def build_padding(padding, forward):
xs = []
delta = n - offset
while padding > delta:
padding -= delta
p = array if forward else rarray
xs.append(lax.slice_in_dim(p, offset, n, axis=i))
forward = not forward
if padding > 0:
x = lax.slice_in_dim(array if forward else rarray, offset,
padding + offset, axis=i)
xs.append(x)
return xs
parts = reversed(build_padding(pad_width[i, 0], forward=True))
parts = [lax.rev(x, dimensions=(i,)) for x in parts]
parts += [array]
parts += build_padding(pad_width[i, 1], forward=False)
array = lax.concatenate(parts, dimension=i)
return array
def _pad_edge(array, pad_width):
nd = ndim(array)
for i in range(nd):
if array.shape[i] == 0:
      _check_no_padding(pad_width[i], "edge")
continue
n = array.shape[i]
npad_before, npad_after = pad_width[i]
edge_before = lax.slice_in_dim(array, 0, 1, axis=i)
pad_before = repeat(edge_before, npad_before, axis=i)
edge_after = lax.slice_in_dim(array, n-1, n, axis=i)
pad_after = repeat(edge_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
@partial(jit, static_argnums=(1, 2))
def _pad(array, pad_width, mode, constant_values):
array = asarray(array)
nd = ndim(array)
pad_width = onp.broadcast_to(onp.asarray(pad_width), (nd, 2))
if any(pad_width < 0):
raise ValueError("index can't contain negative values")
if mode == "constant":
return _pad_constant(array, pad_width, constant_values)
elif mode == "wrap":
return _pad_wrap(array, pad_width)
elif mode in ("symmetric", "reflect"):
return _pad_symmetric_or_reflect(array, pad_width, mode)
elif mode == "edge":
return _pad_edge(array, pad_width)
else:
msg = "Unimplemented padding mode '{}' for np.pad."
raise NotImplementedError(msg.format(mode))
@_wraps(onp.pad)
def pad(array, pad_width, mode='constant', constant_values=0):
if isinstance(pad_width, list):
pad_width = tuple(pad_width)
return _pad(array, pad_width, mode, constant_values)
@_wraps(onp.stack)
def stack(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to stack.")
shape0 = shape(arrays[0])
axis = _canonicalize_axis(axis, len(shape0) + 1)
new_shape = list(shape0)
new_shape.insert(axis, 1)
new_arrays = []
for a in arrays:
if shape(a) != shape0:
raise ValueError("All input arrays must have the same shape.")
new_arrays.append(reshape(a, new_shape))
return concatenate(new_arrays, axis=axis)
@_wraps(onp.tile)
def tile(a, reps):
if isinstance(reps, int):
reps = (reps,)
a = reshape(a, (1,) * (len(reps) - ndim(a)) + shape(a))
reps = (1,) * (ndim(a) - len(reps)) + tuple(reps)
for i, rep in enumerate(reps):
a = concatenate([a] * int(rep), axis=i)
return a
@_wraps(onp.concatenate)
def concatenate(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to concatenate.")
if ndim(arrays[0]) == 0:
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
axis = _canonicalize_axis(axis, ndim(arrays[0]))
arrays = _promote_dtypes(*arrays)
# lax.concatenate can be slow to compile for wide concatenations, so form a
# tree of concatenations as a workaround especially for op-by-op mode.
# (https://github.com/google/jax/issues/653).
k = 16
if len(arrays) == 1:
return array(arrays[0])
else:
while len(arrays) > 1:
arrays = [lax.concatenate(arrays[i:i+k], axis)
for i in range(0, len(arrays), k)]
return arrays[0]
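# Example (illustrative): with k = 16, concatenating 1000 arrays proceeds in
# three passes (1000 -> 63 -> 4 -> 1 intermediate results) rather than
# emitting a single 1000-way lax.concatenate.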
@_wraps(onp.vstack)
def vstack(tup):
return concatenate([atleast_2d(m) for m in tup], axis=0)
row_stack = vstack
@_wraps(onp.hstack)
def hstack(tup):
arrs = [atleast_1d(m) for m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
@_wraps(onp.dstack)
def dstack(tup):
return concatenate([atleast_3d(m) for m in tup], axis=2)
@_wraps(onp.column_stack)
def column_stack(tup):
arrays = []
for v in tup:
arr = array(v)
if arr.ndim < 2:
arr = arr.reshape((-1, 1))
arrays.append(arr)
return concatenate(arrays, 1)
def _atleast_nd(x, n):
m = ndim(x)
return lax.broadcast(x, (1,) * (n - m)) if m < n else x
def _block(xs):
if isinstance(xs, tuple):
raise ValueError("jax.numpy.block does not allow tuples, got {}"
.format(xs))
elif isinstance(xs, list):
if len(xs) == 0:
raise ValueError("jax.numpy.block does not allow empty list arguments")
xs, depths = unzip2([_block(x) for x in xs])
if _any(d != depths[0] for d in depths[1:]):
raise ValueError("Mismatched list depths in jax.numpy.block")
rank = _max(depths[0], _max(ndim(x) for x in xs))
xs = [_atleast_nd(x, rank) for x in xs]
return concatenate(xs, axis=-depths[0]), depths[0] + 1
else:
return asarray(xs), 1
@_wraps(onp.block)
@jit
def block(arrays):
out, _ = _block(arrays)
return out
@_wraps(onp.atleast_1d, update_doc=False)
def atleast_1d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 1 else reshape(arr, -1)
else:
return [atleast_1d(arr) for arr in arys]
@_wraps(onp.atleast_2d, update_doc=False)
def atleast_2d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 2 else reshape(arr, (1, -1))
else:
return [atleast_2d(arr) for arr in arys]
@_wraps(onp.atleast_3d, update_doc=False)
def atleast_3d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) <= 1:
arr = reshape(arr, (1, -1, 1))
elif ndim(arr) == 2:
arr = reshape(arr, shape(arr) + (1,))
return arr
else:
return [atleast_3d(arr) for arr in arys]
@_wraps(onp.array)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
raise NotImplementedError("Only implemented for order='K'")
lax._check_user_dtype_supported(dtype, "array")
if isinstance(object, ndarray):
if dtype and _dtype(object) != dtypes.canonicalize_dtype(dtype):
out = lax.convert_element_type(object, dtype)
else:
out = device_put(object)
elif isscalar(object):
out = lax.reshape(object, ())
if dtype and _dtype(out) != dtypes.canonicalize_dtype(dtype):
out = lax.convert_element_type(out, dtype)
elif hasattr(object, '__array__'):
# this case is for duck-typed handling of objects that implement `__array__`
out = array(object.__array__(), dtype and dtypes.canonicalize_dtype(dtype))
elif isinstance(object, (list, tuple)):
if object:
out = stack([array(elt, dtype=dtype) for elt in object])
else:
out = onp.array([], dtype or float_)
else:
try:
view = memoryview(object)
except TypeError:
pass # `object` does not support the buffer interface.
else:
return array(onp.asarray(view), dtype, copy)
raise TypeError("Unexpected input type for array: {}".format(type(object)))
if ndmin > ndim(out):
out = lax.reshape(out, (1,) * (ndmin - ndim(out)) + shape(out))
return out
@_wraps(onp.asarray)
def asarray(a, dtype=None, order=None):
lax._check_user_dtype_supported(dtype, "asarray")
return array(a, dtype=dtype, copy=False, order=order)
@_wraps(onp.zeros_like)
def zeros_like(x, dtype=None):
lax._check_user_dtype_supported(dtype, "zeros_like")
return lax.full_like(x, 0, dtype)
@_wraps(onp.ones_like)
def ones_like(x, dtype=None):
lax._check_user_dtype_supported(dtype, "ones_like")
return lax.full_like(x, 1, dtype)
@_wraps(onp.full)
def full(shape, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full")
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, fill_value, dtype)
@_wraps(onp.full_like)
def full_like(a, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full_like")
return lax.full_like(a, fill_value, dtype)
@_wraps(onp.zeros)
def zeros(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "zeros")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 0, dtype)
@_wraps(onp.ones)
def ones(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "ones")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 1, dtype)
@_wraps(onp.array_equal)
def array_equal(a1, a2):
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
return shape(a1) == shape(a2) and all(asarray(a1 == a2))
# We can't create uninitialized arrays in XLA; use zeros for empty.
empty_like = zeros_like
empty = zeros
@_wraps(onp.eye)
def eye(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "eye")
dtype = float_ if dtype is None else dtype
M = N if M is None else M
k = int(k)
if N < 0 or M < 0:
msg = "negative dimensions are not allowed, got {} and {}"
raise ValueError(msg.format(N, M))
if k is not None:
k_dtype = _dtype(k)
if not issubdtype(k_dtype, integer):
msg = "eye argument `k` must be of integer dtype, got {}"
raise TypeError(msg.format(k_dtype))
return lax._eye(dtype, (N, M), k)
@_wraps(onp.identity)
def identity(n, dtype=None):
lax._check_user_dtype_supported(dtype, "identity")
return eye(n, dtype=dtype)
@_wraps(onp.arange)
def arange(start, stop=None, step=None, dtype=None):
lax._check_user_dtype_supported(dtype, "arange")
if stop is None and step is None:
dtype = dtype or _dtype(start)
return lax.iota(dtype, start) # avoids materializing
else:
return array(onp.arange(start, stop=stop, step=step, dtype=dtype))
def _wrap_numpy_nullary_function(f):
"""Adapts `f` to return a DeviceArray instead of an onp.ndarray.
`f` cannot have any non-static array arguments.
"""
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return asarray(f(*args, **kwargs))
return wrapper
@_wraps(onp.linspace)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""Implementation of linspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "linspace")
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
dt = result_type(start, stop, float(num))
dtype = dtype or dt
bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
broadcast_start = broadcast_to(start, bounds_shape)
axis = len(bounds_shape) + axis + 1 if axis < 0 else axis
bounds_shape.insert(axis, 1)
iota_shape = [1,] * len(bounds_shape)
iota_shape[axis] = num
div = (num - 1) if endpoint else num
if num > 1:
delta = lax.convert_element_type(stop - start, dt) / div
out = (reshape(broadcast_start, bounds_shape) +
reshape(lax.iota(dt, num), iota_shape) *
reshape(delta, bounds_shape))
elif num == 1:
delta = nan if endpoint else lax.convert_element_type(stop - start, dt)
out = reshape(broadcast_start, bounds_shape)
else: # num == 0 degenerate case, match onp behavior
empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
empty_shape.insert(axis, 0)
delta = nan
out = reshape(array([], dtype=dt), empty_shape)
if retstep:
return lax.convert_element_type(out, dtype), delta
else:
return lax.convert_element_type(out, dtype)
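# [Editor's note] Illustrative sketch, not part of the original source. Because
# the implementation above builds the result from `start`, `stop` and an iota,
# it is differentiable in its endpoints; the grad value 2.5 below follows from
# d(out_i)/d(start) = 1 - i/(num-1) summed over i = 0..4:
#
#   import jax
#   import jax.numpy as jnp
#   x, step = jnp.linspace(0., 1., num=5, retstep=True)            # step == 0.25
#   g = jax.grad(lambda s: jnp.linspace(s, 1., num=5).sum())(0.)   # == 2.5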
@_wraps(onp.logspace)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
"""Implementation of logspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, float_)
computation_dtype = promote_types(dtype, float_)
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
lin = linspace(start, stop, num,
endpoint=endpoint, retstep=False, dtype=None, axis=axis)
return lax.convert_element_type(power(base, lin), dtype)
@_wraps(onp.geomspace)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""Implementation of geomspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, float(num), zeros((), dtype))
computation_dtype = promote_types(dtype, float32)
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
# follow the numpy geomspace convention for negative and complex endpoints
signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2
res = signflip * logspace(log10(signflip * start),
log10(signflip * stop), num,
endpoint=endpoint, base=10.0,
dtype=computation_dtype, axis=0)
if axis != 0:
res = moveaxis(res, 0, axis)
return lax.convert_element_type(res, dtype)
@_wraps(onp.meshgrid)
def meshgrid(*args, **kwargs):
indexing = kwargs.get("indexing", "xy")
sparse = kwargs.get("sparse", False)
copy = kwargs.get("copy", True)
if not copy:
raise ValueError("jax.numpy.meshgrid only supports copy=True")
args = list(args)
if indexing == "xy":
if len(args) >= 2:
args[0], args[1] = args[1], args[0]
elif indexing != "ij":
raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
.format(indexing))
shape = []
for i, a in enumerate(args):
args[i] = a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
raise ValueError(msg.format(a.shape))
shape.append(1 if sparse else a.shape[0])
output = []
for i, a in enumerate(args):
a = asarray(a)
s = shape
if sparse:
s = list(s)
s[i] = a.shape[0]
output.append(lax.broadcast_in_dim(a, s, (i,)))
if indexing == "xy" and len(args) >= 2:
output[0], output[1] = output[1], output[0]
return output
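# [Editor's note] Illustrative sketch, not part of the original source. With the
# default 'xy' indexing the first two axes are swapped relative to 'ij':
#
#   import jax.numpy as jnp
#   X, Y = jnp.meshgrid(jnp.arange(3), jnp.arange(2))                   # shapes (2, 3)
#   Xi, Yi = jnp.meshgrid(jnp.arange(3), jnp.arange(2), indexing='ij')  # shapes (3, 2)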
@_wraps(onp.ix_)
def ix_(*args):
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
raise ValueError(msg.format(a.shape))
if _dtype(a) == bool_:
raise NotImplementedError(
"Boolean arguments to jax.numpy.ix_ are not implemented")
shape = [1] * n
shape[i] = a.shape[0]
if a.size == 0:
# Numpy uses an integer index type for empty arrays.
output.append(lax.full(shape, onp.zeros((), onp.intp)))
else:
output.append(lax.reshape(a, shape))
return tuple(output)
def _repeat_scalar(a, repeats, axis=None):
if not isscalar(repeats):
raise NotImplementedError(
"_repeat_scalar implementation only supports scalar repeats")
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
# Broadcasts to [..., X, repeats, ...] and reshapes to [..., X * repeats, ...]
broadcast_shape = list(a_shape)
broadcast_shape.insert(axis + 1, repeats)
broadcast_dims = onp.concatenate((onp.arange(0, axis + 1),
onp.arange(axis + 2, num_dims + 1)))
a_shape[axis] *= repeats
return lax.reshape(
lax.broadcast_in_dim(a, broadcast_shape, broadcast_dims),
a_shape)
@_wraps(onp.repeat)
def repeat(a, repeats, axis=None):
'''
:param repeats: int or array of ints
'''
# use `_repeat_scalar` when possible
if isscalar(repeats):
return _repeat_scalar(a, repeats, axis)
repeats_raveled = ravel(array(repeats)) # make sure it's jax's array type
if size(repeats_raveled) == 1:
return _repeat_scalar(a, list(repeats_raveled)[0], axis)
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
# repeats must match the dimension along the requested axis
a_shape = list(a.shape)
n = a_shape[axis]
if size(repeats_raveled) != n:
raise ValueError("repeats shape {} does not match the dimension on axis {}".format(
repeats_raveled.shape, n
))
# calculating the new shape
total = sum(repeats_raveled)
new_shape = a_shape[:]
new_shape[axis] = total
a_flattened = ravel(a)
  # Main algorithm:
  #   1. break the raveled input array into chunks, where each chunk is the unit to repeat
  #   2. tile `repeats` so it has the same length as the list of chunks
  #   3. repeat each chunk the number of times given by the tiled repeats, concatenating as we go
chunks = product(a_shape[:axis+1]).item()
a_splitted = split(a_flattened, chunks)
repeats_tiled = tile(repeats_raveled, chunks // len(repeats_raveled))
ret = array([], dtype=a.dtype)
for i, repeat in enumerate(repeats_tiled):
if not isinstance(repeat, int):
repeat = repeat.item()
if repeat != 0:
ret = concatenate((ret, tile(a_splitted[i], repeat)))
return reshape(ret, new_shape)
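# [Editor's note] Illustrative sketch, not part of the original source. The
# scalar fast path (`_repeat_scalar`) and the per-element array-`repeats` path
# above behave like their NumPy counterparts:
#
#   import jax.numpy as jnp
#   jnp.repeat(jnp.array([1, 2, 3]), 2)                      # [1 1 2 2 3 3]
#   jnp.repeat(jnp.array([1, 2, 3]), jnp.array([1, 0, 2]))   # [1 3 3]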
@_wraps(onp.tri)
def tri(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "tri")
M = M if M is not None else N
dtype = dtype or float32
return lax._tri(dtype, (N, M), k)
@_wraps(onp.tril)
def tril(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.tril must be at least 2D")
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))
@_wraps(onp.triu, update_doc=False)
def triu(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.triu must be at least 2D")
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)
@_wraps(onp.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if out:
raise NotImplementedError("The 'out' argument to trace is not supported.")
lax._check_user_dtype_supported(dtype, "trace")
axis1 = _canonicalize_axis(axis1, ndim(a))
axis2 = _canonicalize_axis(axis2, ndim(a))
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = dtypes.canonicalize_dtype(onp.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
# Move the axis? dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce.
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
return sum(a, axis=(-2, -1), dtype=dtype)
def _wrap_indices_function(f):
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return tuple(asarray(x) for x in f(*args, **kwargs))
return wrapper
tril_indices = _wrap_indices_function(onp.tril_indices)
triu_indices = _wrap_indices_function(onp.triu_indices)
mask_indices = _wrap_indices_function(onp.mask_indices)
@_wraps(onp.diag_indices)
def diag_indices(n, ndim=2):
if n < 0:
raise ValueError("n argument to diag_indices must be nonnegative, got {}"
.format(n))
if ndim < 0:
raise ValueError("ndim argument to diag_indices must be nonnegative, got {}"
.format(ndim))
return (lax.iota(int_, n),) * ndim
@_wraps(onp.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
a_shape = shape(a)
a_ndims = len(a_shape)
# Move the two dimensions to the end.
axis1 = _canonicalize_axis(axis1, a_ndims)
axis2 = _canonicalize_axis(axis2, a_ndims)
perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce over one of the axes
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
reduce_axis = -2 if offset < 0 else -1
d = sum(a, axis=reduce_axis, dtype=_dtype(a))
# Slice out the correct diagonal size.
diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),
a_shape[axis2] - _max(offset, 0)))
return lax.slice_in_dim(d, 0, diag_size, axis=-1)
@_wraps(onp.diag)
def diag(v, k=0):
v_shape = shape(v)
if len(v_shape) == 1:
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
n = v_shape[0] + _abs(k)
v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))
return where(eye(n, k=k, dtype=bool), v, zeros_like(v))
elif len(v_shape) == 2:
return diagonal(v, offset=k)
else:
raise ValueError("diag input must be 1d or 2d")
@_wraps(onp.polyval)
def polyval(p, x):
if isinstance(p, onp.poly1d):
p = onp.asarray(p)
if isinstance(x, onp.poly1d):
y = 0
else:
y = zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
@_wraps(onp.append)
def append(arr, values, axis=None):
if axis is None:
return concatenate([ravel(arr), ravel(values)], 0)
else:
return concatenate([arr, values], axis=axis)
### Tensor contraction operations
_PRECISION_DOC = """\
In addition to the original NumPy arguments listed below, also supports
``precision`` for extra control over matrix-multiplication precision
on supported devices. See :py:func:`jax.lax.dot` for details.
"""
@_wraps(onp.dot, lax_description=_PRECISION_DOC)
def dot(a, b, precision=None): # pylint: disable=missing-docstring
_check_arraylike("dot", a, b)
a, b = _promote_dtypes(a, b)
a_ndim, b_ndim = ndim(a), ndim(b)
if a_ndim == 0 or b_ndim == 0:
return lax.mul(a, b)
if _max(a_ndim, b_ndim) <= 2:
return lax.dot(a, b, precision=precision)
if b_ndim == 1:
contract_dims = ((a_ndim - 1,), (0,))
else:
contract_dims = ((a_ndim - 1,), (b_ndim - 2,))
batch_dims = ((), ())
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
@_wraps(onp.matmul, lax_description=_PRECISION_DOC)
def matmul(a, b, precision=None): # pylint: disable=missing-docstring
_check_arraylike("matmul", a, b)
a_is_vec, b_is_vec = (ndim(a) == 1), (ndim(b) == 1)
a = lax.reshape(a, (1,) + shape(a)) if a_is_vec else a
b = lax.reshape(b, shape(b) + (1,)) if b_is_vec else b
a, b = _promote_dtypes(a, b)
batch_shape = lax.broadcast_shapes(shape(a)[:-2], shape(b)[:-2])
a = broadcast_to(a, batch_shape + shape(a)[-2:])
b = broadcast_to(b, batch_shape + shape(b)[-2:])
batch_dims = tuple(range(len(batch_shape)))
dim_numbers = (((ndim(a) - 1,), (ndim(b) - 2,)), (batch_dims, batch_dims))
result = lax.dot_general(a, b, dim_numbers, precision)
if a_is_vec or b_is_vec:
m, n = shape(result)[-2:]
new_m = () if a_is_vec else (m,)
new_n = () if b_is_vec else (n,)
return lax.reshape(result, batch_shape + new_m + new_n)
else:
return result
@_wraps(onp.vdot, lax_description=_PRECISION_DOC)
def vdot(a, b, precision=None):
if issubdtype(_dtype(a), complexfloating):
a = conj(a)
return dot(a.ravel(), b.ravel(), precision=precision)
@_wraps(onp.tensordot, lax_description=_PRECISION_DOC)
def tensordot(a, b, axes=2, precision=None):
_check_arraylike("tensordot", a, b)
a_ndim = ndim(a)
b_ndim = ndim(b)
a, b = _promote_dtypes(a, b)
if type(axes) is int:
if axes > _min(a_ndim, b_ndim):
msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})"
raise TypeError(msg.format(axes, a.shape, b.shape))
contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),
(_canonicalize_axis(ax2, b_ndim),))
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = "tensordot requires axes lists to have equal length, got {} and {}."
raise TypeError(msg.format(ax1, ax2))
contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),
tuple(_canonicalize_axis(i, b_ndim) for i in ax2))
else:
msg = "tensordot requires both axes lists to be either ints, tuples or lists, got {} and {}"
raise TypeError(msg.format(ax1, ax2))
else:
msg = ("tensordot axes argument must be an int, a pair of ints, or a pair "
"of lists/tuples of ints.")
raise TypeError(msg)
return lax.dot_general(a, b, (contracting_dims, ((), ())),
precision=precision)
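# [Editor's note] Illustrative sketch, not part of the original source. Both the
# integer and the explicit-axes forms above reduce to a single lax.dot_general:
#
#   import jax.numpy as jnp
#   a = jnp.ones((3, 4, 5))
#   b = jnp.ones((4, 5, 6))
#   assert jnp.tensordot(a, b, axes=2).shape == (3, 6)
#   assert jnp.tensordot(a, b, axes=([1, 2], [0, 1])).shape == (3, 6)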
@_wraps(onp.einsum, lax_description=_PRECISION_DOC)
def einsum(*operands, **kwargs):
optimize = kwargs.pop('optimize', 'auto')
optimize = 'greedy' if optimize is True else optimize
precision = kwargs.pop('precision', None)
if kwargs:
msg = 'invalid keyword arguments for einsum: {}'
raise TypeError(msg.format(', '.join(kwargs)))
# using einsum_call=True here is an internal api for opt_einsum
operands, contractions = opt_einsum.contract_path(
*operands, einsum_call=True, use_blas=True, optimize=optimize)
contractions = tuple(data[:3] for data in contractions)
return _einsum(operands, contractions, precision)
@_wraps(onp.einsum_path)
def einsum_path(subscripts, *operands, **kwargs):
optimize = kwargs.pop('optimize', 'greedy')
# using einsum_call=True here is an internal api for opt_einsum
return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
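# [Editor's note] Illustrative sketch, not part of the original source. The
# einsum wrapper above delegates contraction planning to opt_einsum and then
# executes each pairwise contraction in `_einsum` below:
#
#   import jax.numpy as jnp
#   x = jnp.ones((8, 4))
#   y = jnp.ones((4, 5))
#   assert jnp.einsum('ij,jk->ik', x, y).shape == (8, 5)   # plain matmul
#   assert jnp.einsum('ii->', jnp.eye(3)) == 3.0           # trace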
def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
@partial(jit, static_argnums=(1, 2))
def _einsum(operands, contractions, precision):
operands = list(_promote_dtypes(*operands))
def sum(x, axes):
return lax.reduce(x, onp.array(0, x.dtype),
lax.add if x.dtype != bool_ else lax.bitwise_or, axes)
def sum_uniques(operand, names, uniques):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum(operand, axes)
names = _removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = lax._delta(operand.dtype, operand.shape, axes)
if name not in keep_names:
operand = sum(operand * eye, axes)
names = names.replace(name, '')
else:
operand = sum(operand * eye, axes[:-1])
names = names.replace(name, '', count - 1)
return operand, names
def filter_singleton_dims(operand, names, other_shape, other_names):
s = shape(operand)
new_shape = []
new_names = []
for i, d in enumerate(names):
other_i = other_names.find(d)
if s[i] != 1 or other_i == -1 or other_shape[other_i] == 1:
new_shape.append(s[i])
new_names.append(d)
return reshape(operand, tuple(new_shape)), "".join(new_names)
for operand_indices, contracted_names, einstr in contractions:
input_str, result_names = einstr.split('->')
input_names = input_str.split(',')
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques)
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names)
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_names, rhs_names = input_names
# handle cases where one side of a contracting or batch dimension is 1
# but its counterpart is not.
lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),
rhs_names)
rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),
lhs_names)
lhs_counts = collections.Counter(lhs_names)
rhs_counts = collections.Counter(rhs_names)
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names)
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names)
contracted_names = contracted_names & (set(lhs_names) | set(rhs_names))
batch_names = (set(lhs_names) & set(rhs_names)) - contracted_names
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert _all(
name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names)
# move batch dims to the front (required by lax.dot_general, and easier)
batch_dims = tuple(range(len(batch_names)))
if lhs_batch != rhs_batch or set(lhs_batch) != set(batch_dims):
lhs = moveaxis(lhs, lhs_batch, batch_dims)
lhs_names = _movechars(lhs_names, lhs_batch, batch_dims)
rhs = moveaxis(rhs, rhs_batch, batch_dims)
rhs_names = _movechars(rhs_names, rhs_batch, batch_dims)
batch_names = ''.join(batch_names)
else:
batch_dims = tuple(lhs_batch)
batch_names = ''.join(lhs_names[i] for i in range(len(lhs_names))
if i in batch_dims)
# contract using lax.dot_general
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
bdims = tuple(range(len(batch_dims)))
dimension_numbers = [(lhs_cont, rhs_cont), (bdims, bdims)]
operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)
deleted_names = batch_names + ''.join(contracted_names)
names = (batch_names + _removechars(lhs_names, deleted_names)
+ _removechars(rhs_names, deleted_names))
else:
raise NotImplementedError # if this is actually reachable, open an issue!
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple([names.index(name) for name in result_names])
operand = lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return operands[0]
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
@_wraps(onp.inner, lax_description=_PRECISION_DOC)
def inner(a, b, precision=None):
if ndim(a) == 0 or ndim(b) == 0:
return a * b
return tensordot(a, b, (-1, -1), precision=precision)
@_wraps(onp.outer)
def outer(a, b, out=None):
if out:
raise NotImplementedError("The 'out' argument to outer is not supported.")
a, b = _promote_dtypes(a, b)
return ravel(a)[:, None] * ravel(b)
@partial(jit, static_argnums=(2, 3, 4))
def _cross(a, b, axisa, axisb, axisc):
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError("Dimension must be either 2 or 3 for cross product")
if a.shape[-1] == 2 and b.shape[-1] == 2:
return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]
a0 = a[..., 0]
a1 = a[..., 1]
a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)
b0 = b[..., 0]
b1 = b[..., 1]
b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)
c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])
return moveaxis(c, 0, axisc)
@_wraps(onp.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
if axis is not None:
axisa = axis
axisb = axis
axisc = axis
return _cross(a, b, axisa, axisb, axisc)
@_wraps(onp.kron)
def kron(a, b):
a, b = _promote_dtypes(a, b)
if ndim(a) < ndim(b):
a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))
elif ndim(b) < ndim(a):
b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))
a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])
b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])
out_shape = tuple(onp.multiply(shape(a), shape(b)))
return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)
@_wraps(onp.vander)
def vander(x, N=None, increasing=False):
x = asarray(x)
dtype = _dtype(x)
if ndim(x) != 1:
raise ValueError("x must be a one-dimensional array")
x_shape = shape(x)
  N = x_shape[0] if N is None else N
if N < 0:
raise ValueError("N must be nonnegative")
iota = lax.iota(dtype, N)
if not increasing:
iota = lax.sub(lax._const(iota, N - 1), iota)
return power(x[..., None], iota)
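# [Editor's note] Illustrative sketch, not part of the original source. By
# default the powers decrease from left to right, as in NumPy:
#
#   import jax.numpy as jnp
#   jnp.vander(jnp.array([1., 2., 3.]), 3)
#   # [[1., 1., 1.],
#   #  [4., 2., 1.],
#   #  [9., 3., 1.]]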
### Misc
@_wraps(onp.argmax)
def argmax(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
return _argminmax(max, a, axis)
@_wraps(onp.argmin)
def argmin(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
return _argminmax(min, a, axis)
# TODO(mattjj): redo this lowering with a call to variadic lax.reduce
def _argminmax(op, a, axis):
shape = [1] * a.ndim
shape[axis] = a.shape[axis]
idxs = lax.tie_in(a, arange(a.shape[axis])).reshape(shape)
maxval = iinfo(dtypes.canonicalize_dtype(idxs.dtype)).max
maxval = lax.tie_in(a, maxval)
mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval)
return min(mask_idxs, axis)
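# [Editor's note] Illustrative sketch, not part of the original source. The
# lowering above masks non-extremal positions with a large index and reduces
# with `min`, which reproduces NumPy's first-occurrence argmax/argmin:
#
#   import jax.numpy as jnp
#   a = jnp.array([[1, 9, 3], [7, 2, 8]])
#   jnp.argmax(a)            # 1  (index into the flattened array)
#   jnp.argmax(a, axis=1)    # [1 2]
#   jnp.argmin(a, axis=0)    # [0 1 0]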
@_wraps(onp.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to sort is ignored.")
if order is not None:
raise ValueError("'order' argument to sort is not supported.")
if axis is None:
return lax.sort(a.ravel(), 0)
else:
return lax.sort(a, _canonicalize_axis(axis, ndim(a)))
@_wraps(onp.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to argsort is ignored.")
if order is not None:
raise ValueError("'order' argument to argsort is not supported.")
if axis is None:
return argsort(a.ravel(), 0)
else:
axis = _canonicalize_axis(axis, ndim(a))
iota = lax.broadcasted_iota(onp.int64, shape(a), axis)
_, perm = lax.sort_key_val(a, iota, dimension=axis)
return perm
@_wraps(onp.msort)
def msort(a):
return sort(a, axis=0)
@partial(jit, static_argnums=(2,))
def _roll(a, shift, axis):
a = asarray(a)
a_shape = shape(a)
if axis is None:
return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)
a_ndim = len(a_shape)
shift = asarray(shift)
axis = onp.asarray(axis)
b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
for x, i in zip(broadcast_to(shift, b_shape),
onp.broadcast_to(axis, b_shape)):
i = _canonicalize_axis(i, a_ndim)
x = remainder(x, (a_shape[i] or 1))
a = lax.concatenate((a, a), i)
a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)
return a
@_wraps(onp.roll)
def roll(a, shift, axis=None):
return _roll(a, shift, axis)
@_wraps(onp.rollaxis)
def rollaxis(a, axis, start=0):
a_ndim = ndim(a)
if not (-a_ndim <= axis < a_ndim):
raise ValueError(f"axis={axis} is out of bounds for array of dimension {a_ndim}")
if not (-a_ndim <= start <= a_ndim):
raise ValueError(f"start={start} must satisfy {-a_ndim}<=start<={a_ndim}")
if start < 0:
start += a_ndim
if axis < 0:
axis += a_ndim
if start > axis:
start -= 1
return moveaxis(a, axis, start)
@_wraps(onp.take)
def take(a, indices, axis=None, out=None, mode=None):
if out:
raise NotImplementedError("The 'out' argument to np.take is not supported.")
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = ravel(a)
axis = 0
axis = _canonicalize_axis(axis, ndim(a))
if mode == "raise":
# TODO(phawkins): we have no way to report out of bounds errors yet.
raise NotImplementedError("The 'raise' mode to np.take is not supported.")
elif mode == "wrap":
indices = mod(indices, _constant_like(indices, a.shape[axis]))
elif mode != "clip" and mode is not None:
raise ValueError("Invalid mode '{}' for np.take".format(mode))
index_dims = len(shape(indices))
slice_sizes = list(shape(a))
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(
list(range(axis)) +
list(range(axis + index_dims, len(a.shape) + index_dims - 1))),
collapsed_slice_dims=(axis,),
start_index_map=(axis,))
return lax.gather(a, indices[..., None], dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
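# [Editor's note] Illustrative sketch, not part of the original source. `take`
# lowers to a single lax.gather; 'wrap' reduces indices modulo the axis size:
#
#   import jax.numpy as jnp
#   a = jnp.array([4, 3, 5, 7, 6, 8])
#   jnp.take(a, jnp.array([0, 1, 4]))          # [4 3 6]
#   jnp.take(a, jnp.array([7]), mode='wrap')   # [3]  (7 mod 6 == 1)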
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index)
@partial(jit, static_argnums=(2,))
def _take_along_axis(arr, indices, axis):
if axis is None:
if ndim(indices) != 1:
msg = "take_along_axis indices must be 1D if axis=None, got shape {}"
raise ValueError(msg.format(indices.shape))
return take_along_axis(arr.ravel(), indices, 0)
rank = ndim(arr)
if rank != ndim(indices):
msg = "indices and arr must have the same number of dimensions; {} vs. {}"
raise ValueError(msg.format(ndim(indices), ndim(arr)))
axis = _canonicalize_axis(axis, rank)
def replace(tup, val):
lst = list(tup)
lst[axis] = val
return tuple(lst)
bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))
indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))
arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))
axis_size = arr.shape[axis]
arr_shape = replace(arr.shape, 1)
idx_shape = indices.shape
out_shape = lax.broadcast_shapes(idx_shape, arr_shape)
index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]
gather_index_shape = tuple(onp.array(out_shape)[index_dims]) + (1,)
gather_indices = []
slice_sizes = []
offset_dims = []
start_index_map = []
collapsed_slice_dims = []
j = 0
for i in range(rank):
if i == axis:
indices = _normalize_index(indices, axis_size)
gather_indices.append(lax.reshape(indices, gather_index_shape))
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
elif idx_shape[i] != 1:
iota = lax.iota(_dtype(indices), out_shape[i])
iota = lax.tie_in(arr, iota)
iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))
gather_indices.append(iota)
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
else:
# If idx_shape[i] == 1, we can just take the entirety of the arr's axis
# and avoid forming an iota index.
offset_dims.append(i)
slice_sizes.append(arr_shape[i])
gather_indices = lax.concatenate(gather_indices, dimension=j)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(collapsed_slice_dims),
start_index_map=tuple(start_index_map))
return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))
@_wraps(getattr(onp, "take_along_axis", None), update_doc=False)
def take_along_axis(arr, indices, axis):
return _take_along_axis(arr, indices, axis)
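# [Editor's note] Illustrative sketch, not part of the original source. A common
# use is pairing take_along_axis with argsort to reorder rows by their own sort
# order:
#
#   import jax.numpy as jnp
#   a = jnp.array([[10, 30, 20], [60, 40, 50]])
#   idx = jnp.argsort(a, axis=1)
#   jnp.take_along_axis(a, idx, axis=1)   # [[10 20 30]
#                                         #  [40 50 60]]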
### Indexing
def _rewriting_take(arr, idx):
# Computes arr[idx].
# All supported cases of indexing can be implemented as an XLA gather,
# followed by an optional reverse and a reshape.
arr = asarray(arr)
treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
return _gather(arr, treedef, static_idx, dynamic_idx)
# TODO(phawkins): re-enable jit after fixing excessive recompilation for
# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).
# @partial(jit, static_argnums=(1, 2))
def _gather(arr, treedef, static_idx, dynamic_idx):
idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)
indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update
y = arr
# Avoid calling gather if the slice shape is empty, both as a fast path and to
# handle cases like zeros(0)[array([], int32)].
if _prod(indexer.slice_shape) == 0:
return zeros(indexer.slice_shape, dtype=y.dtype)
# We avoid generating a gather when indexer.gather_indices.size is empty.
if indexer.gather_indices.size:
y = lax.gather(y, indexer.gather_indices, indexer.dnums,
indexer.gather_slice_shape)
# Reverses axes with negative strides.
if indexer.reversed_y_dims:
y = lax.rev(y, indexer.reversed_y_dims)
# This adds np.newaxis/None dimensions.
return lax.reshape(y, indexer.slice_shape)
_Indexer = collections.namedtuple("_Indexer", [
# The expected shape of the slice output.
"slice_shape",
# The slice shape to pass to lax.gather().
"gather_slice_shape",
# The gather indices to use.
"gather_indices",
# A GatherDimensionNumbers object describing the gather to perform.
"dnums",
# Slice dimensions that have negative strides, and so must be reversed after
# the gather.
"reversed_y_dims",
# For scatters, we must eliminate any axes created by `newaxis`, which
# are the following dimensions, which must be of size 1. For gathers, we
# simply reshape to `slice_shape` to introduce the new axes.
"newaxis_dims",
])
def _split_index_for_jit(idx):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
"""
  # Convert list indices to tuples, a case deprecated by NumPy.
idx = _eliminate_deprecated_list_indexing(idx)
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
idx = _expand_bool_indices(idx)
leaves, treedef = pytree.flatten(idx)
dynamic = [None] * len(leaves)
static = [None] * len(leaves)
for i, x in enumerate(leaves):
if x is Ellipsis:
static[i] = x
elif isinstance(x, slice):
# slice objects aren't hashable.
static[i] = (x.start, x.stop, x.step)
else:
dynamic[i] = x
return treedef, tuple(static), dynamic
def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by _split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
def _int(aval):
return not aval.shape and issubdtype(aval.dtype, integer)
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
if (isinstance(e, Sequence) or isinstance(e, ndarray)))
advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
for e, i, j in advanced_pairs)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
index_dtype = int64 if max(x_shape) >= (1 << 31) else int32
gather_indices = onp.zeros((0,), dtype=index_dtype) # use onp to save a compilation
# We perform three transformations to y before the scatter op, in order:
  # First, y is broadcast to slice_shape. In general `y` only needs to be
  # broadcastable to the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
# * the advanced indices were not contiguous and we are the start.
# * we are at the position of the first advanced index.
if (advanced_indexes is not None and
(advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
not advanced_axes_are_contiguous and idx_pos == 0)):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)
for a in advanced_indexes]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices, onp.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if (isinstance(abstract_i, ConcreteArray) or
isinstance(abstract_i, ShapedArray)) and _int(abstract_i):
if x_shape[x_axis] == 0:
# XLA gives error when indexing into an axis of size 0
raise IndexError(f"index is out of bounds for axis {x_axis} with size 0")
i = _normalize_index(i, x_shape[x_axis])
i = lax.convert_element_type(i, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)):
msg = ("Array slice indices must have static start/stop/step to be used "
"with Numpy indexing syntax. Try lax.dynamic_slice/"
"dynamic_update_slice instead.")
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=index_dtype)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i, shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,))
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1)) +
(len(gather_indices_shape),)))
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape))
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if (abstract_i is not None and
not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims = tuple(offset_dims),
collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
start_index_map = tuple(start_index_map)
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices)
def _should_unpack_list_index(x):
"""Helper for _eliminate_deprecated_list_indexing."""
return (isinstance(x, ndarray) and onp.ndim(x) != 0
or isinstance(x, Sequence)
or isinstance(x, slice) or x is Ellipsis or x is None)
def _eliminate_deprecated_list_indexing(idx):
# "Basic slicing is initiated if the selection object is a non-array,
# non-tuple sequence containing slice objects, [Ellipses, or newaxis
# objects]". Detects this case and canonicalizes to a tuple. This case is
# deprecated by NumPy and exists for backward compatibility.
if not isinstance(idx, tuple):
if isinstance(idx, Sequence) and not isinstance(idx, ndarray):
if _any(_should_unpack_list_index(i) for i in idx):
idx = tuple(idx)
else:
idx = (idx,)
else:
idx = (idx,)
return idx
def _expand_bool_indices(idx):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
for i in idx:
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_)
for e in i)):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
if not type(abstract_i) is ConcreteArray:
msg = ("Array boolean indices must be static (e.g. no dependence on an "
"argument to a jit or vmap function).")
raise IndexError(msg)
else:
out.extend(onp.where(i))
else:
out.append(i)
return tuple(out)
def _is_slice_none(idx):
"""Return True if idx is equal to slice(None), False otherwise."""
if isinstance(idx, slice):
return idx.start is None and idx.stop is None and idx.step is None
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
"""Returns True if idx should trigger int array indexing, False otherwise."""
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
assert isinstance(idx, tuple)
if _all(onp.ndim(elt) == 0 for elt in idx):
return False
return _all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
"""Returns True if x is array-like with integer dtype, False otherwise."""
return (isinstance(x, int) and not isinstance(x, bool)
or issubdtype(getattr(x, "dtype", None), onp.integer)
or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))
def _canonicalize_tuple_index(arr_ndim, idx):
"""Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)
if len_without_none > arr_ndim:
msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}."
raise IndexError(msg.format(len_without_none, arr_ndim))
ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)
ellipsis_index = next(ellipses, None)
if ellipsis_index is not None:
if next(ellipses, None) is not None:
msg = "Multiple ellipses (...) not supported: {}."
raise IndexError(msg.format(list(map(type, idx))))
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]
elif len_without_none < arr_ndim:
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = tuple(idx) + colons
return idx
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
blackman = _wrap_numpy_nullary_function(onp.blackman)
bartlett = _wrap_numpy_nullary_function(onp.bartlett)
hamming = _wrap_numpy_nullary_function(onp.hamming)
hanning = _wrap_numpy_nullary_function(onp.hanning)
# TODO: lower `kaiser` via lax to allow non-constant beta values.
kaiser = _wrap_numpy_nullary_function(onp.kaiser)
def _gcd_cond_fn(xs):
x1, x2 = xs
return any(x2 != 0)
def _gcd_body_fn(xs):
x1, x2 = xs
x1, x2 = (where(x2 != 0, x2, x1),
where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@_wraps(getattr(onp, "gcd", None))
def gcd(x1, x2):
if (not issubdtype(_dtype(x1), integer) or
not issubdtype(_dtype(x2), integer)):
raise ValueError("Arguments to gcd must be integers.")
x1, x2 = _promote_dtypes(x1, x2)
x1, x2 = broadcast_arrays(x1, x2)
gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn,
(lax.abs(x1), lax.abs(x2)))
return gcd
@_wraps(getattr(onp, "lcm", None))
def lcm(x1, x2):
x1, x2 = _promote_dtypes(x1, x2)
d = gcd(x1, x2)
return where(d == 0, lax._const(d, 0),
lax.div(lax.abs(multiply(x1, x2)), d))
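# [Editor's note] Illustrative sketch, not part of the original source. `gcd`
# runs Euclid's algorithm in a lax.while_loop over broadcast integer inputs:
#
#   import jax.numpy as jnp
#   jnp.gcd(jnp.array(12), jnp.array(18))   # 6
#   jnp.lcm(jnp.array([4, 6]), 10)          # [20 30]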
@_wraps(onp.cov)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
msg = ("jax.numpy.cov not implemented for nontrivial {}. "
"Open a feature request at https://github.com/google/jax/issues !")
if y is not None: raise NotImplementedError(msg.format('y'))
# These next two are actually implemented, just not tested.
if fweights is not None: raise NotImplementedError(msg.format('fweights'))
if aweights is not None: raise NotImplementedError(msg.format('aweights'))
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions") # same as numpy error
X = array(m, ndmin=2, dtype=dtypes.canonicalize_dtype(result_type(m, float_)))
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return onp.array([]).reshape(0, 0)
if ddof is None:
ddof = 1 if bias == 0 else 0
w = None
if fweights is not None:
if onp.ndim(fweights) > 1:
raise RuntimeError("cannot handle multidimensional fweights")
if onp.shape(fweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
w = asarray(fweights)
if aweights is not None:
if onp.ndim(aweights) > 1:
raise RuntimeError("cannot handle multidimensional aweights")
if onp.shape(aweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
w = aweights if w is None else w * aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
f = X.shape[1] - ddof
elif ddof == 0:
f = w_sum
elif aweights is None:
f = w_sum - ddof
else:
f = w_sum - ddof * sum(w * aweights) / w_sum
X = X - avg[:, None]
X_T = X.T if w is None else (X * w).T
return true_divide(dot(X, X_T.conj()), f).squeeze()
@_wraps(onp.corrcoef)
def corrcoef(x, y=None, rowvar=True):
c = cov(x, y, rowvar)
if len(shape(c)) == 0:
# scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
return divide(c, c)
d = diag(c)
stddev = sqrt(real(d))
c = divide(c, stddev[:,None])
c = divide(c, stddev[None,:])
real_part = clip(real(c), -1, 1)
if iscomplexobj(c):
complex_part = clip(imag(c), -1, 1)
c = lax.complex(real_part, complex_part)
else:
c = real_part
return c
@_wraps(getattr(onp, "quantile", None))
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
if overwrite_input or out is not None:
msg = ("jax.numpy.quantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
if interpolation not in ["linear", "lower", "higher", "midpoint", "nearest"]:
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', 'midpoint', or 'nearest'")
return _quantile(a, q, axis, interpolation, keepdims)
@partial(jit, static_argnums=(2, 3, 4))
def _quantile(a, q, axis, interpolation, keepdims):
a = asarray(a)
if axis is None:
a = ravel(a)
axis = 0
elif isinstance(axis, tuple):
raise NotImplementedError("Tuple values for axis are not implemented")
else:
axis = _canonicalize_axis(axis, ndim(a))
q_ndim = ndim(q)
if q_ndim > 1:
    raise ValueError("q must have rank <= 1, got shape {}".format(shape(q)))
q = asarray(q)
if not issubdtype(a.dtype, floating) or not issubdtype(q.dtype, floating):
msg = "q and a arguments to quantile must be of float type, got {} and {}"
raise TypeError(msg.format(a.dtype, q.dtype))
# Promote q to at least float32 for precise interpolation.
q = lax.convert_element_type(q, promote_types(q.dtype, float32))
a_shape = shape(a)
a = lax.sort(a, dimension=axis)
n = a_shape[axis]
q = lax.mul(q, _constant_like(q, n - 1))
low = lax.floor(q)
high = lax.ceil(q)
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))
high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
slice_sizes = list(a_shape)
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(range(
q_ndim,
len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),
collapsed_slice_dims=() if keepdims else (axis,),
start_index_map=(axis,))
low = low[..., None]
high = high[..., None]
low_value = lax.gather(a, low, dimension_numbers=dnums,
slice_sizes=slice_sizes)
high_value = lax.gather(a, high, dimension_numbers=dnums,
slice_sizes=slice_sizes)
if q_ndim == 1:
low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,
broadcast_dimensions=(0,))
high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,
broadcast_dimensions=(0,))
if interpolation == "linear":
result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),
lax.mul(high_value.astype(q.dtype), high_weight))
elif interpolation == "lower":
result = low_value
elif interpolation == "higher":
result = high_value
elif interpolation == "nearest":
pred = lax.le(high_weight, _constant_like(high_weight, 0.5))
result = lax.select(pred, low_value, high_value)
elif interpolation == "midpoint":
result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))
else:
raise ValueError(f"interpolation={interpolation!r} not recognized")
return lax.convert_element_type(result, a.dtype)
@_wraps(onp.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
q = true_divide(asarray(q), float32(100.0))
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(onp.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
q = 0.5
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
keepdims=keepdims, interpolation='midpoint')
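# [Editor's note] Illustrative sketch, not part of the original source. The
# quantile position is split into floor/ceil neighbours gathered from the
# sorted array and then combined according to `interpolation`:
#
#   import jax.numpy as jnp
#   a = jnp.array([1., 2., 3., 4.])
#   jnp.quantile(a, 0.5)                          # 2.5   (linear)
#   jnp.quantile(a, 0.5, interpolation='lower')   # 2.0
#   jnp.percentile(a, 75)                         # 3.25
#   jnp.median(a)                                 # 2.5   (midpoint)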
def _astype(arr, dtype):
lax._check_user_dtype_supported(dtype, "astype")
return lax.convert_element_type(arr, dtype)
### track unimplemented functions
def _not_implemented(fun):
@_wraps(fun)
def wrapped(*args, **kwargs):
msg = "Numpy function {} not yet implemented"
raise NotImplementedError(msg.format(fun))
return wrapped
# Build a set of all unimplemented NumPy functions.
for func in get_module_functions(onp):
if func.__name__ not in globals():
globals()[func.__name__] = _not_implemented(func)
### add method and operator overloads to arraylike classes
# We add operator overloads to DeviceArray and ShapedArray. These method and
# operator overloads mainly just forward calls to the corresponding lax_numpy
# functions, which can themselves handle instances from any of these classes.
_scalar_types = (int, float, complex, onp.generic)
def _defer_to_unrecognized_arg(binary_op):
# Ensure that other array types have the chance to override arithmetic.
def deferring_binary_op(self, other):
if not isinstance(other, _scalar_types + _arraylike_types + (core.Tracer,)):
return NotImplemented
return binary_op(self, other)
return deferring_binary_op
def _swap_args(f):
return lambda x, y: f(y, x)
def _unimplemented_setitem(self, i, x):
msg = ("'{}' object does not support item assignment. JAX arrays are "
"immutable; perhaps you want jax.ops.index_update or "
"jax.ops.index_add instead?")
raise TypeError(msg.format(type(self)))
def _operator_round(number, ndigits=None):
out = round(number, decimals=ndigits or 0)
# If `ndigits` is None, for a builtin float round(7.5) returns an integer.
return out.astype(int_) if ndigits is None else out
_operators = {
"getitem": _rewriting_take,
"setitem": _unimplemented_setitem,
"neg": negative,
"pos": positive,
"eq": _defer_to_unrecognized_arg(equal),
"ne": _defer_to_unrecognized_arg(not_equal),
"lt": _defer_to_unrecognized_arg(less),
"le": _defer_to_unrecognized_arg(less_equal),
"gt": _defer_to_unrecognized_arg(greater),
"ge": _defer_to_unrecognized_arg(greater_equal),
"abs": abs,
"add": _defer_to_unrecognized_arg(add),
"radd": _defer_to_unrecognized_arg(add),
"sub": _defer_to_unrecognized_arg(subtract),
"rsub": _defer_to_unrecognized_arg(_swap_args(subtract)),
"mul": _defer_to_unrecognized_arg(multiply),
"rmul": _defer_to_unrecognized_arg(multiply),
"div": _defer_to_unrecognized_arg(divide),
"rdiv": _defer_to_unrecognized_arg(_swap_args(divide)),
"truediv": _defer_to_unrecognized_arg(true_divide),
"rtruediv": _defer_to_unrecognized_arg(_swap_args(true_divide)),
"floordiv": _defer_to_unrecognized_arg(floor_divide),
"rfloordiv": _defer_to_unrecognized_arg(_swap_args(floor_divide)),
"divmod": _defer_to_unrecognized_arg(divmod),
"rdivmod": _defer_to_unrecognized_arg(_swap_args(divmod)),
"mod": _defer_to_unrecognized_arg(mod),
"rmod": _defer_to_unrecognized_arg(_swap_args(mod)),
"pow": _defer_to_unrecognized_arg(power),
"rpow": _defer_to_unrecognized_arg(_swap_args(power)),
"matmul": _defer_to_unrecognized_arg(matmul),
"rmatmul": _defer_to_unrecognized_arg(_swap_args(matmul)),
"and": _defer_to_unrecognized_arg(bitwise_and),
"rand": _defer_to_unrecognized_arg(bitwise_and),
"or": _defer_to_unrecognized_arg(bitwise_or),
"ror": _defer_to_unrecognized_arg(bitwise_or),
"xor": _defer_to_unrecognized_arg(bitwise_xor),
"rxor": _defer_to_unrecognized_arg(bitwise_xor),
"invert": bitwise_not,
"lshift": _defer_to_unrecognized_arg(left_shift),
"rshift": _defer_to_unrecognized_arg(right_shift),
"round": _operator_round,
}
# These numpy.ndarray methods are just refs to an equivalent numpy function
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
"nonzero", "searchsorted", "round"]
_diff_methods = ["clip", "compress", "conj", "conjugate", "cumprod", "cumsum",
"diagonal", "dot", "max", "mean", "min", "prod", "ptp",
"ravel", "repeat", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "tile", "trace", "transpose", "var"]
# Set up operator, method, and property forwarding on Tracer instances containing
# ShapedArray avals by following the forwarding conventions for Tracer.
# Forward operators using a single-underscore-prefix naming convention:
for operator_name, function in _operators.items():
setattr(ShapedArray, "_{}".format(operator_name), staticmethod(function))
# Forward methods and properties using core.aval_method and core.aval_property:
for method_name in _nondiff_methods + _diff_methods:
setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))
setattr(ShapedArray, "reshape", core.aval_method(_reshape_method))
setattr(ShapedArray, "flatten", core.aval_method(ravel))
setattr(ShapedArray, "T", core.aval_property(transpose))
setattr(ShapedArray, "real", core.aval_property(real))
setattr(ShapedArray, "imag", core.aval_property(imag))
setattr(ShapedArray, "astype", core.aval_method(_astype))
# Forward operators, methods, and properties on DeviceArray to lax_numpy
# functions (with no Tracers involved; this forwarding is direct)
for operator_name, function in _operators.items():
setattr(DeviceArray, "__{}__".format(operator_name), function)
for method_name in _nondiff_methods + _diff_methods:
setattr(DeviceArray, method_name, globals()[method_name])
setattr(DeviceArray, "reshape", _reshape_method)
setattr(DeviceArray, "flatten", ravel)
setattr(DeviceArray, "T", property(transpose))
setattr(DeviceArray, "real", property(real))
setattr(DeviceArray, "imag", property(imag))
setattr(DeviceArray, "astype", _astype)
setattr(DeviceArray, "tolist", lambda x: onp.array(x).tolist())
# Extra methods that are handy
setattr(ShapedArray, "broadcast", core.aval_method(lax.broadcast))
setattr(ShapedArray, "broadcast_in_dim", core.aval_method(lax.broadcast_in_dim))
setattr(ShapedArray, "split", core.aval_method(split))
setattr(DeviceArray, "broadcast", lax.broadcast)
setattr(DeviceArray, "broadcast_in_dim", lax.broadcast_in_dim)
setattr(DeviceArray, "split", split)
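# Illustrative sketch (not part of the original source): after the forwarding set
# up above, both traced values and DeviceArrays expose the familiar numpy-style
# surface, e.g.
#
#     y = (x + 1.0).T.reshape(-1).sum()
#
# where each operator, method, and property resolves to the lax_numpy function
# registered here.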
@jit
def _unstack(x):
if x.ndim == 0:
raise ValueError("Argument to _unstack must be non-scalar")
return [lax.index_in_dim(x, i, keepdims=False) for i in range(x.shape[0])]
setattr(DeviceArray, "_unstack", _unstack)
|
the-stack_106_29907 | #!/usr/bin/env python
from pylab import *
from numpy import exp, abs, meshgrid, linspace, array, sin, cos, pi, sqrt
from mpl_toolkits.mplot3d import Axes3D
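# This script draws the surface of revolution z = (r**2 - 1)**2 centered at (1, 1),
# adds a plane and arrows illustrating the gradient at the point offset by `eta`
# from v0, annotates them, and saves the figure as a PDF.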
v0 = array([1, 1, 0])
eta = array([0.2, -0.2, 0])
r = linspace(0, 1, 300)
p = linspace(0, 2*pi, 300)
R, P = meshgrid(r, p)
f = lambda r, p: r*cos(p) + 1
g = lambda r, p: r*sin(p) + 1
h = lambda r: (r**2 - 1)**2
X, Y, Z = f(R, P), g(R, P), h(R)
rho = sqrt(eta[0]**2 + eta[1]**2 + eta[2]**2)
point0 = array([1 + rho, 1 - rho, h(rho)])
normal = array([1, -1, 1])
d = -point0.dot(normal)
x = linspace(1.125, 1.375, 100)
y = linspace(0.625, 0.875, 100)
xx, yy = np.meshgrid(x, y)
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
a3d = gca(projection='3d')
a3d.plot_surface(xx, yy, z, alpha=0.25, color="r")
a3d.plot_surface(X, Y, Z, alpha=0.35)
#
o = array([0, 0, 0])
ex = array([1.75, 0, 0]) + o
ey = array([0, 1.75, 0]) + o
ez = array([0, 0, 1.0]) + o
xs, ys, zs = zip(o, ex)
plot(xs, ys, zs)
xs, ys, zs = zip(o, ey)
plot(xs, ys, zs)
xs, ys, zs = zip(o, ez)
plot(xs, ys, zs)
xs, ys, zs = [v0[0] + eta[0]], [v0[1] + eta[1]], [h(sqrt(eta[0]**2 + eta[1]**2+ eta[2]**2))]
a3d.scatter(xs, ys, zs)
xs1, ys1, zs1 = zip([0, v0[1] + eta[1], 0], v0 + eta, [v0[0] + eta[0], 0, 0])
plot(xs1, ys1, zs1, "--")
plot([xs[0], xs1[1]], [ys[0], ys1[1]], [zs[0], zs1[1]], "--")
a3d.quiver(xs[0] + 0.2, ys[0] - 0.2, zs[0] + 0.2, 1, -1, 1, linewidth=2, alpha=1, length=0.35)
a3d.quiver(xs[0] + 0.14, ys[0] - 0.14, zs[0] - 0.14, 1, -1, -1, linewidth=2, alpha=1, length=0.25)
a3d.text(v0[0] + eta[0] - 0.025, v0[1] + eta[1] - 0.1, zs[0], r'$\mathcal{G}(v_0)$', fontsize=20)
a3d.text(v0[0] + eta[0] + 0.05, v0[1] + eta[1] + 0.1, zs[0], r'$\nabla_v \mathcal{G}(v_0)$', fontsize=20)
a3d.text(v0[0] + eta[0] + 0.175, v0[1] + eta[1] - 0.2, zs[0], r'$\frac{d R(t)}{dt}$', fontsize=20)
grid(False)
a = gca()
xticks([])
yticks([])
a.set_zticks([])
a.set_zlim3d([0.5, 1])
a.set_ylim3d([0.75, 1.25])
a.set_xlim3d([0.75, 1.25])
a.set_xticklabels([])
a.set_yticklabels([])
a.set_zticklabels([])
a.set_axis_off()
# Save the figure in the same folder
savefig("superficie3dplano.pdf", bbox_inches='tight', pad_inches=0, transparent=True)
|
the-stack_106_29911 | # -*- coding: utf-8 -*-
from cgi import FieldStorage
from os import environ
#DB_HOST = 'localhost'
#DB_USER = 'hablemosdeazucar'
#DB_PASS = 'mrqJoBiwECmMCAsPVK4UUxsc'
#DB_NAME = 'hablemosdeazucarcgi'
#db_data = [DB_HOST, DB_USER, DB_PASS, DB_NAME]
POST = FieldStorage()
DOCUMENT_ROOT = '/srv/websites/marcoslealsierra/rootsystem'
PRIVATE_DIR = DOCUMENT_ROOT.replace('rootsystem', 'private')
TMP_DIR = "/tmp"
LOG_FILE = '{}/logs/marcoslealsierra.log'.format(PRIVATE_DIR)
DEFAULT_RESOURCE = "/page/inicio"
SHOW_ERROR_404 = False  # Production: show the default resource at the root
STATIC_DIR = "{}/static".format(DOCUMENT_ROOT)
TEMPLATE_FILE = "{}/html/template.html".format(STATIC_DIR)
URLS_FILE = '{}/urls'.format(PRIVATE_DIR)
# Directorio de sesiones (crear a mano si no existe)
#SESS_DIR = "{}/pysessions".format(PRIVATE_DIR)
#LOGIN_PAGE = "/credencial/login"
# Restricted-access page
#RESTRICTED_PAGE = "/page/restricted-page"
# Dictionary of dictionaries used to handle error messages
APP_ERRORS = dict()
APP_ERRORS['USER'] = dict(
user='El usuario no puede estar vacio',
name="El nombre completo es requerido",
level='El nivel de acceso es requerido',
email='El email no puede estar vacio',
password='La contraseña es requerida'
)
APP_ERRORS['URLS'] = dict(
url_amigable='La url no puede estar vacia',
uri='La uri no puede estar vacia'
)
|
the-stack_106_29913 | import numpy as np
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
# Implement conditionals to decide what to do given perception data
# Here you're all set up with some basic functionality but you'll need to
# improve on this decision tree to do a good job of navigating autonomously!
# Check if we have been stuck
#int(Rover.pos[0])
#int(Rover.pos[1])
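    # Stuck detection: append the rounded (x, y, yaw) pose to Rover.q (assumed to be
    # a bounded deque created elsewhere); if the same pose repeats many times, the
    # rover is treated as stuck and spins in place away from its last steering direction.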
Rover.q.append([round(Rover.pos[0],1),round(Rover.pos[1],1),int(Rover.yaw)])
print(Rover.q)
#print(Rover.q.count([round(Rover.pos[0],1),round(Rover.pos[1],1),int(Rover.yaw)]))
if Rover.q.count([round(Rover.pos[0],1),round(Rover.pos[1],1),int(Rover.yaw)]) >= 15 and Rover.laststeer == 'right':
Rover.throttle = 0
Rover.brake = 0
Rover.steer = 15
Rover.mode = 'stacked'
        print('Stacked1')
if Rover.q.count([round(Rover.pos[0], 1), round(Rover.pos[1], 1), int(Rover.yaw)]) >= 15 and Rover.laststeer == 'left':
Rover.throttle = 0
Rover.brake = 0
Rover.steer = -15
Rover.mode = 'stacked'
print('Stacked2')
if Rover.q.count([round(Rover.pos[0], 1), round(Rover.pos[1], 1), int(Rover.yaw)]) < 15:
Rover.mode = 'forward'
# Example:
# Check if we have vision data to make decisions with
if Rover.nav_angles is not None:
# Check for Rover.mode status
if Rover.mode == 'forward':
# Check the extent of navigable terrain
if len(Rover.nav_angles) >= Rover.stop_forward:
# If mode is forward, navigable terrain looks good
# and velocity is below max, then throttle
if Rover.vel < Rover.max_vel:
# Set throttle value to throttle setting
Rover.throttle = Rover.throttle_set
else: # Else coast
Rover.throttle = 0
Rover.brake = 0
# Set steering to average angle clipped to the range +/- 15
Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
# If there's a lack of navigable terrain pixels then go to 'stop' mode
elif len(Rover.nav_angles) < Rover.stop_forward:
# Set mode to "stop" and hit the brakes!
Rover.throttle = -0.1
# Set brake to stored brake value
Rover.brake = Rover.brake_set
#Rover.throttle = 0
#Rover.steer = 0
Rover.mode = 'stop'
# If we're already in "stop" mode then make different decisions
elif Rover.mode == 'stop':
# If we're in stop mode but still moving keep braking
if Rover.vel > 0.2:
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
# If we're not moving (vel < 0.2) then do something else
elif Rover.vel <= 0.2:
#Rover.brake = 0
# Now we're stopped and we have vision data to see if there's a path forward
if len(Rover.nav_angles) < Rover.go_forward:
#Rover.throttle = 0
# Release the brake to allow turning
#Rover.brake = 0
#Rover.throttle = -0.1
                    mean_nav_angle = np.mean(Rover.nav_angles * 180/np.pi)
                    print(mean_nav_angle)
                    # Turn range is +/- 15 degrees; when stopped the next line will induce 4-wheel turning
                    if mean_nav_angle >= -45 or np.isnan(mean_nav_angle):
#Rover.throttle = 0
#Rover.brake = Rover.brake_set
Rover.brake = 0
Rover.steer = 15
Rover.laststeer = 'left'
#Rover.throttle = -Rover.throttle_set
#Rover.steer = 15 # Could be more clever here about which way to turn
elif np.mean(Rover.nav_angles * 180/np.pi)<-45:
#Rover.throttle = -Rover.throttle_set
#Rover.throttle = 0
#Rover.brake = Rover.brake_set
Rover.brake = 0
Rover.steer = -15
Rover.laststeer = 'right'
else:
if Rover.laststeer == 'right':
Rover.steer = -15
else:
Rover.steer = 15
# If we're stopped but see sufficient navigable terrain in front then go!
if len(Rover.nav_angles) >= Rover.go_forward and Rover.mode !='stacked':
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)
Rover.mode = 'forward'
# Just to make the rover do something
# even if no modifications have been made to the code
#else:
# Rover.throttle = Rover.throttle_set
# Rover.steer = 0
# Rover.brake = 0
# If in a state where want to pickup a rock send pickup command
if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
Rover.send_pickup = True
return Rover
|
the-stack_106_29917 | import collections
import json
import six
import numpy as np
from threading import Thread, Event
from ..base import InterfaceBase
from ..setupuploadmixin import SetupUploadMixin
from ...utilities.async_manager import AsyncManagerMixin
from ...utilities.plotly_reporter import create_2d_histogram_plot, create_value_matrix, create_3d_surface, \
create_2d_scatter_series, create_3d_scatter_series, create_line_plot, plotly_scatter3d_layout_dict, \
create_image_plot
from ...utilities.py3_interop import AbstractContextManager
from .events import ScalarEvent, VectorEvent, ImageEvent, PlotEvent, ImageEventNoUpload, UploadEvent
class Reporter(InterfaceBase, AbstractContextManager, SetupUploadMixin, AsyncManagerMixin):
"""
A simple metrics reporter class.
    This class caches reports and supports both explicit flushing and context-based flushing. To ensure reports are
sent to the backend, please use (assuming an instance of Reporter named 'reporter'):
- use the context manager feature (which will automatically flush when exiting the context):
with reporter:
reporter.report...
...
- explicitly call flush:
reporter.report...
...
reporter.flush()
"""
def __init__(self, metrics, flush_threshold=10, async_enable=False):
"""
Create a reporter
:param metrics: A Metrics manager instance that handles actual reporting, uploads etc.
:type metrics: .backend_interface.metrics.Metrics
:param flush_threshold: Events flush threshold. This determines the threshold over which cached reported events
are flushed and sent to the backend.
:type flush_threshold: int
"""
log = metrics.log.getChild('reporter')
log.setLevel(log.level)
super(Reporter, self).__init__(session=metrics.session, log=log)
self._metrics = metrics
self._flush_threshold = flush_threshold
self._events = []
self._bucket_config = None
self._storage_uri = None
self._async_enable = async_enable
self._flush_frequency = 30.0
self._exit_flag = False
self._flush_event = Event()
self._flush_event.clear()
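        # A background daemon thread flushes cached events every _flush_frequency
        # seconds, so reports reach the backend even without explicit flush() calls.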
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
self._max_iteration = 0
def _set_storage_uri(self, value):
value = '/'.join(x for x in (value.rstrip('/'), self._metrics.storage_key_prefix) if x)
self._storage_uri = value
storage_uri = property(None, _set_storage_uri)
@property
def flush_threshold(self):
return self._flush_threshold
@flush_threshold.setter
def flush_threshold(self, value):
self._flush_threshold = max(0, value)
@property
def async_enable(self):
return self._async_enable
@async_enable.setter
def async_enable(self, value):
self._async_enable = bool(value)
@property
def max_iteration(self):
return self._max_iteration
def _daemon(self):
while not self._exit_flag:
self._flush_event.wait(self._flush_frequency)
self._flush_event.clear()
self._write()
# wait for all reports
if self.get_num_results() > 0:
self.wait_for_results()
# make sure we flushed everything
self._write()
if self.get_num_results() > 0:
self.wait_for_results()
def _report(self, ev):
ev_iteration = ev.get_iteration()
if ev_iteration is not None:
self._max_iteration = max(self._max_iteration, ev_iteration)
self._events.append(ev)
if len(self._events) >= self._flush_threshold:
self.flush()
def _write(self):
if not self._events:
return
# print('reporting %d events' % len(self._events))
res = self._metrics.write_events(self._events, async_enable=self._async_enable, storage_uri=self._storage_uri)
if self._async_enable:
self._add_async_result(res)
self._events = []
def flush(self):
"""
Flush cached reports to backend.
"""
self._flush_event.set()
def stop(self):
self._exit_flag = True
self._flush_event.set()
self._thread.join()
def report_scalar(self, title, series, value, iter):
"""
Report a scalar value
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param value: Reported value
:type value: float
:param iter: Iteration number
        :type iter: int
"""
ev = ScalarEvent(metric=self._normalize_name(title),
variant=self._normalize_name(series), value=value, iter=iter)
self._report(ev)
def report_vector(self, title, series, values, iter):
"""
Report a vector of values
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param values: Reported values
        :type values: list of float
        :param iter: Iteration number
        :type iter: int
"""
if not isinstance(values, collections.Iterable):
raise ValueError('values: expected an iterable')
ev = VectorEvent(metric=self._normalize_name(title),
variant=self._normalize_name(series), values=values, iter=iter)
self._report(ev)
def report_plot(self, title, series, plot, iter):
"""
Report a Plotly chart
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param plot: A JSON describing a plotly chart (see https://help.plot.ly/json-chart-schema/)
:type plot: str or dict
:param iter: Iteration number
        :type iter: int
"""
try:
def default(o):
if isinstance(o, np.int64):
return int(o)
except Exception:
default = None
if isinstance(plot, dict):
plot = json.dumps(plot, default=default)
elif not isinstance(plot, six.string_types):
raise ValueError('Plot should be a string or a dict')
ev = PlotEvent(metric=self._normalize_name(title),
variant=self._normalize_name(series), plot_str=plot, iter=iter)
self._report(ev)
def report_image(self, title, series, src, iter):
"""
Report an image.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param src: Image source URI. This URI will be used by the webapp and workers when trying to obtain the image
            for presentation or processing. Currently only http(s), file and s3 schemes are supported.
:type src: str
:param iter: Iteration number
        :type iter: int
"""
ev = ImageEventNoUpload(metric=self._normalize_name(title),
variant=self._normalize_name(series), iter=iter, src=src)
self._report(ev)
def report_image_and_upload(self, title, series, iter, path=None, image=None, upload_uri=None,
max_image_history=None, delete_after_upload=False):
"""
Report an image and upload its contents. Image is uploaded to a preconfigured bucket (see setup_upload()) with
a key (filename) describing the task ID, title, series and iteration.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param iter: Iteration number
:type iter: int
        :param path: A path to an image file. Required unless image is provided.
        :type path: str
        :param image: Image data. Required unless path is provided.
        :type image: A PIL.Image.Image object or a 3D numpy.ndarray object
        :param max_image_history: maximum number of images to store per metric/variant combination;
            use a negative value for unlimited. The default is set in the global configuration (default=5)
        :param delete_after_upload: if True, once the file has been uploaded the local copy will be deleted
:type delete_after_upload: boolean
"""
if not self._storage_uri and not upload_uri:
raise ValueError('Upload configuration is required (use setup_upload())')
if len([x for x in (path, image) if x is not None]) != 1:
raise ValueError('Expected only one of [filename, image]')
kwargs = dict(metric=self._normalize_name(title),
variant=self._normalize_name(series), iter=iter, image_file_history_size=max_image_history)
ev = ImageEvent(image_data=image, upload_uri=upload_uri, local_image_path=path,
delete_after_upload=delete_after_upload, **kwargs)
self._report(ev)
def report_histogram(self, title, series, histogram, iter, labels=None, xlabels=None,
xtitle=None, ytitle=None, comment=None):
"""
        Report a histogram bar plot
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param histogram: The histogram data.
            A row for each dataset (bar in a bar group). A column for each bucket.
        :type histogram: numpy array
        :param iter: Iteration number
        :type iter: int
:param labels: The labels for each bar group.
:type labels: list of strings.
:param xlabels: The labels of the x axis.
:type xlabels: List of strings.
:param str xtitle: optional x-axis title
:param str ytitle: optional y-axis title
:param comment: comment underneath the title
:type comment: str
"""
plotly_dict = create_2d_histogram_plot(
np_row_wise=histogram,
title=title,
xtitle=xtitle,
ytitle=ytitle,
labels=labels,
series=series,
xlabels=xlabels,
comment=comment,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_line_plot(self, title, series, iter, xtitle, ytitle, mode='lines', reverse_xaxis=False, comment=None):
"""
Report a (possibly multiple) line plot.
:param title: Title (AKA metric)
:type title: str
:param series: All the series' data, one for each line in the plot.
:type series: An iterable of LineSeriesInfo.
:param iter: Iteration number
:type iter: int
:param xtitle: x-axis title
:type xtitle: str
:param ytitle: y-axis title
:type ytitle: str
:param mode: 'lines' / 'markers' / 'lines+markers'
:type mode: str
:param reverse_xaxis: If true X axis will be displayed from high to low (reversed)
:type reverse_xaxis: bool
:param comment: comment underneath the title
:type comment: str
"""
plotly_dict = create_line_plot(
title=title,
series=series,
xtitle=xtitle,
ytitle=ytitle,
mode=mode,
reverse_xaxis=reverse_xaxis,
comment=comment,
)
return self.report_plot(
title=self._normalize_name(title),
series='',
plot=plotly_dict,
iter=iter,
)
def report_2d_scatter(self, title, series, data, iter, mode='lines', xtitle=None, ytitle=None, labels=None,
comment=None):
"""
Report a 2d scatter graph (with lines)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
        :param data: Scattered data: pairs of x,y as rows in a numpy array
        :type data: ndarray
:param iter: Iteration number
:type iter: int
:param mode: (type str) 'lines'/'markers'/'lines+markers'
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param labels: label (text) per point in the scatter (in the same order)
:param comment: comment underneath the title
:type comment: str
"""
plotly_dict = create_2d_scatter_series(
np_row_wise=data,
title=title,
series_name=series,
mode=mode,
xtitle=xtitle,
ytitle=ytitle,
labels=labels,
comment=comment,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_3d_scatter(self, title, series, data, iter, labels=None, mode='lines', color=((217, 217, 217, 0.14),),
marker_size=5, line_width=0.8, xtitle=None, ytitle=None, ztitle=None, fill=None,
comment=None):
"""
Report a 3d scatter graph (with markers)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
        :param data: Scattered data: pairs of x,y,z as rows in a numpy array, or a list of numpy arrays
:type data: ndarray.
:param iter: Iteration number
:type iter: int
:param labels: label (text) per point in the scatter (in the same order)
:type labels: str
:param mode: (type str) 'lines'/'markers'/'lines+markers'
:param color: list of RGBA colors [(217, 217, 217, 0.14),]
:param marker_size: marker size in px
:param line_width: line width in px
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param ztitle: optional z-axis title
:param comment: comment underneath the title
"""
data_series = data if isinstance(data, list) else [data]
def get_labels(i):
if labels and isinstance(labels, list):
try:
item = labels[i]
except IndexError:
item = labels[-1]
if isinstance(item, list):
return item
return labels
plotly_obj = plotly_scatter3d_layout_dict(
title=title,
xaxis_title=xtitle,
yaxis_title=ytitle,
zaxis_title=ztitle,
comment=comment,
)
for i, values in enumerate(data_series):
plotly_obj = create_3d_scatter_series(
np_row_wise=values,
title=title,
series_name=series[i] if isinstance(series, list) else None,
labels=get_labels(i),
plotly_obj=plotly_obj,
mode=mode,
line_width=line_width,
marker_size=marker_size,
color=color,
fill_axis=fill,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series) if not isinstance(series, list) else None,
plot=plotly_obj,
iter=iter,
)
def report_value_matrix(self, title, series, data, iter, xtitle=None, ytitle=None, xlabels=None, ylabels=None, comment=None):
"""
Report a heat-map matrix
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: A heat-map matrix (example: confusion matrix)
:type data: ndarray
:param iter: Iteration number
:type iter: int
:param str xtitle: optional x-axis title
:param str ytitle: optional y-axis title
:param xlabels: optional label per column of the matrix
:param ylabels: optional label per row of the matrix
:param comment: comment underneath the title
"""
plotly_dict = create_value_matrix(
np_value_matrix=data,
title=title,
xlabels=xlabels,
ylabels=ylabels,
series=series,
comment=comment,
xtitle=xtitle,
ytitle=ytitle,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_value_surface(self, title, series, data, iter, xlabels=None, ylabels=None,
xtitle=None, ytitle=None, ztitle=None, camera=None, comment=None):
"""
Report a 3d surface (same data as heat-map matrix, only presented differently)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: A heat-map matrix (example: confusion matrix)
:type data: ndarray
:param iter: Iteration number
:type iter: int
:param xlabels: optional label per column of the matrix
:param ylabels: optional label per row of the matrix
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param ztitle: optional z-axis title
:param camera: X,Y,Z camera position. def: (1,1,1)
:param comment: comment underneath the title
"""
plotly_dict = create_3d_surface(
np_value_matrix=data,
title=title + '/' + series,
xlabels=xlabels,
ylabels=ylabels,
series=series,
xtitle=xtitle,
ytitle=ytitle,
ztitle=ztitle,
camera=camera,
comment=comment,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_image_plot_and_upload(self, title, series, iter, path=None, matrix=None,
upload_uri=None, max_image_history=None, delete_after_upload=False):
"""
Report an image as plot and upload its contents.
Image is uploaded to a preconfigured bucket (see setup_upload()) with a key (filename)
describing the task ID, title, series and iteration.
Then a plotly object is created and registered, this plotly objects points to the uploaded image
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param iter: Iteration number
        :type iter: int
        :param path: A path to an image file. Required unless matrix is provided.
        :type path: str
        :param matrix: A 3D numpy.ndarray object containing image data (RGB). Required unless path is provided.
        :type matrix: numpy.ndarray
        :param max_image_history: maximum number of images to store per metric/variant combination;
            use a negative value for unlimited. The default is set in the global configuration (default=5)
        :param delete_after_upload: if True, once the file has been uploaded the local copy will be deleted
:type delete_after_upload: boolean
"""
if not upload_uri and not self._storage_uri:
raise ValueError('Upload configuration is required (use setup_upload())')
if len([x for x in (path, matrix) if x is not None]) != 1:
raise ValueError('Expected only one of [filename, matrix]')
kwargs = dict(metric=self._normalize_name(title),
variant=self._normalize_name(series), iter=iter, image_file_history_size=max_image_history)
ev = UploadEvent(image_data=matrix, upload_uri=upload_uri, local_image_path=path,
delete_after_upload=delete_after_upload, **kwargs)
_, url = ev.get_target_full_upload_uri(upload_uri or self._storage_uri, self._metrics.storage_key_prefix)
        # Hack: if the url doesn't start with http(s) then plotly will not be able to show it,
        # so we register the link under images instead of plots
if not url.startswith('http'):
return self.report_image_and_upload(title=title, series=series, iter=iter, path=path, image=matrix,
upload_uri=upload_uri, max_image_history=max_image_history)
self._report(ev)
plotly_dict = create_image_plot(
image_src=url,
title=title + '/' + series,
width=matrix.shape[1] if matrix is not None else 640,
height=matrix.shape[0] if matrix is not None else 480,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
@classmethod
def _normalize_name(cls, name):
if not name:
return name
return name.replace('$', '/').replace('.', '/')
def __exit__(self, exc_type, exc_val, exc_tb):
# don't flush in case an exception was raised
if not exc_type:
self.flush()
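# Minimal usage sketch (illustrative only; assumes a configured Metrics instance
# named `metrics` from backend_interface.metrics):
#
#     with Reporter(metrics, flush_threshold=10) as reporter:
#         reporter.report_scalar(title="loss", series="train", value=0.271, iter=0)
#
# Exiting the context flushes any cached events to the backend.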
|
the-stack_106_29918 | import json
from datetime import datetime, timedelta
from celery import Celery
from flask import render_template, request, url_for
from sqlalchemy.exc import OperationalError
from werkzeug.utils import redirect
from bunkmeet import app, db
from bunkmeet.one import bunk
from bunkmeet.two import covid
from bunkmeet.models import User, Teams, Lecture
def make_celery(job):
celery_job = Celery(
job.import_name,
backend=job.config['CELERY_BACKEND'],
broker=job.config['CELERY_BROKER_URL']
)
celery_job.conf.update(job.config)
class ContextTask(celery_job.Task):
def __call__(self, *args, **kwargs):
with job.app_context():
return self.run(*args, **kwargs)
celery_job.Task = ContextTask
return celery_job
celery = make_celery(app)
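# Tasks created through this Celery instance run inside the Flask application
# context (see ContextTask above), so database access via SQLAlchemy works in them.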
@app.route('/', methods=['GET', 'POST'])
@app.route('/home', methods=['GET', 'POST'])
def attend():
try:
global team
team = Teams.query.order_by(Teams.id.desc()).first()
except OperationalError:
db.create_all()
team = Teams.query.order_by(Teams.id.desc()).first()
try:
load = json.loads(team.teams)
data = load['array']
return render_template('success.html', data=data)
except AttributeError:
global message
message = "Please log in first"
return redirect(url_for('error'))
@app.route('/login')
def login():
return render_template('index.html')
@app.route('/success', methods=['GET', 'POST'])
def fetch_input():
email = request.form.get("email")
password = request.form.get("pass")
user = User(
email=email,
password=password)
db.session.add(user)
db.session.commit()
fetch_teams = bunk(email, password)
if fetch_teams == 'Invalid credentials':
global message
message = fetch_teams
return redirect(url_for('error'))
else:
array = {"array": fetch_teams}
dump = json.dumps(array)
teams = Teams(teams=dump)
db.session.add(teams)
db.session.commit()
return render_template('success.html', data=fetch_teams)
@app.route('/error')
def error():
return render_template('error.html', message=message)
# DO NOT USE THE TEST ROUTE IN PRODUCTION
@app.route('/test')
def test():
data = ['Test team 1', 'Test team2']
return render_template('success.html', data=data)
@app.route('/finish', methods=['GET', 'POST'])
def finish():
TEAM = request.form.get("team")
DATE = request.form.get("date")
TIME = request.form.get("time")
DURATION = request.form.get("duration")
SPEED = request.form.get("speed")
lecture = Lecture(
team=TEAM,
date=DATE,
time=TIME,
duration=DURATION,
speed=SPEED
)
db.session.add(lecture)
db.session.commit()
final.delay()
return render_template('finish.html')
@celery.task()
def final():
user = User.query.order_by(User.id.desc()).first()
email = user.email
password = user.password
worker = Lecture.query.order_by(Lecture.id.desc()).first()
team = worker.team
date = worker.date
time = worker.time
duration = worker.duration
speed = worker.speed
combine = date + time
strp = datetime.strptime(combine, '%Y-%m-%d%H:%M')
until = strp + timedelta(minutes=int(duration))
covid(email, password, team, strp, until, speed)
|
the-stack_106_29921 | from __future__ import absolute_import, division, print_function, unicode_literals
from functools import reduce  # reduce is not a builtin on Python 3

SETTINGS_EXCEPTIONS = set(['load', 'map', 'new', 'start'])
def _merge_or_diff(old, new, is_merge, require_old_key, path='',
                   require_old_key_exceptions=None):
    """Merge ``new`` into ``old`` or, when ``is_merge`` is False, remove ``new``'s
    keys from ``old``; in both cases the dictionary ``old`` is mutated and returned."""
nothing = ()
require_old_key_exceptions = require_old_key_exceptions or set()
if old is None:
old = {}
require_old_key = False
else:
# old = copy.deepcopy(old)
pass
import six
for key, new_v in six.iteritems(new or {}):
new_path = '%s:%s' % (path, key)
old_v = old.get(key, nothing)
if old_v is nothing:
if is_merge:
if require_old_key and (key not in require_old_key_exceptions):
raise Exception(
'Tried to override non-existent key ' + new_path)
else:
old[key] = new_v
else:
continue
if isinstance(old_v, dict):
if isinstance(new_v, dict):
is_exception = key in require_old_key_exceptions
_merge_or_diff(old_v, new_v, is_merge,
require_old_key and not is_exception, new_path)
else:
raise Exception(
'Tried to override dict with non-dict for key ' + new_path)
elif not isinstance(new_v, dict):
if is_merge:
old[key] = new_v
else:
del old[key]
elif require_old_key:
raise Exception('Tried to override non-dict with dict for key ' +
new_path)
elif is_merge:
old[key] = new_v
return old
def difference_strict(old, new):
return _merge_or_diff(old, new, False, True)
def difference(old, new):
return _merge_or_diff(old, new, False, False)
def merge_strict(*others, **kwds):
def _merge(old, new):
return _merge_or_diff(old, new, True, True)
return reduce(_merge, others + (kwds, ), None)
def merge_strict_with_exceptions(exceptions, *others):
def _merge(old, new):
return _merge_or_diff(old, new, True, True,
require_old_key_exceptions=exceptions)
return reduce(_merge, others, None)
def merge(*others, **kwds):
def _merge(old, new):
return _merge_or_diff(old, new, True, False)
return reduce(_merge, others + (kwds, ), None)
def merge_for_settings(*settings):
return merge_strict_with_exceptions(SETTINGS_EXCEPTIONS, *settings)
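# Illustrative examples (not part of the original source):
#
#     base = {'a': {'b': 1}}
#     merge(base, {'a': {'c': 2}})       # -> {'a': {'b': 1, 'c': 2}}
#     difference(base, {'a': {'b': 0}})  # removes 'a:b', leaving {'a': {'c': 2}}
#
# The *_strict variants raise if an override key does not already exist in the base.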
|
the-stack_106_29922 | import os
from django.contrib.staticfiles import finders
from django.core.files.storage import FileSystemStorage
from django.test import TestCase
from pipeline.collector import default_collector
from pipeline.finders import PipelineFinder
def local_path(path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', path))
class CollectorTest(TestCase):
def tearDown(self):
super(CollectorTest, self).tearDown()
default_collector.clear()
def test_collect(self):
self.assertEqual(
set(default_collector.collect()),
set(self._get_collectable_files()))
def test_collect_with_files(self):
self.assertEqual(
set(default_collector.collect(files=[
'pipeline/js/first.js',
'pipeline/js/second.js',
])),
set([
'pipeline/js/first.js',
'pipeline/js/second.js',
]))
def test_delete_file_with_modified(self):
list(default_collector.collect())
storage = FileSystemStorage(local_path('assets'))
new_mtime = os.path.getmtime(storage.path('js/first.js')) - 1000
os.utime(default_collector.storage.path('pipeline/js/first.js'),
(new_mtime, new_mtime))
self.assertTrue(default_collector.delete_file(
'js/first.js', 'pipeline/js/first.js', storage))
def test_delete_file_with_unmodified(self):
list(default_collector.collect(files=['pipeline/js/first.js']))
self.assertFalse(default_collector.delete_file(
'js/first.js', 'pipeline/js/first.js',
FileSystemStorage(local_path('assets'))))
def _get_collectable_files(self):
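        # Mirror what the collector should gather: every static file exposed by the
        # non-Pipeline finders, with any storage prefix applied to the yielded path.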
for finder in finders.get_finders():
if not isinstance(finder, PipelineFinder):
for path, storage in finder.list(['CVS', '.*', '*~']):
if getattr(storage, 'prefix', None):
yield os.path.join(storage.prefix, path)
else:
yield path
|
the-stack_106_29927 | from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw as gw_c
mol = gto.M( verbose = 1, atom = '''Al 0.0 0.0 0.0''', basis = 'cc-pvdz', spin = 1, )
gto_mf = scf.UHF(mol)
e_tot = gto_mf.kernel()
class KnowValues(unittest.TestCase):
def test_0066_al_atom(self):
""" Spin-resolved case GW procedure. """
gw = gw_c(mf=gto_mf, gto=mol, verbosity=0, niter_max_ev=16, nocc=3, nvrt=3)
self.assertEqual(gw.nspin, 2)
gw.kernel_gw()
#gw.report()
np.savetxt('eigvals_g0w0_pyscf_rescf_al_0066.txt', gw.mo_energy_gw[0,:,:].T)
#ev_ref = np.loadtxt('eigvals_g0w0_pyscf_rescf_al_0066.txt-ref').T
#for n2e,n2r in zip(gw.mo_energy_gw[0], ev_ref):
# for e,r in zip(n2e,n2r): self.assertAlmostEqual(e, r)
if __name__ == "__main__": unittest.main()
|
the-stack_106_29929 | import serial
import time
ser = None
def sendData(data):
data += "\r\n"
ser.write(data.encode())
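# Note: the "\r\n" terminator written above is a common serial line-framing
# convention (e.g. a microcontroller reading until newline); adjust it if the
# receiving device expects a different delimiter.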
def main():
global ser
ser = serial.Serial('/dev/ttyUSB0', 9600)
data = "dorin-is-cool"
while 1:
#misc code here
sendData(data)
time.sleep(10)
if __name__ == "__main__":
    main()
|
the-stack_106_29931 | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import argparse

import WhiteBoxMath as whiteBoxMath
import WhiteBoxInit as init
import azlmbr.legacy.general as general
import azlmbr.bus as bus
import azlmbr.whitebox.api as api
# usage: pyRunFile path/to/file/icosahedron.py <radius>
# create the faces which will be used in the icosahedron
def create_icosahedron_faces(whiteBoxMesh, radius):
    # get coordinates for all the vertices using the internal angles of an icosahedron
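    # The polar angles 63.43 and 116.57 degrees are approximately atan(2) and
    # 180 - atan(2): the latitudes of the two five-vertex rings of a regular
    # icosahedron, with the remaining two vertices at the poles (0 and 180).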
# upper side
pos1 = whiteBoxMath.spherical_to_cartesian(0.0, 0.0, radius)
pos2 = whiteBoxMath.spherical_to_cartesian(63.43, 18.0, radius)
pos3 = whiteBoxMath.spherical_to_cartesian(63.43, 90.0, radius)
pos4 = whiteBoxMath.spherical_to_cartesian(63.43, 162.0, radius)
pos5 = whiteBoxMath.spherical_to_cartesian(63.43, 234.0, radius)
pos6 = whiteBoxMath.spherical_to_cartesian(63.43, 306.0, radius)
# lower side
pos7 = whiteBoxMath.spherical_to_cartesian(180.0, 0.0, radius)
pos8 = whiteBoxMath.spherical_to_cartesian(116.57, 52.0, radius)
pos9 = whiteBoxMath.spherical_to_cartesian(116.57, 126.0, radius)
pos10 = whiteBoxMath.spherical_to_cartesian(116.57, 198.0, radius)
pos11 = whiteBoxMath.spherical_to_cartesian(116.57, 270.0, radius)
pos12 = whiteBoxMath.spherical_to_cartesian(116.57, 342.0, radius)
# create vertices from all the coordinates
# upper side
v1 = whiteBoxMesh.AddVertex(pos1)
v2 = whiteBoxMesh.AddVertex(pos2)
v3 = whiteBoxMesh.AddVertex(pos3)
v4 = whiteBoxMesh.AddVertex(pos4)
v5 = whiteBoxMesh.AddVertex(pos5)
v6 = whiteBoxMesh.AddVertex(pos6)
# lower side
v7 = whiteBoxMesh.AddVertex(pos7)
v8 = whiteBoxMesh.AddVertex(pos8)
v9 = whiteBoxMesh.AddVertex(pos9)
v10 = whiteBoxMesh.AddVertex(pos10)
v11 = whiteBoxMesh.AddVertex(pos11)
v12 = whiteBoxMesh.AddVertex(pos12)
# add faces to list
faces = []
# upper side
fvh1 = faces.append(api.util_MakeFaceVertHandles(v1, v2, v3))
fvh2 = faces.append(api.util_MakeFaceVertHandles(v1, v3, v4))
fvh3 = faces.append(api.util_MakeFaceVertHandles(v1, v4, v5))
fvh4 = faces.append(api.util_MakeFaceVertHandles(v1, v5, v6))
fvh5 = faces.append(api.util_MakeFaceVertHandles(v1, v6, v2))
# lower side
fvh6 = faces.append(api.util_MakeFaceVertHandles(v7, v12, v11))
fvh7 = faces.append(api.util_MakeFaceVertHandles(v7, v11, v10))
fvh8 = faces.append(api.util_MakeFaceVertHandles(v7, v10, v9))
fvh9 = faces.append(api.util_MakeFaceVertHandles(v7, v9, v8))
fvh10 = faces.append(api.util_MakeFaceVertHandles(v7, v8, v12))
# middle side
fvh11 = faces.append(api.util_MakeFaceVertHandles(v12, v8, v2))
fvh12 = faces.append(api.util_MakeFaceVertHandles(v8, v9, v3))
fvh13 = faces.append(api.util_MakeFaceVertHandles(v9, v10, v4))
fvh14 = faces.append(api.util_MakeFaceVertHandles(v10, v11, v5))
fvh15 = faces.append(api.util_MakeFaceVertHandles(v11, v12, v6))
fvh16 = faces.append(api.util_MakeFaceVertHandles(v2, v8, v3))
fvh17 = faces.append(api.util_MakeFaceVertHandles(v3, v9, v4))
fvh18 = faces.append(api.util_MakeFaceVertHandles(v4, v10, v5))
fvh19 = faces.append(api.util_MakeFaceVertHandles(v5, v11, v6))
fvh20 = faces.append(api.util_MakeFaceVertHandles(v6, v12, v2))
return faces
def create_icosahedron(whiteBoxMesh, radius=0.6):
# create list of faces to add to polygon
icosahedron_faces = create_icosahedron_faces(whiteBoxMesh, radius)
# add polygons to white box mesh
for face in icosahedron_faces:
whiteBoxMesh.AddPolygon([face])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Creates an icosahedron.')
    parser.add_argument('radius', nargs='?', default=0.6, type=float, help='radius of the icosahedron')
args = parser.parse_args()
whiteBoxEntity = init.create_white_box_entity("WhiteBox-Icosahedron")
whiteBoxMeshComponent = init.create_white_box_component(whiteBoxEntity)
whiteBoxMesh = init.create_white_box_handle(whiteBoxMeshComponent)
# clear whiteBoxMesh to make a icosahedron from scratch
whiteBoxMesh.Clear()
create_icosahedron(whiteBoxMesh, args.radius)
# update whiteBoxMesh
init.update_white_box(whiteBoxMesh, whiteBoxMeshComponent)
|
the-stack_106_29934 | #!/usr/bin/env python3
#
# Copyright (c) 2017, Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: ai:ts=4:sw=4
import sys
from os import listdir
import os, fnmatch
import re
import yaml
import argparse
import collections
from devicetree import parse_file
from extract.globals import *
class Loader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.realpath(stream.name)
super(Loader, self).__init__(stream)
Loader.add_constructor('!include', Loader.include)
Loader.add_constructor('!import', Loader.include)
def include(self, node):
if isinstance(node, yaml.ScalarNode):
return self.extractFile(self.construct_scalar(node))
elif isinstance(node, yaml.SequenceNode):
result = []
for filename in self.construct_sequence(node):
result += self.extractFile(filename)
return result
elif isinstance(node, yaml.MappingNode):
result = {}
            for k, v in self.construct_mapping(node).items():
result[k] = self.extractFile(v)
return result
else:
print("Error:: unrecognised node type in !include statement")
raise yaml.constructor.ConstructorError
def extractFile(self, filename):
filepath = os.path.join(os.path.dirname(self._root), filename)
if not os.path.isfile(filepath):
# we need to look in bindings/* directories
# take path and back up 1 directory and parse in '/bindings/*'
filepath = os.path.dirname(os.path.dirname(self._root))
for root, dirnames, file in os.walk(filepath):
if fnmatch.filter(file, filename):
filepath = os.path.join(root, filename)
with open(filepath, 'r') as f:
return yaml.load(f, Loader)
def find_parent_irq_node(node_address):
address = ''
for comp in node_address.split('/')[1:]:
address += '/' + comp
if 'interrupt-parent' in reduced[address]['props']:
interrupt_parent = reduced[address]['props'].get(
'interrupt-parent')
return phandles[interrupt_parent]
def extract_interrupts(node_address, yaml, y_key, names, defs, def_label):
node = reduced[node_address]
try:
props = list(node['props'].get(y_key))
except:
props = [node['props'].get(y_key)]
irq_parent = find_parent_irq_node(node_address)
l_base = def_label.split('/')
index = 0
while props:
prop_def = {}
prop_alias = {}
l_idx = [str(index)]
try:
name = [convert_string_to_label(names.pop(0))]
except:
name = []
cell_yaml = yaml[get_compat(irq_parent)]
l_cell_prefix = [yaml[get_compat(irq_parent)].get(
'cell_string', []).upper()]
for i in range(reduced[irq_parent]['props']['#interrupt-cells']):
l_cell_name = [cell_yaml['#cells'][i].upper()]
if l_cell_name == l_cell_prefix:
l_cell_name = []
l_fqn = '_'.join(l_base + l_cell_prefix + l_idx + l_cell_name)
prop_def[l_fqn] = props.pop(0)
if len(name):
alias_list = l_base + l_cell_prefix + name + l_cell_name
prop_alias['_'.join(alias_list)] = l_fqn
if node_address in aliases:
for i in aliases[node_address]:
alias_label = convert_string_to_label(i)
alias_list = [alias_label] + l_cell_prefix + name + l_cell_name
prop_alias['_'.join(alias_list)] = l_fqn
index += 1
insert_defs(node_address, defs, prop_def, prop_alias)
def extract_reg_prop(node_address, names, defs, def_label, div, post_label):
reg = reduced[node_address]['props']['reg']
if type(reg) is not list: reg = [ reg ]
props = list(reg)
address_cells = reduced['/']['props'].get('#address-cells')
size_cells = reduced['/']['props'].get('#size-cells')
address = ''
for comp in node_address.split('/')[1:-1]:
address += '/' + comp
address_cells = reduced[address]['props'].get(
'#address-cells', address_cells)
size_cells = reduced[address]['props'].get('#size-cells', size_cells)
if post_label is None:
post_label = "BASE_ADDRESS"
index = 0
l_base = def_label.split('/')
l_addr = [convert_string_to_label(post_label)]
l_size = ["SIZE"]
while props:
prop_def = {}
prop_alias = {}
addr = 0
size = 0
l_idx = [str(index)]
try:
name = [names.pop(0).upper()]
except:
name = []
for x in range(address_cells):
addr += props.pop(0) << (32 * x)
for x in range(size_cells):
size += props.pop(0) << (32 * x)
l_addr_fqn = '_'.join(l_base + l_addr + l_idx)
l_size_fqn = '_'.join(l_base + l_size + l_idx)
if address_cells:
prop_def[l_addr_fqn] = hex(addr)
if size_cells:
prop_def[l_size_fqn] = int(size / div)
if len(name):
if address_cells:
prop_alias['_'.join(l_base + name + l_addr)] = l_addr_fqn
if size_cells:
prop_alias['_'.join(l_base + name + l_size)] = l_size_fqn
if index == 0:
if address_cells:
prop_alias['_'.join(l_base + l_addr)] = l_addr_fqn
if size_cells:
prop_alias['_'.join(l_base + l_size)] = l_size_fqn
# generate defs for node aliases
if node_address in aliases:
for i in aliases[node_address]:
alias_label = convert_string_to_label(i)
alias_addr = [alias_label] + l_addr
alias_size = [alias_label] + l_size
prop_alias['_'.join(alias_addr)] = '_'.join(l_base + l_addr)
prop_alias['_'.join(alias_size)] = '_'.join(l_base + l_size)
insert_defs(node_address, defs, prop_def, prop_alias)
# increment index for definition creation
index += 1
def extract_controller(node_address, y_key, prefix, defs, def_label):
try:
props = list(reduced[node_address]['props'].get(y_key))
except:
props = reduced[node_address]['props'].get(y_key)
# get controller node (referenced via phandle)
cell_parent = phandles[props[0]]
try:
l_cell = reduced[cell_parent]['props'].get('label')
except KeyError:
l_cell = None
if l_cell is not None:
l_base = def_label.split('/')
l_base += prefix
prop_def = {}
prop_alias = {}
for k in reduced[cell_parent]['props']:
if 'controller' in k:
l_cellname = convert_string_to_label(str(k))
label = l_base + [l_cellname]
prop_def['_'.join(label)] = "\"" + l_cell + "\""
#generate defs also if node is referenced as an alias in dts
if node_address in aliases:
for i in aliases[node_address]:
alias_label = \
convert_string_to_label(i)
alias = [alias_label] + label[1:]
prop_alias['_'.join(alias)] = '_'.join(label)
insert_defs(node_address, defs, prop_def, prop_alias)
def extract_cells(node_address, yaml, y_key, names, index, prefix, defs,
def_label):
try:
props = list(reduced[node_address]['props'].get(y_key))
except:
props = reduced[node_address]['props'].get(y_key)
cell_parent = phandles[props.pop(0)]
try:
cell_yaml = yaml[get_compat(cell_parent)]
except:
raise Exception(
"Could not find yaml description for " +
reduced[cell_parent]['name'])
try:
name = names.pop(0).upper()
except:
name = []
l_cell = [str(cell_yaml.get('cell_string', ''))]
l_base = def_label.split('/')
l_base += prefix
l_idx = [str(index)]
prop_def = {}
prop_alias = {}
for k in reduced[cell_parent]['props'].keys():
if k[0] == '#' and '-cells' in k:
for i in range(reduced[cell_parent]['props'].get(k)):
l_cellname = [str(cell_yaml['#cells'][i]).upper()]
if l_cell == l_cellname:
label = l_base + l_cell + l_idx
else:
label = l_base + l_cell + l_cellname + l_idx
label_name = l_base + name + l_cellname
prop_def['_'.join(label)] = props.pop(0)
if len(name):
prop_alias['_'.join(label_name)] = '_'.join(label)
if index == 0:
prop_alias['_'.join(label[:-1])] = '_'.join(label)
# generate defs for node aliases
if node_address in aliases:
for i in aliases[node_address]:
alias_label = convert_string_to_label(i)
alias = [alias_label] + label[1:-1]
prop_alias['_'.join(alias)] = '_'.join(label[:-1])
insert_defs(node_address, defs, prop_def, prop_alias)
# recurse if we have anything left
if len(props):
extract_cells(node_address, yaml, y_key, names,
index + 1, prefix, defs, def_label)
def extract_pinctrl(node_address, yaml, pinconf, names, index, defs,
def_label):
prop_list = []
if not isinstance(pinconf, list):
prop_list.append(pinconf)
else:
prop_list = list(pinconf)
def_prefix = def_label.split('_')
prop_def = {}
for p in prop_list:
pin_node_address = phandles[p]
pin_subnode = '/'.join(pin_node_address.split('/')[-1:])
cell_yaml = yaml[get_compat(pin_node_address)]
cell_prefix = cell_yaml.get('cell_string', None)
post_fix = []
if cell_prefix is not None:
post_fix.append(cell_prefix)
for subnode in reduced.keys():
if pin_subnode in subnode and pin_node_address != subnode:
# found a subnode underneath the pinmux handle
pin_label = def_prefix + post_fix + subnode.split('/')[-2:]
for i, cells in enumerate(reduced[subnode]['props']):
key_label = list(pin_label) + \
[cell_yaml['#cells'][0]] + [str(i)]
func_label = key_label[:-2] + \
[cell_yaml['#cells'][1]] + [str(i)]
key_label = convert_string_to_label('_'.join(key_label))
func_label = convert_string_to_label('_'.join(func_label))
prop_def[key_label] = cells
prop_def[func_label] = \
reduced[subnode]['props'][cells]
insert_defs(node_address, defs, prop_def, {})
def extract_single(node_address, yaml, prop, key, prefix, defs, def_label):
prop_def = {}
prop_alias = {}
if isinstance(prop, list):
for i, p in enumerate(prop):
k = convert_string_to_label(key)
label = def_label + '_' + k
if isinstance(p, str):
p = "\"" + p + "\""
prop_def[label + '_' + str(i)] = p
else:
k = convert_string_to_label(key)
label = def_label + '_' + k
if prop == 'parent-label':
prop = find_parent_prop(node_address, 'label')
if isinstance(prop, str):
prop = "\"" + prop + "\""
prop_def[label] = prop
# generate defs for node aliases
if node_address in aliases:
for i in aliases[node_address]:
alias_label = convert_string_to_label(i)
alias = alias_label + '_' + k
prop_alias[alias] = label
insert_defs(node_address, defs, prop_def, prop_alias)
def extract_string_prop(node_address, yaml, key, label, defs):
prop_def = {}
node = reduced[node_address]
prop = node['props'][key]
k = convert_string_to_label(key)
prop_def[label] = "\"" + prop + "\""
if node_address in defs:
defs[node_address].update(prop_def)
else:
defs[node_address] = prop_def
def extract_property(node_compat, yaml, node_address, y_key, y_val, names,
prefix, defs, label_override):
if 'base_label' in yaml[node_compat]:
def_label = yaml[node_compat].get('base_label')
else:
def_label = get_node_label(node_compat, node_address)
if 'parent' in yaml[node_compat]:
if 'bus' in yaml[node_compat]['parent']:
# get parent label
parent_address = ''
for comp in node_address.split('/')[1:-1]:
parent_address += '/' + comp
#check parent has matching child bus value
try:
parent_yaml = \
yaml[reduced[parent_address]['props']['compatible']]
parent_bus = parent_yaml['child']['bus']
except (KeyError, TypeError) as e:
raise Exception(str(node_address) + " defines parent " +
str(parent_address) + " as bus master but " +
str(parent_address) + " not configured as bus master " +
"in yaml description")
if parent_bus != yaml[node_compat]['parent']['bus']:
bus_value = yaml[node_compat]['parent']['bus']
raise Exception(str(node_address) + " defines parent " +
str(parent_address) + " as " + bus_value +
" bus master but " + str(parent_address) +
" configured as " + str(parent_bus) +
" bus master")
# Generate alias definition if parent has any alias
if parent_address in aliases:
for i in aliases[parent_address]:
node_alias = i + '_' + def_label
aliases[node_address].append(node_alias)
# Use parent label to generate label
parent_label = get_node_label(
find_parent_prop(node_address,'compatible') , parent_address)
def_label = parent_label + '_' + def_label
# Generate bus-name define
extract_single(node_address, yaml, 'parent-label',
'bus-name', prefix, defs, def_label)
if label_override is not None:
def_label += '_' + label_override
if y_key == 'reg':
extract_reg_prop(node_address, names, defs, def_label,
1, y_val.get('label', None))
elif y_key == 'interrupts' or y_key == 'interupts-extended':
extract_interrupts(node_address, yaml, y_key, names, defs, def_label)
elif 'pinctrl-' in y_key:
p_index = int(y_key.split('-')[1])
extract_pinctrl(node_address, yaml,
reduced[node_address]['props'][y_key],
names[p_index], p_index, defs, def_label)
elif 'clocks' in y_key or 'gpios' in y_key:
extract_controller(node_address, y_key, prefix, defs, def_label)
extract_cells(node_address, yaml, y_key,
names, 0, prefix, defs, def_label)
else:
extract_single(node_address, yaml,
reduced[node_address]['props'][y_key], y_key,
prefix, defs, def_label)
def extract_node_include_info(reduced, root_node_address, sub_node_address,
yaml, defs, structs, y_sub):
node = reduced[sub_node_address]
node_compat = get_compat(root_node_address)
label_override = None
if node_compat not in yaml.keys():
return {}, {}
if y_sub is None:
y_node = yaml[node_compat]
else:
y_node = y_sub
if yaml[node_compat].get('use-property-label', False):
try:
label = y_node['properties']['label']
label_override = convert_string_to_label(node['props']['label'])
except KeyError:
pass
# check to see if we need to process the properties
for k, v in y_node['properties'].items():
if 'properties' in v:
for c in reduced:
if root_node_address + '/' in c:
extract_node_include_info(
reduced, root_node_address, c, yaml, defs, structs,
v)
if 'generation' in v:
prefix = []
if v.get('use-name-prefix') is not None:
prefix = [convert_string_to_label(k)]
for c in node['props'].keys():
if c.endswith("-names"):
pass
if re.match(k + '$', c):
if 'pinctrl-' in c:
names = node['props'].get('pinctrl-names', [])
else:
names = node['props'].get(c[:-1] + '-names', [])
if not names:
names = node['props'].get(c + '-names', [])
if not isinstance(names, list):
names = [names]
extract_property(
node_compat, yaml, sub_node_address, c, v, names,
prefix, defs, label_override)
def dict_merge(dct, merge_dct):
# from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
def yaml_traverse_inherited(node):
    """ Recursively resolve the ``inherits`` section found inside ``node``.
    When an ``inherits`` section is present it is used as the base of the node,
    and the base values are then overridden by the node's own values.
    Additionally, the 'id' key of the inherited dict is renamed to 'node_type'.
:param node:
:return: node
"""
if 'inherits' in node.keys():
if 'id' in node['inherits'].keys():
node['inherits']['node_type'] = node['inherits']['id']
node['inherits'].pop('id')
if 'inherits' in node['inherits'].keys():
node['inherits'] = yaml_traverse_inherited(node['inherits'])
dict_merge(node['inherits'], node)
node = node['inherits']
node.pop('inherits')
return node
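# Illustrative example (not from the original sources): a binding such as
#   {'inherits': {'id': 'base', 'properties': {'reg': {}}}, 'properties': {'label': {}}}
# collapses into a single dict whose 'properties' hold both 'reg' and 'label',
# with the inherited 'id' renamed to 'node_type'.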
def yaml_collapse(yaml_list):
collapsed = dict(yaml_list)
for k, v in collapsed.items():
v = yaml_traverse_inherited(v)
collapsed[k]=v
return collapsed
def get_key_value(k, v, tabstop):
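    # Build a single "#define <k> <v>" line, padding with tabs so the values of
    # all lines align at the common tab stop (tabstop) computed by the caller.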
label = "#define " + k
# calculate the name's tabs
if len(label) % 8:
tabs = (len(label) + 7) >> 3
else:
tabs = (len(label) >> 3) + 1
line = label
for i in range(0, tabstop - tabs + 1):
line += '\t'
line += str(v)
line += '\n'
return line
def output_keyvalue_lines(fd, defs):
node_keys = sorted(defs.keys())
for node in node_keys:
fd.write('# ' + node.split('/')[-1])
fd.write("\n")
prop_keys = sorted(defs[node].keys())
for prop in prop_keys:
if prop == 'aliases':
for entry in sorted(defs[node][prop]):
a = defs[node][prop].get(entry)
fd.write("%s=%s\n" % (entry, defs[node].get(a)))
else:
fd.write("%s=%s\n" % (prop, defs[node].get(prop)))
fd.write("\n")
def generate_keyvalue_file(defs, kv_file):
with open(kv_file, "w") as fd:
output_keyvalue_lines(fd, defs)
def output_include_lines(fd, defs, fixups):
compatible = reduced['/']['props']['compatible'][0]
fd.write("/**************************************************\n")
fd.write(" * Generated include file for " + compatible)
fd.write("\n")
fd.write(" * DO NOT MODIFY\n")
fd.write(" */\n")
fd.write("\n")
fd.write("#ifndef _DEVICE_TREE_BOARD_H" + "\n")
fd.write("#define _DEVICE_TREE_BOARD_H" + "\n")
fd.write("\n")
node_keys = sorted(defs.keys())
for node in node_keys:
fd.write('/* ' + node.split('/')[-1] + ' */')
fd.write("\n")
max_dict_key = lambda d: max(len(k) for k in d.keys())
maxlength = 0
if defs[node].get('aliases'):
maxlength = max_dict_key(defs[node]['aliases'])
maxlength = max(maxlength, max_dict_key(defs[node])) + len('#define ')
if maxlength % 8:
maxtabstop = (maxlength + 7) >> 3
else:
maxtabstop = (maxlength >> 3) + 1
if (maxtabstop * 8 - maxlength) <= 2:
maxtabstop += 1
prop_keys = sorted(defs[node].keys())
for prop in prop_keys:
if prop == 'aliases':
for entry in sorted(defs[node][prop]):
a = defs[node][prop].get(entry)
fd.write(get_key_value(entry, a, maxtabstop))
else:
fd.write(get_key_value(prop, defs[node].get(prop), maxtabstop))
fd.write("\n")
if fixups:
for fixup in fixups:
if os.path.exists(fixup):
fd.write("\n")
fd.write(
"/* Following definitions fixup the generated include */\n")
try:
with open(fixup, "r") as fixup_fd:
for line in fixup_fd.readlines():
fd.write(line)
fd.write("\n")
except:
raise Exception(
"Input file " + os.path.abspath(fixup) +
" does not exist.")
fd.write("#endif\n")
def generate_include_file(defs, inc_file, fixups):
with open(inc_file, "w") as fd:
output_include_lines(fd, defs, fixups)
def load_and_parse_dts(dts_file):
with open(dts_file, "r") as fd:
dts = parse_file(fd)
return dts
def load_yaml_descriptions(dts, yaml_dir):
compatibles = get_all_compatibles(dts['/'], '/', {})
# find unique set of compatibles across all active nodes
s = set()
for k, v in compatibles.items():
if isinstance(v, list):
for item in v:
s.add(item)
else:
s.add(v)
# scan YAML files and find the ones we are interested in
yaml_files = []
for root, dirnames, filenames in os.walk(yaml_dir):
for filename in fnmatch.filter(filenames, '*.yaml'):
yaml_files.append(os.path.join(root, filename))
yaml_list = {}
file_load_list = set()
for file in yaml_files:
for line in open(file, 'r'):
if re.search('^\s+constraint:*', line):
c = line.split(':')[1].strip()
c = c.strip('"')
if c in s:
if file not in file_load_list:
file_load_list.add(file)
with open(file, 'r') as yf:
yaml_list[c] = yaml.load(yf, Loader)
if yaml_list == {}:
raise Exception("Missing YAML information. Check YAML sources")
# collapse the yaml inherited information
yaml_list = yaml_collapse(yaml_list)
return yaml_list
def lookup_defs(defs, node, key):
if node not in defs:
return None
if key in defs[node]['aliases']:
key = defs[node]['aliases'][key]
return defs[node].get(key, None)
def generate_node_definitions(yaml_list):
defs = {}
structs = {}
for k, v in reduced.items():
node_compat = get_compat(k)
if node_compat is not None and node_compat in yaml_list:
extract_node_include_info(
reduced, k, k, yaml_list, defs, structs, None)
if defs == {}:
raise Exception("No information parsed from dts file.")
for k, v in regs_config.items():
if k in chosen:
extract_reg_prop(chosen[k], None, defs, v, 1024, None)
for k, v in name_config.items():
if k in chosen:
extract_string_prop(chosen[k], None, "label", v, defs)
# This should go away via future DTDirective class
if 'zephyr,flash' in chosen:
load_defs = {}
node_addr = chosen['zephyr,flash']
flash_keys = ["label", "write-block-size", "erase-block-size"]
for key in flash_keys:
if key in reduced[node_addr]['props']:
prop = reduced[node_addr]['props'][key]
extract_single(node_addr, None, prop, key, None, defs, "FLASH")
# only compute the load offset if a code partition exists and
# it is not the same as the flash base address
if 'zephyr,code-partition' in chosen and \
reduced[chosen['zephyr,flash']] is not \
reduced[chosen['zephyr,code-partition']]:
part_defs = {}
extract_reg_prop(chosen['zephyr,code-partition'], None,
part_defs, "PARTITION", 1, 'offset')
part_base = lookup_defs(part_defs,
chosen['zephyr,code-partition'],
'PARTITION_OFFSET')
load_defs['CONFIG_FLASH_LOAD_OFFSET'] = part_base
load_defs['CONFIG_FLASH_LOAD_SIZE'] = \
lookup_defs(part_defs,
chosen['zephyr,code-partition'],
'PARTITION_SIZE')
else:
load_defs['CONFIG_FLASH_LOAD_OFFSET'] = 0
load_defs['CONFIG_FLASH_LOAD_SIZE'] = 0
else:
# We will add addr/size of 0 for systems with no flash controller
# This is what they already do in the Kconfig options anyway
defs['dummy-flash'] = {
'CONFIG_FLASH_BASE_ADDRESS': 0,
'CONFIG_FLASH_SIZE': 0
}
if 'zephyr,flash' in chosen:
insert_defs(chosen['zephyr,flash'], defs, load_defs, {})
return defs
def parse_arguments():
rdh = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=rdh)
parser.add_argument("-d", "--dts", nargs=1, required=True, help="DTS file")
parser.add_argument("-y", "--yaml", nargs=1, required=True,
help="YAML file")
parser.add_argument("-f", "--fixup", nargs='+',
help="Fixup file(s), we allow multiple")
parser.add_argument("-i", "--include", nargs=1, required=True,
help="Generate include file for the build system")
parser.add_argument("-k", "--keyvalue", nargs=1, required=True,
help="Generate config file for the build system")
return parser.parse_args()
def main():
args = parse_arguments()
dts = load_and_parse_dts(args.dts[0])
# build up useful lists
get_reduced(dts['/'], '/')
get_phandles(dts['/'], '/', {})
get_aliases(dts['/'])
get_chosen(dts['/'])
yaml_list = load_yaml_descriptions(dts, args.yaml[0])
defs = generate_node_definitions(yaml_list)
# generate config and include file
generate_keyvalue_file(defs, args.keyvalue[0])
generate_include_file(defs, args.include[0], args.fixup)
if __name__ == '__main__':
main()
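# Editor's note: hedged usage sketch, not from the original source. Given the options
# defined in parse_arguments() above, a typical invocation looks roughly like:
#
#   python extract_dts.py -d board.dts -y dts/bindings \
#       -i generated_dts_board.h -k generated_dts_board.conf
#
# The script name and all file paths here are illustrative assumptions.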
|
the-stack_106_29935 | import logging
import time
from pandas import HDFStore
import os
# Adding logging support
logger = logging.getLogger(__name__)
def run_radial1d(radial1d_model, history_fname=None):
if history_fname:
if os.path.exists(history_fname):
            logger.warning('History file %s exists - it will be overwritten', history_fname)
            os.remove(history_fname)
history_buffer = HDFStore(history_fname)
radial1d_model.atom_data.lines.to_hdf(history_buffer, 'atom_data/lines')
radial1d_model.atom_data.levels.to_hdf(history_buffer, 'atom_data/levels')
start_time = time.time()
initialize_j_blues = True
initialize_nlte = True
update_radiation_field = False
while radial1d_model.iterations_remaining > 1:
logger.info('Remaining run %d', radial1d_model.iterations_remaining)
radial1d_model.simulate(update_radiation_field=update_radiation_field, enable_virtual=False, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
initialize_j_blues=False
initialize_nlte=False
update_radiation_field = True
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed, close_h5=False)
    # Finished the second-to-last loop; run one more time
logger.info('Doing last run')
if radial1d_model.tardis_config.montecarlo.last_no_of_packets is not None:
radial1d_model.current_no_of_packets = radial1d_model.tardis_config.montecarlo.last_no_of_packets
radial1d_model.simulate(enable_virtual=True, update_radiation_field=update_radiation_field, initialize_nlte=initialize_nlte,
initialize_j_blues=initialize_j_blues)
if history_fname:
radial1d_model.to_hdf5(history_buffer, path='model%03d' % radial1d_model.iterations_executed)
logger.info("Finished in %d iterations and took %.2f s", radial1d_model.iterations_executed, time.time()-start_time)
|
the-stack_106_29936 | # -*- coding: utf-8 -*-
"""
Newspaper uses a lot of python-goose's parsing code. View their license:
https://github.com/codelucas/newspaper/blob/master/GOOSE-LICENSE.txt
Parser objects will only contain operations that manipulate
or query an lxml or soup dom object generated from an article's html.
"""
import logging
import lxml.etree
import lxml.html
import lxml.html.clean
import re
from html import unescape
import string
from bs4 import UnicodeDammit
from copy import deepcopy
from . import text
log = logging.getLogger(__name__)
class Parser(object):
@classmethod
def xpath_re(cls, node, expression):
regexp_namespace = "http://exslt.org/regular-expressions"
items = node.xpath(expression, namespaces={'re': regexp_namespace})
return items
@classmethod
def drop_tag(cls, nodes):
if isinstance(nodes, list):
for node in nodes:
node.drop_tag()
else:
nodes.drop_tag()
@classmethod
def css_select(cls, node, selector):
return node.cssselect(selector)
@classmethod
def get_unicode_html(cls, html):
if isinstance(html, str):
return html
if not html:
return html
converted = UnicodeDammit(html, is_html=True)
if not converted.unicode_markup:
raise Exception(
'Failed to detect encoding of article HTML, tried: %s' %
', '.join(converted.tried_encodings))
html = converted.unicode_markup
return html
@classmethod
def fromstring(cls, html):
html = cls.get_unicode_html(html)
# Enclosed in a `try` to prevent bringing the entire library
# down due to one article (out of potentially many in a `Source`)
try:
# lxml does not play well with <? ?> encoding tags
if html.startswith('<?'):
html = re.sub(r'^\<\?.*?\?\>', '', html, flags=re.DOTALL)
cls.doc = lxml.html.fromstring(html)
return cls.doc
except Exception:
            log.warning('fromstring() returned an invalid string: %s...', html[:20])
return
@classmethod
def clean_article_html(cls, node):
article_cleaner = lxml.html.clean.Cleaner()
article_cleaner.javascript = True
article_cleaner.style = True
article_cleaner.allow_tags = [
'a', 'span', 'p', 'br', 'strong', 'b',
'em', 'i', 'tt', 'code', 'pre', 'blockquote', 'img', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6',
'ul', 'ol', 'li', 'dl', 'dt', 'dd']
article_cleaner.remove_unknown_tags = False
return article_cleaner.clean_html(node)
@classmethod
def nodeToString(cls, node):
"""`decode` is needed at the end because `etree.tostring`
returns a python bytestring
"""
return lxml.etree.tostring(node, method='html').decode()
@classmethod
def replaceTag(cls, node, tag):
node.tag = tag
@classmethod
def stripTags(cls, node, *tags):
lxml.etree.strip_tags(node, *tags)
@classmethod
def getElementById(cls, node, idd):
selector = '//*[@id="%s"]' % idd
elems = node.xpath(selector)
if elems:
return elems[0]
return None
@classmethod
def getElementsByTag(
cls, node, tag=None, attr=None, value=None, childs=False, use_regex=False) -> list:
NS = None
# selector = tag or '*'
selector = 'descendant-or-self::%s' % (tag or '*')
if attr and value:
if use_regex:
NS = {"re": "http://exslt.org/regular-expressions"}
selector = '%s[re:test(@%s, "%s", "i")]' % (selector, attr, value)
else:
trans = 'translate(@%s, "%s", "%s")' % (attr, string.ascii_uppercase, string.ascii_lowercase)
selector = '%s[contains(%s, "%s")]' % (selector, trans, value.lower())
elems = node.xpath(selector, namespaces=NS)
# remove the root node
# if we have a selection tag
if node in elems and (tag or childs):
elems.remove(node)
return elems
@classmethod
def appendChild(cls, node, child):
node.append(child)
@classmethod
def childNodes(cls, node):
return list(node)
@classmethod
def childNodesWithText(cls, node):
root = node
# create the first text node
# if we have some text in the node
if root.text:
t = lxml.html.HtmlElement()
t.text = root.text
t.tag = 'text'
root.text = None
root.insert(0, t)
        # loop over child nodes
for c, n in enumerate(list(root)):
idx = root.index(n)
            # don't process text nodes
if n.tag == 'text':
continue
# create a text node for tail
if n.tail:
t = cls.createElement(tag='text', text=n.tail, tail=None)
root.insert(idx + 1, t)
return list(root)
@classmethod
def textToPara(cls, text):
return cls.fromstring(text)
@classmethod
def getChildren(cls, node):
return node.getchildren()
@classmethod
def getElementsByTags(cls, node, tags):
selector = 'descendant::*[%s]' % (
' or '.join('self::%s' % tag for tag in tags))
elems = node.xpath(selector)
return elems
@classmethod
def createElement(cls, tag='p', text=None, tail=None):
t = lxml.html.HtmlElement()
t.tag = tag
t.text = text
t.tail = tail
return t
@classmethod
def getComments(cls, node):
return node.xpath('//comment()')
@classmethod
def getParent(cls, node):
return node.getparent()
@classmethod
def remove(cls, node):
parent = node.getparent()
if parent is not None:
if node.tail:
prev = node.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += ' ' + node.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += ' ' + node.tail
node.clear()
parent.remove(node)
@classmethod
def getTag(cls, node):
return node.tag
@classmethod
def getText(cls, node):
txts = [i for i in node.itertext()]
return text.innerTrim(' '.join(txts).strip())
@classmethod
def previousSiblings(cls, node):
"""
returns preceding siblings in reverse order (nearest sibling is first)
"""
return [n for n in node.itersiblings(preceding=True)]
@classmethod
def previousSibling(cls, node):
return node.getprevious()
@classmethod
def nextSibling(cls, node):
return node.getnext()
@classmethod
def isTextNode(cls, node):
        return node.tag == 'text'
@classmethod
def getAttribute(cls, node, attr=None):
if attr:
attr = node.attrib.get(attr, None)
if attr:
attr = unescape(attr)
return attr
@classmethod
def delAttribute(cls, node, attr=None):
if attr:
_attr = node.attrib.get(attr, None)
if _attr:
del node.attrib[attr]
@classmethod
def setAttribute(cls, node, attr=None, value=None):
if attr and value:
node.set(attr, value)
@classmethod
def outerHtml(cls, node):
e0 = node
if e0.tail:
e0 = deepcopy(e0)
e0.tail = None
return cls.nodeToString(e0)
@classmethod
def is_tag_visible(cls, tag):
_style = tag.get('style')
if _style and \
re.search(
r'(display:[\s]*none|visibility:[\s]*hidden)', _style.lower()):
return False
return True
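# Editor's note: hedged usage sketch, kept in comments because this module relies on a
# relative import (`from . import text`) and is not meant to be run directly. The HTML
# snippet and the `newspaper.parsers` import path are illustrative assumptions.
#
#   from newspaper.parsers import Parser
#
#   doc = Parser.fromstring(
#       "<html><body><p class='lead'>Hello <b>world</b></p></body></html>")
#   for p in Parser.getElementsByTag(doc, tag='p', attr='class', value='lead'):
#       print(Parser.getText(p))   # -> "Hello world"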
|
the-stack_106_29937 | #!/usr/bin/python
import re
import os
import sys
RE_TYPE_NAME = re.compile(r'([\s]*k[A-Za-z0-9]+),.*')
CPP_TEMPLATE = (
"""std::string type_names[] = {{
{}
}};
"""
)
def main(*argv):
"""Parse the MFn header file to generate an array of MFn::Type names."""
cmd, = (argv or [None])
mfn_inl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'src', 'MFn.Types.inl')
if cmd == 'parse':
_parse(mfn_inl_path)
elif cmd == 'clear':
_clear(mfn_inl_path)
return 0
def _clear(mfn_inl_path):
with open(mfn_inl_path, 'w') as fp:
        fp.write('// Auto-generated by /Scripts/mfn.py at build time\n')
def _parse(mfn_inl_path):
mfn_header = os.path.join(os.environ['DEVKIT_LOCATION'], 'include', 'maya', 'MFn.h')
with open(mfn_header, 'r') as fp:
lines = fp.readlines()
type_name_list = [
'kInvalid'
]
for line in lines:
match = RE_TYPE_NAME.match(line)
if not match:
continue
type_name = match.groups(0)[0].strip()
type_name_list.append(type_name)
cpp_file = CPP_TEMPLATE.format(
',\n\t'.join(
['"{}"'.format(each) for each in type_name_list]
)
)
with open(mfn_inl_path, 'w') as fp:
fp.write(cpp_file)
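# Editor's note: hedged usage sketch. As the stub written by _clear() indicates, this
# script lives at /Scripts/mfn.py and is normally driven by the build; a manual run
# would look roughly like (paths are illustrative):
#
#   export DEVKIT_LOCATION=/path/to/maya/devkit
#   python Scripts/mfn.py parse    # regenerate src/MFn.Types.inl from maya/MFn.h
#   python Scripts/mfn.py clear    # reset src/MFn.Types.inl to an empty stub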
if __name__ == '__main__':
main(*sys.argv[1:]) |
the-stack_106_29938 | from settings.default import *
SITE_ID = 1
DEBUG = True
LOCAL_SERVE = True
SOUTH_TESTS_MIGRATE = False
# Dummy cache for development
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# Set session engine to db so that our session doesn't get lost without cache
SESSION_ENGINE = "django.contrib.sessions.backends.db"
# DB
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'eurorack-planner.sqlite'
}
}
# Debug Toolbar
SHOW_DEBUG_TOOLBAR = True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'HIDE_DJANGO_SQL': True,
}
# Email testing
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
the-stack_106_29940 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentOperation(Model):
"""Deployment operation information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Full deployment operation ID.
:vartype id: str
:ivar operation_id: Deployment operation ID.
:vartype operation_id: str
:param properties: Deployment properties.
:type properties:
~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentOperationProperties
"""
_validation = {
'id': {'readonly': True},
'operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'operation_id': {'key': 'operationId', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentOperationProperties'},
}
def __init__(self, *, properties=None, **kwargs) -> None:
super(DeploymentOperation, self).__init__(**kwargs)
self.id = None
self.operation_id = None
self.properties = properties
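# Editor's note: hedged illustration. Instances of this model are normally deserialized
# from service responses by the generated client rather than built by hand, but direct
# construction only takes the optional `properties` argument:
#
#   op = DeploymentOperation(properties=None)
#   assert op.id is None and op.operation_id is None   # read-only, set by the server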
|
the-stack_106_29944 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyScooby(PythonPackage):
"""A Great Dane turned Python environment detective."""
homepage = "https://github.com/banesullivan/scooby"
pypi = "scooby/scooby-0.5.7.tar.gz"
version('0.5.7', sha256='ae2c2b6f5f5d10adf7aaab32409028f1e28d3ce833664bdd1e8c2072e8da169a')
depends_on('py-setuptools', type='build')
|
the-stack_106_29946 | from evelink import api, constants
from evelink.parsing.assets import parse_assets
from evelink.parsing.contact_list import parse_contact_list
from evelink.parsing.contract_bids import parse_contract_bids
from evelink.parsing.contract_items import parse_contract_items
from evelink.parsing.contracts import parse_contracts
from evelink.parsing.industry_jobs import parse_industry_jobs
from evelink.parsing.kills import parse_kills
from evelink.parsing.orders import parse_market_orders
from evelink.parsing.wallet_journal import parse_wallet_journal
from evelink.parsing.wallet_transactions import parse_wallet_transactions
class auto_call(api.auto_call):
"""Extends 'evelink.api.auto_call' to add 'Char.char_id' as an api
request argument.
"""
def __init__(self, path, map_params=None, **kw):
map_params = map_params if map_params else {}
map_params['char_id'] = 'characterID'
super(auto_call, self).__init__(
path, prop_to_param=('char_id',), map_params=map_params, **kw
)
class Char(object):
"""Wrapper around /char/ of the EVE API.
Note that a valid API key is required.
"""
def __init__(self, char_id, api):
self.api = api
self.char_id = char_id
@auto_call('char/AssetList')
def assets(self, api_result=None):
"""Get information about corp assets.
Each item is a dict, with keys 'id', 'item_type_id',
'quantity', 'location_id', 'location_flag', and 'packaged'.
'location_flag' denotes additional information about the
item's location; see
http://wiki.eve-id.net/API_Inventory_Flags for more details.
If the item corresponds to a container, it will have a key
'contents', which is itself a list of items in the same format
(potentially recursively holding containers of its own). If
the contents do not have 'location_id's of their own, they
inherit the 'location_id' of their parent container, for
convenience.
At the top level, the result is a dict mapping location ID
(typically a solar system) to a dict containing a 'contents'
key, which maps to a list of items. That is, you can think of
the top-level values as "containers" with no fields except for
"contents" and "location_id".
"""
return api.APIResult(parse_assets(api_result.result), api_result.timestamp, api_result.expires)
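    # Editor's note: hedged sketch of walking the nested structure described in the
    # docstring above; `char` is assumed to be a Char built with an authenticated API.
    #
    #   result = char.assets().result
    #   for location_id, top in result.items():
    #       for item in top['contents']:
    #           print(location_id, item['item_type_id'], item['quantity'])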
@auto_call('char/ContractBids')
def contract_bids(self, api_result=None):
"""Lists the latest bids that have been made to any recent auctions."""
return api.APIResult(parse_contract_bids(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/ContractItems', map_params={'contract_id': 'contractID'})
def contract_items(self, contract_id, api_result=None):
"""Lists items that a specified contract contains"""
return api.APIResult(parse_contract_items(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/Contracts')
def contracts(self, api_result=None):
"""Returns a record of all contracts for a specified character"""
return api.APIResult(parse_contracts(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/WalletJournal', map_params={'before_id': 'fromID', 'limit': 'rowCount'})
def wallet_journal(self, before_id=None, limit=None, api_result=None):
"""Returns a complete record of all wallet activity for a specified character"""
return api.APIResult(parse_wallet_journal(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/AccountBalance')
def wallet_info(self, api_result=None):
"""Return a given character's wallet."""
rowset = api_result.result.find('rowset')
row = rowset.find('row')
result = {
'balance': float(row.attrib['balance']),
'id': int(row.attrib['accountID']),
'key': int(row.attrib['accountKey']),
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
def wallet_balance(self):
"""Helper to return just the balance from a given character wallet"""
api_result = self.wallet_info()
return api.APIResult(api_result.result['balance'], api_result.timestamp, api_result.expires)
@auto_call('char/WalletTransactions', map_params={'before_id': 'fromID', 'limit': 'rowCount'})
def wallet_transactions(self, before_id=None, limit=None, api_result=None):
"""Returns wallet transactions for a character."""
return api.APIResult(parse_wallet_transactions(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/IndustryJobs')
def industry_jobs(self, api_result=None):
"""Get a list of jobs for a character"""
return api.APIResult(parse_industry_jobs(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/KillLog', map_params={'before_kill': 'beforeKillID'})
def kills(self, before_kill=None, api_result=None):
"""Look up recent kills for a character.
before_kill:
Optional. Only show kills before this kill id. (Used for paging.)
"""
return api.APIResult(parse_kills(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/Notifications')
def notifications(self, api_result=None):
"""Returns the message headers for notifications."""
result = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
a = row.attrib
notification_id = int(a['notificationID'])
result[notification_id] = {
'id': notification_id,
'type_id': int(a['typeID']),
'sender_id': int(a['senderID']),
'timestamp': api.parse_ts(a['sentDate']),
'read': a['read'] == '1',
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/NotificationTexts', map_params={'notification_ids': 'IDs'})
def notification_texts(self, notification_ids, api_result=None):
"""Returns the message bodies for notifications."""
result = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
notification_id = int(row.attrib['notificationID'])
notification = {'id': notification_id}
notification.update(api.parse_keyval_data(row.text))
result[notification_id] = notification
missing_ids = api_result.result.find('missingIDs')
if missing_ids is not None:
for missing_id in missing_ids.text.split(","):
result[missing_id] = None
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/Standings')
def standings(self, api_result=None):
"""Returns the standings towards a character from NPC entities."""
result = {}
rowsets = {}
for rowset in api_result.result.find('characterNPCStandings').findall('rowset'):
rowsets[rowset.attrib['name']] = rowset
_name_map = {
'agents': 'agents',
'corps': 'NPCCorporations',
'factions': 'factions',
}
        for key, rowset_name in _name_map.items():
result[key] = {}
for row in rowsets[rowset_name].findall('row'):
a = row.attrib
from_id = int(a['fromID'])
result[key][from_id] = {
'id': from_id,
'name': a['fromName'],
'standing': float(a['standing']),
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/CharacterSheet')
def character_sheet(self, api_result=None):
"""Returns attributes relating to a specific character."""
_str, _int, _float, _bool, _ts = api.elem_getters(api_result.result)
result = {
'id': _int('characterID'),
'name': _str('name'),
'create_ts': _ts('DoB'),
'race': _str('race'),
'bloodline': _str('bloodLine'),
'ancestry': _str('ancestry'),
'gender': _str('gender'),
'corp': {
'id': _int('corporationID'),
'name': _str('corporationName'),
},
'alliance': {
'id': _int('allianceID') or None,
'name': _str('allianceName'),
},
'clone': {
'name': _str('cloneName'),
'skillpoints': _int('cloneSkillPoints'),
},
'balance': _float('balance'),
'attributes': {},
}
for attr in ('intelligence', 'memory', 'charisma', 'perception', 'willpower'):
result['attributes'][attr] = {}
base = int(api_result.result.findtext('attributes/%s' % attr))
result['attributes'][attr]['base'] = base
result['attributes'][attr]['total'] = base
bonus = api_result.result.find('attributeEnhancers/%sBonus' % attr)
if bonus is not None:
mod = int(bonus.findtext('augmentatorValue'))
result['attributes'][attr]['total'] += mod
result['attributes'][attr]['bonus'] = {
'name': bonus.findtext('augmentatorName'),
'value': mod,
}
rowsets = {}
for rowset in api_result.result.findall('rowset'):
key = rowset.attrib['name']
rowsets[key] = rowset
result['skills'] = []
result['skillpoints'] = 0
for skill in rowsets['skills']:
a = skill.attrib
sp = int(a['skillpoints'])
result['skills'].append({
'id': int(a['typeID']),
'skillpoints': sp,
'level': int(a['level']),
'published': a['published'] == '1',
})
result['skillpoints'] += sp
result['certificates'] = set()
for cert in rowsets['certificates']:
result['certificates'].add(int(cert.attrib['certificateID']))
result['roles'] = {}
        for our_role, ccp_role in constants.Char().corp_roles.items():
result['roles'][our_role] = {}
for role in rowsets[ccp_role]:
a = role.attrib
role_id = int(a['roleID'])
result['roles'][our_role][role_id] = {
'id': role_id,
'name': a['roleName'],
}
result['titles'] = {}
for title in rowsets['corporationTitles']:
a = title.attrib
title_id = int(a['titleID'])
result['titles'][title_id] = {
'id': title_id,
'name': a['titleName'],
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/ContactList')
def contacts(self, api_result=None):
"""Return a character's personal, corp and alliance contact lists."""
return api.APIResult(parse_contact_list(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/MarketOrders')
def orders(self, api_result=None):
"""Return a given character's buy and sell orders."""
return api.APIResult(parse_market_orders(api_result.result), api_result.timestamp, api_result.expires)
@auto_call('char/Research')
def research(self, api_result=None):
"""Returns information about the agents with whom the character is doing research."""
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
result = {}
for row in rows:
a = row.attrib
id = int(a['agentID'])
result[id] = {
'id': id,
'skill_id': int(a['skillTypeID']),
'timestamp': api.parse_ts(a['researchStartDate']),
'per_day': float(a['pointsPerDay']),
'remaining': float(a['remainderPoints']),
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/SkillInTraining')
def current_training(self, api_result=None):
"""Returns the skill that is currently being trained by a specified character"""
_str, _int, _float, _bool, _ts = api.elem_getters(api_result.result)
result = {
'start_ts': _ts('trainingStartTime'),
'end_ts': _ts('trainingEndTime'),
'type_id': _int('trainingTypeID'),
'start_sp': _int('trainingStartSP'),
'end_sp': _int('trainingDestinationSP'),
'current_ts': _ts('currentTQTime'),
'level': _int('trainingToLevel'),
'active': _bool('skillInTraining'),
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/SkillQueue')
def skill_queue(self, api_result=None):
"""returns the skill queue of the character"""
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
result = []
for row in rows:
a = row.attrib
line = {
'position': int(a['queuePosition']),
'type_id': int(a['typeID']),
'level': int(a['level']),
'start_sp': int(a['startSP']),
'end_sp': int(a['endSP']),
'start_ts': api.parse_ts(a['startTime']),
'end_ts': api.parse_ts(a['endTime']),
}
result.append(line)
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/MailMessages')
def messages(self, api_result=None):
"""Returns a list of headers for a character's mail."""
rowset = api_result.result.find('rowset')
results = []
for row in rowset.findall('row'):
a = row.attrib
message = {
'id': int(a['messageID']),
'sender_id': int(a['senderID']),
'timestamp': api.parse_ts(a['sentDate']),
'title': a['title'],
'to': {},
}
org_id = a['toCorpOrAllianceID']
message['to']['org_id'] = int(org_id) if org_id else None
char_ids = a['toCharacterIDs']
message['to']['char_ids'] = [int(i) for i in char_ids.split(',')] if char_ids else None
list_ids = a['toListID']
message['to']['list_ids'] = [int(i) for i in list_ids.split(',')] if list_ids else None
results.append(message)
return api.APIResult(results, api_result.timestamp, api_result.expires)
@auto_call('char/MailBodies', map_params={'message_ids': 'ids'})
def message_bodies(self, message_ids, api_result=None):
"""Returns the actual body content of a set of mail messages.
NOTE: You *must* have recently looked up the headers of
any messages you are requesting bodies for (via the 'messages'
method) or else this call will fail.
"""
rowset = api_result.result.find('rowset')
results = {}
for row in rowset.findall('row'):
message_id = int(row.attrib['messageID'])
results[message_id] = row.text
missing_set = api_result.result.find('missingMessageIDs')
if missing_set is not None:
missing_ids = [int(i) for i in missing_set.text.split(',')]
for missing_id in missing_ids:
results[missing_id] = None
return api.APIResult(results, api_result.timestamp, api_result.expires)
@auto_call('char/MailingLists')
def mailing_lists(self, api_result=None):
"""Returns the mailing lists to which a character is subscribed."""
rowset = api_result.result.find('rowset')
results = {}
for row in rowset.findall('row'):
a = row.attrib
results[int(a['listID'])] = a['displayName']
return api.APIResult(results, api_result.timestamp, api_result.expires)
@auto_call('char/UpcomingCalendarEvents')
def calendar_events(self, api_result=None):
"""Returns the list of upcoming calendar events for a character."""
results = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
a = row.attrib
event = {
'id': int(a['eventID']),
'owner': {
'id': int(a['ownerID']),
'name': a['ownerName'] or None,
},
'start_ts': api.parse_ts(a['eventDate']),
'title': a['eventTitle'],
'duration': int(a['duration']),
'important': a['importance'] == '1',
'description': a['eventText'],
'response': a['response'],
}
results[event['id']] = event
return api.APIResult(results, api_result.timestamp, api_result.expires)
@auto_call('char/CalendarEventAttendees', map_params={'event_ids': 'eventIDs'})
def calendar_attendees(self, event_ids, api_result=None):
"""Returns the list of attendees for the specified calendar event.
This function takes a list of event IDs and returns a dict of dicts,
with the top-level dict being keyed by event ID and the children
keyed by the character IDs of the attendees.
NOTE: You must have recently fetched the list of calendar events
(using the 'calendar_events' method) before calling this method.
"""
results = dict((int(i),{}) for i in event_ids)
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
a = row.attrib
attendee = {
'id': int(a['characterID']),
'name': a['characterName'],
'response': a['response'],
}
results[int(a['eventID'])][attendee['id']] = attendee
return api.APIResult(results, api_result.timestamp, api_result.expires)
def event_attendees(self, event_id, api_result=None):
"""Returns the attendees for a single event.
(This is a convenience wrapper around 'calendar_attendees'.)
NOTE: You must have recently fetched the list of calendar events
(using the 'calendar_events' method) before calling this method.
"""
api_result = self.calendar_attendees([event_id])
return api.APIResult(api_result.result[int(event_id)], api_result.timestamp, api_result.expires)
@auto_call('char/FacWarStats')
def faction_warfare_stats(self, api_result=None):
"""Returns FW stats for this character, if enrolled in FW.
NOTE: This will return an error instead if the character
is not enrolled in Faction Warfare.
"""
_str, _int, _float, _bool, _ts = api.elem_getters(api_result.result)
result = {
'faction': {
'id': _int('factionID'),
'name': _str('factionName'),
},
'enlist_ts': _ts('enlisted'),
'rank': {
'current': _int('currentRank'),
'highest': _int('highestRank'),
},
'kills': {
'yesterday': _int('killsYesterday'),
'week': _int('killsLastWeek'),
'total': _int('killsTotal'),
},
'points': {
'yesterday': _int('victoryPointsYesterday'),
'week': _int('victoryPointsLastWeek'),
'total': _int('victoryPointsTotal'),
},
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/Medals')
def medals(self, api_result=None):
"""Returns a list of medals the character has."""
result = {'current': {}, 'other': {}}
_map = {
'currentCorporation': 'current',
'otherCorporations': 'other',
}
for rowset in api_result.result.findall('rowset'):
name = _map[rowset.attrib['name']]
for row in rowset.findall('row'):
a = row.attrib
medal_id = int(a['medalID'])
result[name][medal_id] = {
'id': medal_id,
'reason': a['reason'],
'public': a['status'] == 'public',
'issuer_id': int(a['issuerID']),
'corp_id': int(a['corporationID']),
'title': a['title'],
'description': a['description'],
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@auto_call('char/ContactNotifications')
def contact_notifications(self, api_result=None):
"""Returns pending contact notifications."""
results = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
a = row.attrib
note = {
'id': int(a['notificationID']),
'sender': {
'id': int(a['senderID']),
'name': a['senderName'],
},
'timestamp': api.parse_ts(a['sentDate']),
'data': api.parse_keyval_data(a['messageData']),
}
results[note['id']] = note
return api.APIResult(results, api_result.timestamp, api_result.expires)
@auto_call('char/Locations', map_params={'location_list': 'IDs'})
def locations(self, location_list, api_result=None):
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
name = row.attrib['itemName'] or None
id = int(row.attrib['itemID']) or None
x = float(row.attrib['x']) or None
y = float(row.attrib['y']) or None
z = float(row.attrib['z']) or None
results[id] = {
'name': name,
'id' : id,
'x' : x,
'y' : y,
'z' : z,
}
return api.APIResult(results, api_result.timestamp, api_result.expires)
# vim: set ts=4 sts=4 sw=4 et:
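# Editor's note: hedged end-to-end sketch, not part of the original module. The key ID,
# vCode, and character ID are placeholders, and `evelink.api.API(api_key=...)` is an
# assumption about the companion API wrapper.
#
#   import evelink.api
#   from evelink.char import Char
#
#   a = evelink.api.API(api_key=(123456, 'verification-code'))
#   char = Char(char_id=91000001, api=a)
#   print(char.wallet_balance().result)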
|
the-stack_106_29948 | """
Plot using OceanDataset objects.
"""
# Instructions for developers:
# 1. All funcions must return plt.Axes or xr.plot.FacetGrid objects,
# 2. Functions should use the cutout_kwargs argument at the beginning.
# 3. Make functions compatible with the animate module,
# and create a twin function under animate.
# 4. Add new functions to _plotMethods
# 5. Add new functions to docs/api.rst
import functools as _functools
import warnings as _warnings
import numpy as _np
import pandas as _pd
# Required dependencies (private)
import xarray as _xr
import oceanspy as _ospy
# From oceanspy (private)
from . import compute as _compute
from ._ospy_utils import (
_check_instance,
_check_mean_and_int_axes,
_check_options,
_rename_aliased,
)
from .compute import _add_missing_variables
from .compute import integral as _integral
from .compute import weighted_mean as _weighted_mean
# Additional dependencies (private)
try:
import matplotlib.pyplot as _plt
except ImportError: # pragma: no cover
pass
try:
import cartopy.crs as _ccrs
except ImportError: # pragma: no cover
pass
def TS_diagram(
od,
Tlim=None,
Slim=None,
dens=None,
meanAxes=None,
colorName=None,
plotFreez=True,
ax=None,
cmap_kwargs=None,
contour_kwargs=None,
clabel_kwargs=None,
cutout_kwargs=None,
**kwargs
):
"""
Plot temperature-salinity diagram.
Parameters
----------
od: OceanDataset
oceandataset used to plot.
Tlim: array_like with 2 elements
Temperature limits on the y axis.
If None, uses min and max values.
Slim: array_like with 2 elements
Salinity limits on the x axis.
If None, uses min and max values.
dens: xarray.DataArray
DataArray with densities used for isopycnals.
Must have coordinates (Temp, S).
        If None, dens is inferred from Temp and S.
meanAxes: 1D array_like, str, or None
List of axes over which to apply weighted mean.
If None, don't average.
colorName: str, None
Name of the variable to use to color (e.g., Temp).
        If None, uses plot instead of scatter (much faster)
plotFreez: bool
If True, plot freezing line in blue.
ax: matplotlib.pyplot.axes
If None, uses the current axis.
cmap_kwargs: dict
Keyword arguments for the colormap (same used by xarray)
contour_kwargs: dict
Keyword arguments for
        :py:func:`matplotlib.pyplot.contour` (isopycnals)
clabel_kwargs: dict
Keyword arguments for
        :py:func:`matplotlib.pyplot.clabel` (isopycnals)
cutout_kwargs: dict
Keyword arguments for
:py:func:`oceanspy.subsample.cutout`
**kwargs:
If colorName is None:
            Keyword arguments for :py:func:`matplotlib.pyplot.plot`
        Otherwise,
            keyword arguments for :py:func:`matplotlib.pyplot.scatter`
Returns
-------
ax: matplotlib.pyplot.axes
Axes object.
References
----------
http://xarray.pydata.org/en/stable/plotting.html#introduction
See Also
--------
oceanspy.animate.TS_diagram
"""
# Check parameters
_check_instance(
{
"od": od,
"colorName": colorName,
"plotFreez": plotFreez,
"ax": ax,
"cmap_kwargs": cmap_kwargs,
"contour_kwargs": contour_kwargs,
"clabel_kwargs": clabel_kwargs,
"cutout_kwargs": cutout_kwargs,
"dens": dens,
},
{
"od": "oceanspy.OceanDataset",
"colorName": ["type(None)", "str"],
"plotFreez": "bool",
"ax": ["type(None)", "matplotlib.pyplot.Axes"],
"cmap_kwargs": ["type(None)", "dict"],
"contour_kwargs": ["type(None)", "dict"],
"clabel_kwargs": ["type(None)", "dict"],
"cutout_kwargs": ["type(None)", "dict"],
"dens": ["type(None)", "xarray.DataArray"],
},
)
if Tlim is not None:
Tlim = _np.asarray(Tlim)
if Tlim.size != 2:
raise ValueError("`Tlim` must contain 2 elements")
Tlim = Tlim.reshape(2)
if Slim is not None:
Slim = _np.asarray(Slim)
if Slim.size != 2:
raise ValueError("`Slim` must contain 2 elements")
Slim = Slim.reshape(2)
if dens is not None and not set(["Temp", "S"]).issubset(dens.coords):
raise ValueError("`dens` must have coordinates (Temp, S)")
# Change None in empty dict
if cmap_kwargs is None:
cmap_kwargs = {}
cmap_kwargs = dict(cmap_kwargs)
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs = dict(contour_kwargs)
if clabel_kwargs is None:
clabel_kwargs = {}
clabel_kwargs = dict(clabel_kwargs)
if cutout_kwargs is None:
cutout_kwargs = {}
cutout_kwargs = dict(cutout_kwargs)
# Cutout first
if len(cutout_kwargs) != 0:
od = od.subsample.cutout(**cutout_kwargs)
# Check and extract T and S
varList = ["Temp", "S"]
od = _add_missing_variables(od, varList)
# Compute mean
if meanAxes is not None:
mean_ds = _compute.weighted_mean(
od,
varNameList=["Temp", "S"],
axesList=meanAxes,
storeWeights=False,
aliased=False,
)
T = mean_ds["w_mean_Temp"].rename("Temp")
S = mean_ds["w_mean_S"].rename("S")
lost_coords = list(set(od._ds["Temp"].dims) - set(T.coords))
else:
T = od._ds["Temp"]
S = od._ds["S"]
lost_coords = []
# Extract color field, and interpolate if needed
if colorName is not None:
# Add missing variables (use private)
_colorName = _rename_aliased(od, colorName)
od = _add_missing_variables(od, _colorName)
# Extract color (use public)
color = od.dataset[colorName]
if meanAxes is not None:
mean_ds = _compute.weighted_mean(
od,
varNameList=_colorName,
axesList=meanAxes,
storeWeights=False,
aliased=False,
)
color = mean_ds["w_mean_" + _colorName].rename(_colorName)
else:
color = od.dataset[colorName]
grid = od.grid
dims2interp = [dim for dim in color.dims if dim not in T.dims]
# Interpolation
for dim in dims2interp:
for axis in od.grid.axes.keys():
if dim in [
od.grid.axes[axis].coords[k]
for k in od.grid.axes[axis].coords.keys()
]:
print(
"Interpolating [{}] along [{}]-axis." "".format(colorName, axis)
)
attrs = color.attrs
color = grid.interp(
color, axis, to="center", boundary="fill", fill_value=_np.nan
)
color.attrs = attrs
# Broadcast, in case color has different dimensions
T, S, color = _xr.broadcast(T, S, color)
# Compute density
T = T.persist()
S = S.persist()
if Tlim is None:
Tlim = [T.min().values, T.max().values]
if Slim is None:
Slim = [S.min().values, S.max().values]
if dens is None:
print("Isopycnals: ", end="")
tlin = _xr.DataArray(_np.linspace(Tlim[0], Tlim[-1], 100), dims=("t"))
slin = _xr.DataArray(_np.linspace(Slim[0], Slim[-1], 100), dims=("s"))
t, s = _xr.broadcast(tlin, slin)
odSigma0 = _ospy.OceanDataset(_xr.Dataset({"Temp": t, "S": s}))
odSigma0 = odSigma0.set_parameters(od.parameters)
odSigma0 = odSigma0.compute.potential_density_anomaly()
odSigma0._ds = odSigma0._ds.set_coords(["Temp", "S"])
# Freezing point
paramsList = ["tempFrz0", "dTempFrz_dS"]
params2use = {
par: od.parameters[par] for par in od.parameters if par in paramsList
}
tempFrz0 = params2use["tempFrz0"]
dTempFrz_dS = params2use["dTempFrz_dS"]
freez_point = tempFrz0 + odSigma0._ds["S"] * dTempFrz_dS
# Extract Density
dens = odSigma0._ds["Sigma0"].where(odSigma0._ds["Temp"] > freez_point)
# Create axis
if ax is None:
ax = _plt.gca()
# Use plot if colorless (faster!), otherwise use scatter
if colorName is None:
default_kwargs = {"color": "k", "linestyle": "None", "marker": "."}
kwargs = {**default_kwargs, **kwargs}
ax.plot(S.values.flatten(), T.values.flatten(), **kwargs)
else:
# Mask points out of axes
color = color.where(_np.logical_and(T > min(Tlim), T < max(Tlim)))
        color = color.where(_np.logical_and(S > min(Slim), S < max(Slim)))
color = color.stack(all_dims=color.dims)
c = color.values
# Create colorbar (stolen from xarray)
cmap_kwargs["plot_data"] = c
cmap_params = _xr.plot.utils._determine_cmap_params(**cmap_kwargs)
extend = cmap_params.pop("extend")
_ = cmap_params.pop("levels")
kwargs = {**cmap_params, **kwargs}
# Scatter
sc = ax.scatter(S.values.flatten(), T.values.flatten(), c=c, **kwargs)
_plt.colorbar(sc, label=_xr.plot.utils.label_from_attrs(color), extend=extend)
# Plot isopycnals
t = dens["Temp"]
s = dens["S"]
col_keys = ["colors", "cmap"]
default_contour_kwargs = {key: contour_kwargs.pop(key, None) for key in col_keys}
if all(default_contour_kwargs[key] is None for key in col_keys):
default_contour_kwargs["colors"] = "gray"
contour_kwargs = {**default_contour_kwargs, **contour_kwargs}
CS = ax.contour(s.values, t.values, dens.values, **contour_kwargs)
ax.clabel(CS, **clabel_kwargs)
# Plot freezing point
if plotFreez:
paramsList = ["tempFrz0", "dTempFrz_dS"]
params2use = {
par: od.parameters[par] for par in od.parameters if par in paramsList
}
tempFrz0 = params2use["tempFrz0"]
dTempFrz_dS = params2use["dTempFrz_dS"]
s = _np.unique(s.values.flatten())
ax.plot(s, tempFrz0 + s * dTempFrz_dS, "b")
# Set labels and limits
ax.set_xlabel(_xr.plot.utils.label_from_attrs(S))
ax.set_ylabel(_xr.plot.utils.label_from_attrs(T))
ax.set_xlim(Slim)
ax.set_ylim(Tlim)
# Set title
title = []
all_coords = list(lost_coords) + list(T.coords)
skip_coords = ["X", "Y", "Xp1", "Yp1"]
if any([dim in od._ds.dims for dim in ["mooring", "station", "particle"]]):
skip_coords = [coord for coord in od._ds.coords if "X" in coord or "Y" in coord]
for coord in all_coords:
if coord not in skip_coords:
if coord in list(lost_coords):
da = od._ds["Temp"]
pref = "<"
suf = ">"
else:
da = T
pref = ""
suf = ""
rng = [da[coord].min().values, da[coord].max().values]
units = da[coord].attrs.pop("units", "")
if units.lower() == "none":
units = ""
if "time" in coord:
for i, v in enumerate(rng):
ts = _pd.to_datetime(str(v))
rng[i] = ts.strftime("%Y-%m-%d %r")
if rng[0] == rng[-1]:
rng = "{}".format(rng[0])
else:
rng = "from {} to {}".format(rng[0], rng[1])
title = title + ["{}{}{}: {} {}" "".format(pref, coord, suf, rng, units)]
ax.set_title("\n".join(title))
return ax
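# Editor's note: hedged usage sketch; `od` is assumed to be an OceanDataset providing
# Temp and S, and the choice of axes/variable names is illustrative.
#
#   import matplotlib.pyplot as plt
#   ax = TS_diagram(od, meanAxes=['time'], colorName='Temp')
#   plt.show()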
def time_series(
od, varName, meanAxes=False, intAxes=False, cutout_kwargs=None, **kwargs
):
"""
Plot time series.
Parameters
----------
od: OceanDataset
oceandataset used to plot.
varName: str, None
Name of the variable to plot.
meanAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.weighted_mean`.
If True,
set meanAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip weighted mean.
intAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.integral`.
If True,
set intAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip integral.
cutout_kwargs: dict
Keyword arguments for
:py:func:`oceanspy.subsample.cutout`
**kwargs:
        Keyword arguments for :py:func:`xarray.plot.line`
Returns
-------
ax: matplotlib.pyplot.axes
Axes object.
References
----------
http://xarray.pydata.org/en/stable/generated/xarray.plot.line.html#xarray.plot.line
"""
# Check parameters
_check_instance(
{"od": od, "varName": varName, "cutout_kwargs": cutout_kwargs},
{
"od": "oceanspy.OceanDataset",
"varName": "str",
"cutout_kwargs": ["type(None)", "dict"],
},
)
# Check mean and int axes
meanAxes, intAxes = _check_mean_and_int_axes(
od=od, meanAxes=meanAxes, intAxes=intAxes, exclude=["time"]
)
# Handle kwargs
if cutout_kwargs is None:
cutout_kwargs = {}
cutout_kwargs = dict(cutout_kwargs)
# Cutout first
if len(cutout_kwargs) != 0:
od = od.subsample.cutout(**cutout_kwargs)
# Variable name
_varName = _rename_aliased(od, varName)
od = _add_missing_variables(od, _varName)
# Mean and sum
da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)
# Get time name
time_name = [dim for dim in od.grid_coords["time"] if dim in da.dims]
if len(time_name) != 1:
raise ValueError("Couldn't find time dimension")
else:
time_name = time_name[0]
# Check
if len(da.shape) > 2:
dims = list(da.dims)
dims.remove(time_name)
raise ValueError(
"Timeseries containing multiple"
" dimension other than time: {}".format(dims)
)
# Plot
_ = da.plot.line(**{"x": time_name, **kwargs})
_plt.tight_layout()
return _plt.gca()
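# Editor's note: hedged usage sketch; assumes `od` has a 'time' grid coordinate and a
# variable named Temp.
#
#   ax = time_series(od, 'Temp', meanAxes=True)   # weighted mean over all non-time axes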
def horizontal_section(
od,
varName,
plotType="pcolormesh",
use_coords=True,
contourName=None,
meanAxes=False,
intAxes=False,
contour_kwargs=None,
clabel_kwargs=None,
cutout_kwargs=None,
**kwargs
):
"""
Plot horizontal section.
Parameters
----------
od: OceanDataset
oceandataset used to plot.
varName: str, None
Name of the variable to plot.
plotType: str
2D plot type.
Options: {'contourf', 'contour', 'imshow', 'pcolormesh'}
use_coords: bool
If True, use coordinates for x and y axis (e.g., XC and YC).
If False, use dimensions for x and y axis (e.g., X and Y)
contourName: str, None
Name of the variable to contour on top.
meanAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.weighted_mean`.
If True,
set meanAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip weighted mean.
intAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.integral`.
If True,
set intAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip integral.
contour_kwargs: dict
Keyword arguments for :py:func:`xarray.plot.contour`
clabel_kwargs: dict
Keyword arguments for :py:func:`matplotlib.pyplot.clabel`
cutout_kwargs: dict
Keyword arguments for
:py:func:`oceanspy.subsample.cutout`
**kwargs:
        Keyword arguments for :py:mod:`xarray.plot`.plotType
Returns
-------
matplotlib.pyplot.axes or xarray.plot.FacetGrid
References
----------
http://xarray.pydata.org/en/stable/plotting.html
See Also
--------
oceanspy.animate.horizontal_section
"""
# Check parameters
_check_instance(
{
"od": od,
"varName": varName,
"plotType": plotType,
"use_coords": use_coords,
"contourName": contourName,
"contour_kwargs": contour_kwargs,
"clabel_kwargs": clabel_kwargs,
"cutout_kwargs": cutout_kwargs,
},
{
"od": "oceanspy.OceanDataset",
"varName": "str",
"plotType": "str",
"use_coords": "bool",
"contourName": ["type(None)", "str"],
"contour_kwargs": ["type(None)", "dict"],
"clabel_kwargs": ["type(None)", "dict"],
"cutout_kwargs": ["type(None)", "dict"],
},
)
# Check oceandataset
wrong_dims = ["mooring", "station", "particle"]
if any([dim in od._ds.dims for dim in wrong_dims]):
        raise ValueError(
            "`plot.horizontal_section` does not support"
            " `od` with the following dimensions: "
            "{}".format(wrong_dims)
        )
# Check plot
_check_options(
name="plotType",
selected=plotType,
options=["contourf", "contour", "imshow", "pcolormesh"],
)
# Handle kwargs
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs = dict(contour_kwargs)
if clabel_kwargs is None:
clabel_kwargs = {}
clabel_kwargs = dict(clabel_kwargs)
if cutout_kwargs is None:
cutout_kwargs = {}
cutout_kwargs = dict(cutout_kwargs)
# Cutout first
if len(cutout_kwargs) != 0:
od = od.subsample.cutout(**cutout_kwargs)
# Check variables and add
listName = [varName]
if contourName is not None:
listName = listName + [contourName]
_listName = _rename_aliased(od, listName)
od = _add_missing_variables(od, _listName)
# Check mean and int axes
meanAxes, intAxes = _check_mean_and_int_axes(
od=od, meanAxes=meanAxes, intAxes=intAxes, exclude=["X", "Y"]
)
# Apply mean and sum
da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)
# SQUEEZE! Otherwise animation don't show up
# because xarray makes a faceted plot
da = da.squeeze()
# Get dimension names
X_name = [dim for dim in od.grid_coords["X"] if dim in da.dims][0]
Y_name = [dim for dim in od.grid_coords["Y"] if dim in da.dims][0]
# CONTOURNAME
if contourName is not None:
# Apply mean and sum
da_contour, contourName = _compute_mean_and_int(
od, contourName, meanAxes, intAxes
)
# SQUEEZE! Otherwise animation don't show up
# because xarray makes a faceted plot
da_contour = da_contour.squeeze()
# Get dimension names
X_name_cont = [dim for dim in od.grid_coords["X"] if dim in da_contour.dims][0]
Y_name_cont = [dim for dim in od.grid_coords["Y"] if dim in da_contour.dims][0]
# Get dimensions
dims = list(da.dims)
dims.remove(X_name)
dims.remove(Y_name)
# Use coordinates
if use_coords:
al_dim = {}
for dim in ["X", "Y", "Xp1", "Yp1"]:
al_dim[dim] = _rename_aliased(od, varNameList=dim)
if X_name == al_dim["X"] and Y_name == al_dim["Y"]:
point = "C"
elif X_name == al_dim["Xp1"] and Y_name == al_dim["Y"]:
point = "U"
elif X_name == al_dim["X"] and Y_name == al_dim["Yp1"]:
point = "V"
else:
point = "G"
X_name = _rename_aliased(od, varNameList="X" + point)
Y_name = _rename_aliased(od, varNameList="Y" + point)
if contourName is not None:
if all([X_name_cont == al_dim["X"], Y_name_cont == al_dim["Y"]]):
point_cont = "C"
elif all([X_name_cont == al_dim["Xp1"], Y_name_cont == al_dim["Y"]]):
point_cont = "U"
elif all([X_name_cont == al_dim["X"], Y_name_cont == al_dim["Yp1"]]):
point_cont = "V"
else:
point_cont = "G"
X_name_cont = _rename_aliased(od, varNameList="X" + point_cont)
Y_name_cont = _rename_aliased(od, varNameList="Y" + point_cont)
# Pop from kwargs
ax = kwargs.pop("ax", None)
col = kwargs.pop("col", None)
col_wrap = kwargs.pop("col_wrap", None)
subplot_kws = kwargs.pop("subplot_kws", None)
transform = kwargs.pop("transform", None)
# Projection
if ax is None:
if subplot_kws is None:
subplot_kws = dict(projection=od.projection)
elif "projection" not in subplot_kws.keys():
subplot_kws["projection"] = od.projection
elif ax and od.projection is not None and not hasattr(ax, "projection"):
od = od.set_projection(None)
_warnings.warn(
"\nSwitching projection off."
"If `ax` is passed, it needs"
" to be initialiazed with a projection.",
stacklevel=2,
)
kwargs["ax"] = ax
kwargs["subplot_kws"] = subplot_kws
# Multiple plots:
if len(dims) == 1:
extra_name = dims[0]
# TODO: For some reason, faceting and cartopy are not
# working very nice with our configurations
# Drop it for now, but we need to explore it more
sbp_kws_proj = kwargs["subplot_kws"].pop("projection", None)
if od.projection is not None or sbp_kws_proj is not None:
            _warnings.warn(
                "\nSwitching projection off."
" This function currently"
" does not support faceting for projected plots.",
stacklevel=2,
)
od = od.set_projection(None)
transform = None
sbp_kws_proj = None
kwargs["subplot_kws"]["projection"] = sbp_kws_proj
# Add col
if col is None:
col = extra_name
kwargs["col"] = col
kwargs["col_wrap"] = col_wrap
elif len(dims) != 0:
        raise ValueError(
            "There are too many dimensions: {}. "
            "A maximum of 3 dimensions (including time)"
            " are supported. "
            "Reduce the number of dimensions using "
            "`meanAxes` and/or `intAxes`.".format(dims)
        )
# Add transform
if transform is None and od.projection is not None:
kwargs["transform"] = _ccrs.PlateCarree()
# Plot
args = {"x": X_name, "y": Y_name, **kwargs}
plotfunc = eval("_xr.plot." + plotType)
p = plotfunc(da, **args)
# Contour
if contourName is not None:
ax = args.pop("ax", None)
transform = args.pop("transform", None)
col_keys = ["colors", "cmap"]
default_contour_kwargs = {
key: contour_kwargs.pop(key, None) for key in col_keys
}
if all(default_contour_kwargs[key] is None for key in col_keys):
default_contour_kwargs["colors"] = "gray"
contour_kwargs = {**default_contour_kwargs, **contour_kwargs}
args = {
"x": X_name_cont,
"y": Y_name_cont,
"ax": ax,
"transform": transform,
"add_labels": False,
**contour_kwargs,
}
if len(dims) == 0:
cont = da_contour.plot.contour(**args)
_plt.clabel(cont, **clabel_kwargs)
else:
for i, thisax in enumerate(p.axes.flat):
if extra_name in da_contour.dims:
da_contour_i = da_contour.isel({extra_name: i}).squeeze()
else:
da_contour_i = da_contour
cont = da_contour_i.plot.contour(**{**args, "ax": thisax})
_plt.clabel(cont, **clabel_kwargs)
# Labels and return
add_labels = kwargs.pop("add_labels", None)
if len(dims) == 0:
ax = _plt.gca()
if od.projection is None:
_plt.tight_layout()
else:
if add_labels is not False:
try:
gl = ax.gridlines(crs=transform, draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
except TypeError:
# Gridlines don't work with all projections
pass
return ax
else:
return p
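# Editor's note: hedged usage sketch; the variable names Eta and Depth are illustrative
# and assume `od` provides them.
#
#   ax = horizontal_section(od, 'Eta', meanAxes=['time'], contourName='Depth')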
def vertical_section(
od,
varName,
plotType="pcolormesh",
use_dist=True,
subsampMethod=None,
contourName=None,
meanAxes=False,
intAxes=False,
contour_kwargs=None,
clabel_kwargs=None,
subsamp_kwargs=None,
cutout_kwargs=None,
**kwargs
):
"""
Plot vertical section.
Parameters
----------
od: OceanDataset
oceandataset used to plot.
varName: str, None
Name of the variable to plot.
plotType: str
2D plot type.
Options: {'contourf', 'contour', 'imshow', 'pcolormesh'}
use_dist: bool
If True, use distances for x axis.
If False, use mooring or station.
subsampMethod: str, None
Subsample method.
Options: {'mooring_array', 'survey_station'}
contourName: str, None
Name of the variable to contour on top.
meanAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.weighted_mean`.
If True,
set meanAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip weighted mean.
intAxes: 1D array_like, str, or bool
List of axes over which to apply
:py:func:`oceanspy.compute.integral`.
If True,
set intAxes= :py:attr:`oceanspy.OceanDataset.grid_coords`.
If False, skip integral.
contour_kwargs: dict
Keyword arguments for :py:func:`xarray.plot.contour`
clabel_kwargs: dict
Keyword arguments for :py:func:`matplotlib.pyplot.clabel`
subsamp_kwargs: dict
Keyword arguments for
:py:func:`oceanspy.subsample.mooring_array`
or :py:func:`oceanspy.subsample.survey_stations`
cutout_kwargs: dict
Keyword arguments for
:py:func:`oceanspy.subsample.cutout`
**kwargs:
        Keyword arguments for :py:mod:`xarray.plot`.plotType
Returns
-------
matplotlib.pyplot.axes or xarray.plot.FacetGrid
References
----------
http://xarray.pydata.org/en/stable/plotting.html
See Also
--------
oceanspy.animate.vertical_section
"""
# Check parameters
_check_instance(
{
"od": od,
"varName": varName,
"plotType": plotType,
"use_dist": use_dist,
"subsampMethod": subsampMethod,
"contourName": contourName,
"contour_kwargs": contour_kwargs,
"clabel_kwargs": clabel_kwargs,
"subsamp_kwargs": subsamp_kwargs,
},
{
"od": "oceanspy.OceanDataset",
"varName": "str",
"plotType": "str",
"use_dist": "bool",
"subsampMethod": ["type(None)", "str"],
"contourName": ["type(None)", "str"],
"contour_kwargs": ["type(None)", "dict"],
"clabel_kwargs": ["type(None)", "dict"],
"subsamp_kwargs": ["type(None)", "dict"],
},
)
# Check plot
_check_options(
name="plotType",
selected=plotType,
options=["contourf", "contour", "imshow", "pcolormesh"],
)
# Check subsample
if subsampMethod is not None:
# Check plot
_check_options(
name="subsampMethod",
selected=subsampMethod,
options=["mooring_array", "survey_stations"],
)
# Handle kwargs
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs = dict(contour_kwargs)
if clabel_kwargs is None:
clabel_kwargs = {}
clabel_kwargs = dict(clabel_kwargs)
if cutout_kwargs is None:
cutout_kwargs = {}
cutout_kwargs = dict(cutout_kwargs)
# For animation purposes.
if len(cutout_kwargs) != 0:
od = od.subsample.cutout(**cutout_kwargs)
# Subsample first
if subsamp_kwargs is not None and subsampMethod is not None:
if subsampMethod == "mooring_array":
od = od.subsample.mooring_array(**subsamp_kwargs)
else:
# survey_stations
od = od.subsample.survey_stations(**subsamp_kwargs)
# Check oceandataset
needed_dims = ["mooring", "station"]
if not any([dim in od.grid_coords.keys() for dim in needed_dims]):
raise ValueError(
"`plot.vertical_section` only supports"
" `od` with one of the following grid coordinates: "
"{}".format(needed_dims)
)
# Check variables and add
listName = [varName]
if contourName is not None:
listName = listName + [contourName]
_listName = _rename_aliased(od, listName)
od = _add_missing_variables(od, _listName)
# Check mean and int axes
meanAxes, intAxes = _check_mean_and_int_axes(
od=od,
meanAxes=meanAxes,
intAxes=intAxes,
exclude=["mooring", "station", "X", "Y", "Z"],
)
# Apply mean and sum
da, varName = _compute_mean_and_int(od, varName, meanAxes, intAxes)
# SQUEEZE! Otherwise animation don't show up
# because xarray makes a faceted plot
da = da.squeeze()
da, hor_name = _Vsection_regrid(od, da, varName)
ver_name = [dim for dim in od.grid_coords["Z"] if dim in da.dims][0]
da = da.squeeze()
# CONTOURNAME
if contourName is not None:
# Apply mean and sum
da_contour = od.dataset[contourName]
da_contour, contourName = _compute_mean_and_int(
od, contourName, meanAxes, intAxes
)
# SQUEEZE! Otherwise animation don't show up
# because xarray makes a faceted plot
da_contour = da_contour.squeeze()
# Get dimension names
da_contour, hor_name_cont = _Vsection_regrid(od, da_contour, contourName)
ver_name_cont = [dim for dim in od.grid_coords["Z"] if dim in da_contour.dims]
if len(ver_name_cont) != 1:
raise ValueError("Couldn't find Z dimension of [{}]" "".format(contourName))
else:
ver_name_cont = ver_name_cont[0]
da_contour = da_contour.squeeze()
# Check dimensions
dims = list(da.dims)
dims.remove(hor_name)
dims.remove(ver_name)
# Use distances
if use_dist:
if hor_name + "_dist" in da.coords:
hor_name = hor_name + "_dist"
hor_name_cont = hor_name
# Pop from kwargs
ax = kwargs.pop("ax", None)
col = kwargs.pop("col", None)
if len(dims) == 0:
# Single plot:
# Add ax
if ax is None:
ax = _plt.axes()
kwargs["ax"] = ax
else:
# Multiple plots:
extra_name = dims[0]
# Add col
if col is None:
col = extra_name
kwargs["col"] = col
# Plot
args = {"x": hor_name, "y": ver_name, **kwargs}
plotfunc = eval("_xr.plot." + plotType)
p = plotfunc(da, **args)
# Contour
if contourName is not None:
ax = args.pop("ax", None)
col_keys = ["colors", "cmap"]
default_contour_kwargs = {
key: contour_kwargs.pop(key, None) for key in col_keys
}
if all(default_contour_kwargs[key] is None for key in col_keys):
default_contour_kwargs["colors"] = "gray"
contour_kwargs = {**default_contour_kwargs, **contour_kwargs}
args = {
"x": hor_name_cont,
"y": ver_name_cont,
"ax": ax,
"add_labels": False,
**contour_kwargs,
}
if ax is not None:
cont = da_contour.plot.contour(**args)
_plt.clabel(cont, **clabel_kwargs)
else:
for i, thisax in enumerate(p.axes.flat):
if extra_name in da_contour.dims:
da_contour_i = da_contour.isel({extra_name: i}).squeeze()
else:
da_contour_i = da_contour
cont = da_contour_i.plot.contour(**{**args, "ax": thisax})
_plt.clabel(cont, **clabel_kwargs)
# Return
if ax is not None:
_plt.tight_layout()
return ax
else:
return p
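# Editor's note: hedged usage sketch; the survey coordinates are invented, and the
# Xsurv/Ysurv keyword names are assumptions about oceanspy.subsample.survey_stations.
#
#   import numpy as np
#   ax = vertical_section(od, 'Temp', subsampMethod='survey_stations',
#                         subsamp_kwargs=dict(Xsurv=np.linspace(-30, -20, 10),
#                                             Ysurv=np.linspace(60, 70, 10)))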
def _compute_mean_and_int(od, varName, meanAxes, intAxes):
# Mean and sum
if meanAxes is not False:
ds = _weighted_mean(
od, varNameList=[varName], axesList=meanAxes, storeWeights=False
)
for var in ds.data_vars:
varName = var
od = od.merge_into_oceandataset(ds)
if intAxes is not False:
ds = _integral(od, varNameList=[varName], axesList=intAxes)
for var in ds.data_vars:
varName = var
od = od.merge_into_oceandataset(ds)
# Extract da
da = od.dataset[varName]
return da, varName
def _Vsection_regrid(od, da, varName):
if "mooring" in od.grid_coords:
# Time coordinates
if "time" in od.grid_coords.keys():
time_coords = {
timeName: da[timeName]
for timeName in od.grid_coords["time"].keys()
if timeName in da.coords
}
else:
time_coords = {}
# Regrid to center dim
for axis in ["X", "Y"]:
dim2regrid = [
dim
for dim in od.grid_coords[axis]
if (od.grid_coords[axis][dim] is not None and dim in da.dims)
]
if len(dim2regrid) != 0:
                print("Regridding [{}] along [{}]-axis.".format(varName, axis))
da_attrs = da.attrs
da = od.grid.interp(da, axis)
da.attrs = da_attrs
hor_name = [dim for dim in od.grid_coords["mooring"] if dim in da.dims]
if len(hor_name) != 1:
            raise ValueError(
                "Couldn't find `mooring` dimension of [{}]".format(varName)
            )
else:
hor_name = hor_name[0]
da = da.assign_coords(**time_coords)
if hor_name + "_dist" in od._ds.coords:
da = da.assign_coords(
**{hor_name + "_dist": od._ds[hor_name + "_dist"]}
)
for toRem in ["X", "Y", "Xp1", "Yp1"]:
toRem = _rename_aliased(od, varNameList=toRem)
if toRem in da.coords:
da = da.drop_vars(toRem)
else:
# Station
hor_name = [dim for dim in od.grid_coords["station"] if dim in da.dims]
if len(hor_name) != 1:
            raise ValueError(
                "Couldn't find `station` dimension of [{}]".format(varName)
            )
else:
hor_name = hor_name[0]
return da, hor_name
class _plotMethods(object):
"""
Enables use of functions as OceanDataset attributes.
"""
def __init__(self, od):
self._od = od
@_functools.wraps(TS_diagram)
def TS_diagram(self, **kwargs):
return TS_diagram(self._od, **kwargs)
@_functools.wraps(time_series)
def time_series(self, **kwargs):
return time_series(self._od, **kwargs)
@_functools.wraps(horizontal_section)
def horizontal_section(self, **kwargs):
return horizontal_section(self._od, **kwargs)
@_functools.wraps(vertical_section)
def vertical_section(self, **kwargs):
return vertical_section(self._od, **kwargs)
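# Illustrative usage sketch, assuming the OceanDataset `od` exposes the class
# above through a plot accessor (e.g. `od.plot`); the variable names 'Temp'
# and 'Sigma0' and the keyword values are assumptions, not guaranteed
# dataset contents:
#
#   ax = od.plot.vertical_section(varName='Temp',
#                                 contourName='Sigma0',
#                                 meanAxes=['time'],
#                                 plotType='contourf',
#                                 contour_kwargs={'levels': 10},
#                                 clabel_kwargs={'fmt': '%.1f'})
#   ax.get_figure().savefig('temp_section.png')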
|
the-stack_106_29949 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
#from mliv.neuralnet.deepiv_fit import deep_iv_fit
from mliv.neuralnet.rbflayer import gaussian, inverse_multiquadric
from mliv.neuralnet import AGMM, KernelLayerMMDGMM, CentroidMMDGMM, KernelLossAGMM, MMDGMM
p = 0.1 # dropout prob of dropout layers throughout notebook
n_hidden = 100 # width of hidden layers throughout notebook
# For any method that use a projection of z into features g(z)
g_features = 100
# The kernel function
kernel_fn = gaussian
# kernel_fn = inverse_multiquadric
# Training params
learner_lr = 1e-4
adversary_lr = 1e-4
learner_l2 = 1e-3
adversary_l2 = 1e-4
adversary_norm_reg = 1e-3
n_epochs = 300
bs = 100
sigma = 2.0 / g_features
n_centers = 100
device = torch.cuda.current_device() if torch.cuda.is_available() else None
def _get_learner(n_t):
return nn.Sequential(nn.Dropout(p=p), nn.Linear(n_t, n_hidden), nn.LeakyReLU(),
nn.Dropout(p=p), nn.Linear(n_hidden, 1))
def _get_adversary(n_z):
return nn.Sequential(nn.Dropout(p=p), nn.Linear(n_z, n_hidden), nn.LeakyReLU(),
nn.Dropout(p=p), nn.Linear(n_hidden, 1))
def _get_adversary_g(n_z):
return nn.Sequential(nn.Dropout(p=p), nn.Linear(n_z, n_hidden), nn.LeakyReLU(),
nn.Dropout(p=p), nn.Linear(n_hidden, g_features), nn.ReLU())
def _get(opts, key, default):
return opts[key] if (key in opts) else default
def _get_model_opt(opts, key, default):
    model_enc = _get(opts, key, default)
return ('avg' if model_enc == 0 else 'final')
def agmm(data, opts):
print("GPU:", torch.cuda.is_available())
T_test, Z, T, Y = map(lambda x: torch.Tensor(x), data)
learner = _get_learner(T.shape[1])
adversary_fn = _get_adversary(Z.shape[1])
agmm = AGMM(learner, adversary_fn).fit(Z, T, Y, learner_lr=learner_lr, adversary_lr=adversary_lr,
learner_l2=learner_l2, adversary_l2=adversary_l2,
n_epochs=_get(
opts, 'n_epochs', n_epochs),
bs=_get(opts, 'bs', bs),
model_dir=str(Path.home()),
device=device)
return agmm.predict(T_test.to(device),
model=_get_model_opt(opts, 'model', 0), burn_in=_get(opts, 'burnin', 0))
def klayerfixed(data, opts):
T_test, Z, T, Y = map(lambda x: torch.Tensor(x), data)
n_z = Z.shape[1]
centers = np.tile(
np.linspace(-4, 4, n_centers).reshape(-1, 1), (1, n_z))
sigmas = np.ones((n_centers,)) * 2 / n_z
learner = _get_learner(T.shape[1])
mmdgmm_fixed = KernelLayerMMDGMM(learner, lambda x: x, n_z, n_centers, kernel_fn,
centers=centers, sigmas=sigmas, trainable=False)
mmdgmm_fixed.fit(Z, T, Y, learner_l2=learner_l2, adversary_l2=adversary_l2,
adversary_norm_reg=adversary_norm_reg,
learner_lr=learner_lr, adversary_lr=adversary_lr,
n_epochs=_get(opts, 'n_epochs', n_epochs),
bs=_get(opts, 'bs', bs),
model_dir=str(Path.home()),
device=device)
return mmdgmm_fixed.predict(T_test.to(device),
model=_get_model_opt(opts, 'model', 0), burn_in=_get(opts, 'burnin', 0))
def klayertrained(data, opts):
T_test, Z, T, Y = map(lambda x: torch.Tensor(x), data)
centers = np.random.uniform(-4, 4, size=(n_centers, g_features))
sigmas = np.ones((n_centers,)) * sigma
learner = _get_learner(T.shape[1])
adversary_g = _get_adversary_g(Z.shape[1])
klayermmdgmm = KernelLayerMMDGMM(learner, adversary_g, g_features,
n_centers, kernel_fn, centers=centers, sigmas=sigmas)
klayermmdgmm.fit(Z, T, Y, learner_l2=learner_l2, adversary_l2=adversary_l2,
adversary_norm_reg=adversary_norm_reg,
learner_lr=learner_lr, adversary_lr=adversary_lr,
n_epochs=_get(opts, 'n_epochs', n_epochs),
bs=_get(opts, 'bs', bs),
model_dir=str(Path.home()),
device=device)
return klayermmdgmm.predict(T_test.to(device),
model=_get_model_opt(opts, 'model', 0), burn_in=_get(opts, 'burnin', 0))
def centroidmmd(data, opts):
_, Z, _, _ = data
centers = KMeans(n_clusters=n_centers).fit(Z).cluster_centers_
T_test, Z, T, Y = map(lambda x: torch.Tensor(x), data)
learner = _get_learner(T.shape[1])
adversary_g = _get_adversary_g(Z.shape[1])
centroid_mmd = CentroidMMDGMM(
learner, adversary_g, kernel_fn, centers, np.ones(n_centers) * sigma)
centroid_mmd.fit(Z, T, Y, learner_l2=learner_l2, adversary_l2=adversary_l2,
adversary_norm_reg=adversary_norm_reg,
learner_lr=learner_lr, adversary_lr=adversary_lr,
n_epochs=_get(opts, 'n_epochs', n_epochs),
bs=_get(opts, 'bs', bs),
model_dir=str(Path.home()),
device=device)
return centroid_mmd.predict(T_test.to(device),
model=_get_model_opt(opts, 'model', 0), burn_in=_get(opts, 'burnin', 0))
def klossgmm(data, opts):
T_test, Z, T, Y = map(lambda x: torch.Tensor(x), data)
learner = _get_learner(T.shape[1])
adversary_g = _get_adversary_g(Z.shape[1])
kernelgmm = KernelLossAGMM(learner, adversary_g, kernel_fn, sigma)
kernelgmm.fit(Z, T, Y, learner_l2=learner_l2**2, adversary_l2=adversary_l2,
learner_lr=learner_lr, adversary_lr=adversary_lr,
n_epochs=_get(opts, 'n_epochs', n_epochs),
bs=_get(opts, 'bs', bs),
model_dir=str(Path.home()),
device=device)
return kernelgmm.predict(T_test.to(device),
model=_get_model_opt(opts, 'model', 0), burn_in=_get(opts, 'burnin', 0))
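# Illustrative smoke test of the `agmm` wrapper above; the synthetic shapes,
# noise levels and the 2-epoch budget are assumptions chosen only so the demo
# runs quickly, not recommended settings.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n, n_instruments, n_treatments = 500, 2, 2
    Z_demo = rng.normal(size=(n, n_instruments))
    T_demo = (Z_demo @ rng.normal(size=(n_instruments, n_treatments))
              + 0.1 * rng.normal(size=(n, n_treatments)))
    Y_demo = np.abs(T_demo[:, [0]]) + 0.1 * rng.normal(size=(n, 1))
    T_test_demo = rng.normal(size=(100, n_treatments))
    demo_pred = agmm((T_test_demo, Z_demo, T_demo, Y_demo),
                     {'n_epochs': 2, 'bs': 50, 'model': 0, 'burnin': 0})
    print("AGMM demo predictions shape:", demo_pred.shape)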
|
the-stack_106_29951 | import tensorflow as tf
from baselines.ppo2 import ppo2
from baselines.common.models import build_impala_cnn
from baselines.common.mpi_util import setup_mpi_gpus
from procgen import ProcgenEnv
from baselines.common.vec_env import (
VecExtractDictObs,
VecMonitor,
VecFrameStack,
VecNormalize
)
from baselines import logger
from mpi4py import MPI
import argparse
def train_fn(env_name, num_envs, distribution_mode, num_levels, start_level, timesteps_per_proc, is_test_worker=False, log_dir='~/Desktop/cs182/final/train_procgen/results', comm=None):
learning_rate = 5e-4
ent_coef = .01
gamma = .999
lam = .95
# nsteps = 256
nsteps = 16
nminibatches = 8
ppo_epochs = 3
# ppo_epochs = 1
clip_range = .2
use_vf_clipping = True
mpi_rank_weight = 0 if is_test_worker else 1
num_levels = 0 if is_test_worker else num_levels
if log_dir is not None:
log_comm = comm.Split(1 if is_test_worker else 0, 0)
format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []
logger.configure(comm=log_comm, dir=log_dir, format_strs=format_strs)
logger.info("creating environment")
venv = ProcgenEnv(num_envs=num_envs, env_name=env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode)
venv = VecExtractDictObs(venv, "rgb")
venv = VecMonitor(
venv=venv, filename=None, keep_buf=100,
)
venv = VecNormalize(venv=venv, ob=False)
logger.info("creating tf session")
setup_mpi_gpus()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
sess = tf.Session(config=config)
sess.__enter__()
conv_fn = lambda x: build_impala_cnn(x, depths=[16,32,32], emb_size=256)
logger.info("training")
ppo2.learn(
env=venv,
network=conv_fn,
# total_timesteps=timesteps_per_proc,
total_timesteps=10240,
save_interval=0,
nsteps=nsteps,
nminibatches=nminibatches,
lam=lam,
gamma=gamma,
noptepochs=ppo_epochs,
log_interval=1,
ent_coef=ent_coef,
mpi_rank_weight=mpi_rank_weight,
clip_vf=use_vf_clipping,
comm=comm,
lr=learning_rate,
cliprange=clip_range,
update_fn=None,
init_fn=None,
vf_coef=0.5,
max_grad_norm=0.5,
)
def main():
parser = argparse.ArgumentParser(description='Process procgen training arguments.')
parser.add_argument('--env_name', type=str, default='coinrun')
parser.add_argument('--num_envs', type=int, default=64)
parser.add_argument('--distribution_mode', type=str, default='hard', choices=["easy", "hard", "exploration", "memory", "extreme"])
parser.add_argument('--num_levels', type=int, default=0)
parser.add_argument('--start_level', type=int, default=0)
parser.add_argument('--test_worker_interval', type=int, default=0)
parser.add_argument('--timesteps_per_proc', type=int, default=50_000_000)
args = parser.parse_args()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
is_test_worker = False
test_worker_interval = args.test_worker_interval
if test_worker_interval > 0:
is_test_worker = rank % test_worker_interval == (test_worker_interval - 1)
train_fn(args.env_name,
args.num_envs,
args.distribution_mode,
args.num_levels,
args.start_level,
args.timesteps_per_proc,
is_test_worker=is_test_worker,
comm=comm)
if __name__ == '__main__':
main()
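# Illustrative invocation sketch; the script file name, the environment names
# and the MPI worker counts below are assumptions for the example only:
#
#   python train_procgen.py --env_name coinrun --num_envs 64 \
#       --distribution_mode hard --num_levels 0 --start_level 0
#
# Under MPI, the last rank of every `test_worker_interval` block is held out
# as a test worker (see `is_test_worker` above):
#
#   mpiexec -n 4 python train_procgen.py --env_name starpilot \
#       --test_worker_interval 4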
|
the-stack_106_29956 | from typing import Tuple, List
from gomoku_enum import Player
import itertools
import random
import logging
import sys
logger = logging.getLogger("Brain")
#logger.setLevel("DEBUG")
DEFAULT_NB_LAYER = 1
class Brain:
def __init__(self, board, path=None, machine_learning_mode=False):
self.board = board # type: Board
# if machine_learning_mode:
# self.network = (NeuralNetwork.unserialize(path) if path is not None
# else NeuralNetwork(self.board.size * self.board.size, DEFAULT_NB_LAYER, 2, (self.board.size + 1) ** 2))
def begin(self):
"""
Returns:
            Tuple[int, int]: the x and y coordinates at which to put a piece
"""
v = self.board.size // 2
return (v, v)
def play(self):
"""
Returns:
            Tuple[int, int]: the x and y coordinates at which to put a piece
"""
d = self.get_euristic_tab()
logger.debug('\n'.join([f"{k} : {v}" for k, v in sorted(d.items(), key=lambda x: x[1], reverse=True) if v]))
r = max(d.items(), key=lambda x: x[1])
return r[0]
#return self.put_rock_randomly()
def get_euristic_tab(self):
"""
Returns:
            Dict[Tuple[int, int], int]: A dict mapping each empty coordinate to its heuristic score
"""
d = {i: 0 for i in ((x,y) for x, y, v in self.board.enumerate() if not v)}
for ls in self.board.enumerate_comp():
for x, y, pl in ls:
if (x, y) not in d:
continue
l = [v for _, _, v in ls]
nb_none, nb_ally, nb_enemy = (sum(v == p for x, y, v in ls) for p in (Player.NONE, Player.ALLY, Player.ENEMY))
                if nb_ally and nb_enemy:  # This alignment can no longer be won by either player
continue
if nb_ally == 4:
d[(x, y)] = 10000000
continue
if nb_enemy == 4:
d[(x, y)] = 1000000
continue
d[(x, y)] += self.no_border_pattern_bonus(l)
d[(x, y)] += (max(nb_ally, nb_enemy) ** 3) * nb_none
return d
def no_border_pattern_bonus(self, ls):
"""
Args:
            ls (List[Player]): cell values of a candidate alignment (coordinates already stripped)
Returns:
int
"""
for pl in (1, 2):
if ls == [0, pl, pl, pl, 0]:
return 100
return 0
def play_machine_learning(self):
output = list(self.network.get_output([player.value for _, _, player in self.board.enumerate()]))
logger.debug(f"Ai result is {output}")
return tuple(int(val * 19) for val in output)
def put_rock_randomly(self):
coords = [(i, j) for i, j, v in self.board.enumerate() if not v]
return random.choice(coords)
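# Illustrative smoke test: `Brain` only needs a board exposing `size` and
# `enumerate()` for `begin` and `put_rock_randomly`, so the stub below is
# enough here. It is an assumption for the demo, not the project's real
# Board class (which also provides `enumerate_comp()` used by `play`), and
# it relies on `Player.NONE` being falsy, as `put_rock_randomly` already does.
if __name__ == "__main__":
    class _StubBoard:
        size = 19
        def enumerate(self):
            return ((x, y, Player.NONE)
                    for x in range(self.size) for y in range(self.size))
    demo_brain = Brain(_StubBoard())
    print("opening move:", demo_brain.begin())
    print("random move:", demo_brain.put_rock_randomly())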
|
the-stack_106_29958 | """Contains all the Django fields for select2-chained."""
import copy
import logging
from django_select2.fields import NO_ERR_RESP, AutoModelSelect2Field
from .widgets import ChainedAutoSelect2Widget
__all__ = (
'ChainedAutoModelSelect2FieldMixin',
'ChainedAutoModelSelect2Field',
'RequestSpecificAutoModelSelect2Field',
'ChainedRequestSpecificAutoModelSelect2Field'
)
logger = logging.getLogger(__name__)
class RequestSpecificAutoModelSelect2Field(AutoModelSelect2Field):
"""
An AutoHeavy field whose queryset is determined from the current request parameter.
This is done by using get_queryset method instead of the class-level queryset property.
Allows using the same AutoHeavy field instance between multiple requests and having request-specific data.
"""
def get_queryset(self, request):
"""
Method that determines the queryset from the current request.
Must be implemented.
"""
raise NotImplementedError("get_queryset() must be implemented.")
def get_results(self, request, term, page, context):
"""
See :py:meth:`.views.Select2View.get_results`.
This implementation takes care of detecting if more results are available.
"""
if not hasattr(self, 'search_fields') or not self.search_fields:
raise ValueError('search_fields is required.')
# Add request parameter to get_queryset.
qs = self.get_queryset(request)
if qs is not None:
qs = copy.deepcopy(qs)
params = self.prepare_qs_params(request, term, self.search_fields)
if self.max_results:
min_ = (page - 1) * self.max_results
max_ = min_ + self.max_results + 1 # Fetch one extra row to check if there are more rows.
res = list(qs.filter(*params['or'], **params['and'])[min_:max_])
has_more = len(res) == (max_ - min_)
if has_more:
res = res[:-1]
else:
res = list(qs.filter(*params['or'], **params['and']))
has_more = False
res = [(getattr(obj, self.to_field_name), self.label_from_instance(obj), self.extra_data_from_instance(obj))
for obj in res]
else:
res = []
has_more = False
return NO_ERR_RESP, has_more, res
class ChainedAutoModelSelect2FieldMixin(object):
"""
A mixin for subclasses of AutoModelSelect2Field that adds chaining functionality.
The attached field gets filtered by another field in the form, specified by the chain_field attribute.
The selected option in the chain_field limits the queryset in the current field.
"""
def __init__(self, *args, **kwargs):
"""
Init method.
:param chain_field: related field name
:param model_field: real foreign key field name in the model
:param allow_empty: if true, displays all options when related field is empty
"""
self.chain_field = kwargs.pop('chain_field', None)
self.model_field = kwargs.pop('model_field', self.chain_field)
self.allow_empty = kwargs.pop('allow_empty', None)
select2_options = kwargs.pop('select2_options', None)
widget = kwargs.get('widget', None)
if not widget:
widget = ChainedAutoSelect2Widget(chain_field=self.chain_field, model_field=self.model_field,
select2_options=select2_options)
kwargs.update({
'widget': widget
})
self.chain_field = self.chain_field or widget.chain_field
self.model_field = self.model_field or widget.model_field
if not self.chain_field or not self.model_field:
raise NotImplementedError(u"Chain field and model field must be specified.")
super(ChainedAutoModelSelect2FieldMixin, self).__init__(*args, **kwargs)
def prepare_qs_params(self, request, search_term, search_fields):
"""Prepare queryset parameters for filtering."""
params = super(ChainedAutoModelSelect2FieldMixin, self).prepare_qs_params(request, search_term, search_fields)
chain_field_id = request.GET.get(self.chain_field, None)
if chain_field_id:
params['and'].update({self.model_field: chain_field_id})
elif not self.allow_empty:
params['and'].update({'pk__isnull': True})
return params
class ChainedAutoModelSelect2Field(ChainedAutoModelSelect2FieldMixin, AutoModelSelect2Field):
"""An :py:class:`AutoModelSelect2Field` with chaining functionality."""
pass
class ChainedRequestSpecificAutoModelSelect2Field(ChainedAutoModelSelect2FieldMixin,
RequestSpecificAutoModelSelect2Field):
"""A :py:class:`RequestSpecificAutoModelSelect2Field` with chaining functionality."""
pass
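# Illustrative usage sketch: a form whose "city" choices are chained to the
# selected "country". The Country/City models, querysets and search fields
# are assumptions for the example; only the chaining keywords come from the
# classes defined above.
#
#   from django import forms
#   from django_select2.fields import AutoModelSelect2Field
#
#   class CountryField(AutoModelSelect2Field):
#       queryset = Country.objects
#       search_fields = ['name__icontains']
#
#   class CityField(ChainedAutoModelSelect2Field):
#       queryset = City.objects
#       search_fields = ['name__icontains']
#
#   class AddressForm(forms.Form):
#       country = CountryField()
#       city = CityField(chain_field='country', model_field='country',
#                        allow_empty=False)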
|
the-stack_106_29959 | # coding: utf-8
"""Scikit-learn wrapper interface for LightGBM."""
from __future__ import absolute_import
import warnings
import numpy as np
from .basic import Dataset, LightGBMError, _ConfigAliases
from .compat import (SKLEARN_INSTALLED, SKLEARN_VERSION, _LGBMClassifierBase,
LGBMNotFittedError, _LGBMLabelEncoder, _LGBMModelBase,
_LGBMRegressorBase, _LGBMCheckXY, _LGBMCheckArray, _LGBMCheckConsistentLength,
_LGBMAssertAllFinite, _LGBMCheckClassificationTargets, _LGBMComputeSampleWeight,
argc_, range_, zip_, string_type, DataFrame, DataTable)
from .engine import train
class _ObjectiveFunctionWrapper(object):
"""Proxy class for objective function."""
def __init__(self, func):
"""Construct a proxy class.
This class transforms objective function to match objective function with signature ``new_func(preds, dataset)``
as expected by ``lightgbm.engine.train``.
Parameters
----------
func : callable
            Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)``
and returns (grad, hess):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group : array-like
Group/query data, used for ranking task.
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second order derivative (Hessian) for each sample point.
.. note::
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
and you should group grad and hess in this way as well.
"""
self.func = func
def __call__(self, preds, dataset):
"""Call passed function with appropriate arguments.
Parameters
----------
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
dataset : Dataset
The training dataset.
Returns
-------
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second order derivative (Hessian) for each sample point.
"""
labels = dataset.get_label()
argc = argc_(self.func)
if argc == 2:
grad, hess = self.func(labels, preds)
elif argc == 3:
grad, hess = self.func(labels, preds, dataset.get_group())
else:
raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
"""weighted for objective"""
weight = dataset.get_weight()
if weight is not None:
"""only one class"""
if len(weight) == len(grad):
grad = np.multiply(grad, weight)
hess = np.multiply(hess, weight)
else:
num_data = len(weight)
num_class = len(grad) // num_data
if num_class * num_data != len(grad):
raise ValueError("Length of grad and hess should equal to num_class * num_data")
for k in range_(num_class):
for i in range_(num_data):
idx = k * num_data + i
grad[idx] *= weight[i]
hess[idx] *= weight[i]
return grad, hess
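# Illustrative custom objective matching the 2-argument signature documented
# above: plain squared error, whose gradient is (y_pred - y_true) and whose
# Hessian is 1 for every sample. Shown as a comment; it is an example, not
# part of the wrapper.
#
#   def l2_objective(y_true, y_pred):
#       grad = y_pred - y_true
#       hess = np.ones_like(y_pred)
#       return grad, hess
#
#   # e.g. LGBMRegressor(objective=l2_objective)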
class _EvalFunctionWrapper(object):
"""Proxy class for evaluation function."""
def __init__(self, func):
"""Construct a proxy class.
This class transforms evaluation function to match evaluation function with signature ``new_func(preds, dataset)``
as expected by ``lightgbm.engine.train``.
Parameters
----------
func : callable
Expects a callable with following signatures:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and returns (eval_name, eval_result, is_higher_better) or
list of (eval_name, eval_result, is_higher_better):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
weight : array-like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
.. note::
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
"""
self.func = func
def __call__(self, preds, dataset):
"""Call passed function with appropriate arguments.
Parameters
----------
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
dataset : Dataset
The training dataset.
Returns
-------
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
"""
labels = dataset.get_label()
argc = argc_(self.func)
if argc == 2:
return self.func(labels, preds)
elif argc == 3:
return self.func(labels, preds, dataset.get_weight())
elif argc == 4:
return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
class LGBMModel(_LGBMModelBase):
"""Implementation of the scikit-learn API for LightGBM."""
def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1,
learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None,
min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,
subsample=1., subsample_freq=0, colsample_bytree=1.,
reg_alpha=0., reg_lambda=0., random_state=None,
n_jobs=-1, silent=True, importance_type='split', **kwargs):
r"""Construct a gradient boosting model.
Parameters
----------
boosting_type : string, optional (default='gbdt')
'gbdt', traditional Gradient Boosting Decision Tree.
'dart', Dropouts meet Multiple Additive Regression Trees.
'goss', Gradient-based One-Side Sampling.
'rf', Random Forest.
num_leaves : int, optional (default=31)
Maximum tree leaves for base learners.
max_depth : int, optional (default=-1)
Maximum tree depth for base learners, <=0 means no limit.
learning_rate : float, optional (default=0.1)
Boosting learning rate.
You can use ``callbacks`` parameter of ``fit`` method to shrink/adapt learning rate
in training using ``reset_parameter`` callback.
Note, that this will ignore the ``learning_rate`` argument in training.
n_estimators : int, optional (default=100)
Number of boosted trees to fit.
subsample_for_bin : int, optional (default=200000)
Number of samples for constructing bins.
objective : string, callable or None, optional (default=None)
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
Default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
class_weight : dict, 'balanced' or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
Use this parameter only for multi-class classification task;
for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
Note, that the usage of all these parameters will result in poor estimates of the individual class probabilities.
You may want to consider performing probability calibration
(https://scikit-learn.org/stable/modules/calibration.html) of your model.
The 'balanced' mode uses the values of y to automatically adjust weights
inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
If None, all classes are supposed to have weight one.
Note, that these weights will be multiplied with ``sample_weight`` (passed through the ``fit`` method)
if ``sample_weight`` is specified.
min_split_gain : float, optional (default=0.)
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : float, optional (default=1e-3)
Minimum sum of instance weight (hessian) needed in a child (leaf).
min_child_samples : int, optional (default=20)
Minimum number of data needed in a child (leaf).
subsample : float, optional (default=1.)
Subsample ratio of the training instance.
subsample_freq : int, optional (default=0)
            Frequency of subsampling, <=0 means subsampling is disabled.
colsample_bytree : float, optional (default=1.)
Subsample ratio of columns when constructing each tree.
reg_alpha : float, optional (default=0.)
L1 regularization term on weights.
reg_lambda : float, optional (default=0.)
L2 regularization term on weights.
random_state : int or None, optional (default=None)
Random number seed.
If None, default seeds in C++ code will be used.
n_jobs : int, optional (default=-1)
Number of parallel threads.
silent : bool, optional (default=True)
Whether to print messages while running boosting.
importance_type : string, optional (default='split')
The type of feature importance to be filled into ``feature_importances_``.
If 'split', result contains numbers of times the feature is used in a model.
If 'gain', result contains total gains of splits which use the feature.
**kwargs
Other parameters for the model.
Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.
.. warning::
\*\*kwargs is not supported in sklearn, it may cause unexpected issues.
Attributes
----------
n_features_ : int
The number of features of fitted model.
classes_ : array of shape = [n_classes]
The class label array (only for classification problem).
n_classes_ : int
The number of classes (only for classification problem).
best_score_ : dict or None
The best score of fitted model.
best_iteration_ : int or None
The best iteration of fitted model if ``early_stopping_rounds`` has been specified.
objective_ : string or callable
The concrete objective used while fitting this model.
booster_ : Booster
The underlying Booster of this model.
evals_result_ : dict or None
The evaluation results if ``early_stopping_rounds`` has been specified.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
Note
----
A custom objective function can be provided for the ``objective`` parameter.
In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess`` or
``objective(y_true, y_pred, group) -> grad, hess``:
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group : array-like
Group/query data, used for ranking task.
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
and you should group grad and hess in this way as well.
"""
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for this module')
elif SKLEARN_VERSION > '0.21.3':
raise RuntimeError("The last supported version of scikit-learn is 0.21.3.\n"
"Found version: {0}.".format(SKLEARN_VERSION))
self.boosting_type = boosting_type
self.objective = objective
self.num_leaves = num_leaves
self.max_depth = max_depth
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.subsample_for_bin = subsample_for_bin
self.min_split_gain = min_split_gain
self.min_child_weight = min_child_weight
self.min_child_samples = min_child_samples
self.subsample = subsample
self.subsample_freq = subsample_freq
self.colsample_bytree = colsample_bytree
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.random_state = random_state
self.n_jobs = n_jobs
self.silent = silent
self.importance_type = importance_type
self._Booster = None
self._evals_result = None
self._best_score = None
self._best_iteration = None
self._other_params = {}
self._objective = objective
self.class_weight = class_weight
self._class_weight = None
self._class_map = None
self._n_features = None
self._classes = None
self._n_classes = None
self.set_params(**kwargs)
def _more_tags(self):
return {'allow_nan': True,
'X_types': ['2darray', 'sparse', '1dlabels']}
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : bool, optional (default=True)
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = super(LGBMModel, self).get_params(deep=deep)
params.update(self._other_params)
return params
def set_params(self, **params):
"""Set the parameters of this estimator.
Parameters
----------
**params
Parameter names with their new values.
Returns
-------
self : object
Returns self.
"""
for key, value in params.items():
setattr(self, key, value)
if hasattr(self, '_' + key):
setattr(self, '_' + key, value)
self._other_params[key] = value
return self
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_group=None,
eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Build a gradient boosting model from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input feature matrix.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in regression).
sample_weight : array-like of shape = [n_samples] or None, optional (default=None)
Weights of training data.
init_score : array-like of shape = [n_samples] or None, optional (default=None)
Init score of training data.
group : array-like or None, optional (default=None)
Group data of training data.
eval_set : list or None, optional (default=None)
A list of (X, y) tuple pairs to use as validation sets.
eval_names : list of strings or None, optional (default=None)
Names of eval_set.
eval_sample_weight : list of arrays or None, optional (default=None)
Weights of eval data.
eval_class_weight : list or None, optional (default=None)
Class weights of eval data.
eval_init_score : list of arrays or None, optional (default=None)
Init score of eval data.
eval_group : list of arrays or None, optional (default=None)
Group data of eval data.
eval_metric : string, list of strings, callable or None, optional (default=None)
If string, it should be a built-in evaluation metric to use.
If callable, it should be a custom evaluation metric, see note below for more details.
In either case, the ``metric`` from the model parameters will be evaluated and used as well.
Default: 'l2' for LGBMRegressor, 'logloss' for LGBMClassifier, 'ndcg' for LGBMRanker.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping. The model will train until the validation score stops improving.
Validation score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric, set the ``first_metric_only`` parameter to ``True``
in additional parameters ``**kwargs`` of the model constructor.
verbose : bool or int, optional (default=True)
Requires at least one evaluation data.
If True, the eval metric on the eval set is printed at each boosting stage.
If int, the eval metric on the eval set is printed at every ``verbose`` boosting stage.
The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.
.. rubric:: Example
With ``verbose`` = 4 and at least one item in ``eval_set``,
an evaluation metric is printed every 4 (instead of 1) boosting stages.
feature_name : list of strings or 'auto', optional (default='auto')
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default='auto')
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
callbacks : list of callback functions or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
init_model : string, Booster, LGBMModel or None, optional (default=None)
Filename of LightGBM model, Booster instance or LGBMModel instance used for continue training.
Returns
-------
self : object
Returns self.
Note
----
Custom eval function expects a callable with following signatures:
``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
``func(y_true, y_pred, weight, group)``
and returns (eval_name, eval_result, is_higher_better) or
list of (eval_name, eval_result, is_higher_better):
y_true : array-like of shape = [n_samples]
The target values.
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
weight : array-like of shape = [n_samples]
The weight of samples.
group : array-like
Group/query data, used for ranking task.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
"""
if self._objective is None:
if isinstance(self, LGBMRegressor):
self._objective = "regression"
elif isinstance(self, LGBMClassifier):
self._objective = "binary"
elif isinstance(self, LGBMRanker):
self._objective = "lambdarank"
else:
raise ValueError("Unknown LGBMModel type.")
if callable(self._objective):
self._fobj = _ObjectiveFunctionWrapper(self._objective)
else:
self._fobj = None
evals_result = {}
params = self.get_params()
# user can set verbose with kwargs, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and self.silent:
params['verbose'] = -1
params.pop('silent', None)
params.pop('importance_type', None)
params.pop('n_estimators', None)
params.pop('class_weight', None)
for alias in _ConfigAliases.get('objective'):
params.pop(alias, None)
if self._n_classes is not None and self._n_classes > 2:
for alias in _ConfigAliases.get('num_class'):
params.pop(alias, None)
params['num_class'] = self._n_classes
if hasattr(self, '_eval_at'):
for alias in _ConfigAliases.get('eval_at'):
params.pop(alias, None)
params['eval_at'] = self._eval_at
params['objective'] = self._objective
if self._fobj:
params['objective'] = 'None' # objective = nullptr for unknown objective
if callable(eval_metric):
feval = _EvalFunctionWrapper(eval_metric)
else:
feval = None
# register default metric for consistency with callable eval_metric case
original_metric = self._objective if isinstance(self._objective, string_type) else None
if original_metric is None:
# try to deduce from class instance
if isinstance(self, LGBMRegressor):
original_metric = "l2"
elif isinstance(self, LGBMClassifier):
original_metric = "multi_logloss" if self._n_classes > 2 else "binary_logloss"
elif isinstance(self, LGBMRanker):
original_metric = "ndcg"
# overwrite default metric by explicitly set metric
for metric_alias in _ConfigAliases.get("metric"):
if metric_alias in params:
original_metric = params.pop(metric_alias)
# concatenate metric from params (or default if not provided in params) and eval_metric
original_metric = [original_metric] if isinstance(original_metric, (string_type, type(None))) else original_metric
eval_metric = [eval_metric] if isinstance(eval_metric, (string_type, type(None))) else eval_metric
params['metric'] = [e for e in eval_metric if e not in original_metric] + original_metric
params['metric'] = [metric for metric in params['metric'] if metric is not None]
if not isinstance(X, (DataFrame, DataTable)):
_X, _y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
_LGBMCheckConsistentLength(_X, _y, sample_weight)
else:
_X, _y = X, y
if self._class_weight is None:
self._class_weight = self.class_weight
if self._class_weight is not None:
class_sample_weight = _LGBMComputeSampleWeight(self._class_weight, y)
if sample_weight is None or len(sample_weight) == 0:
sample_weight = class_sample_weight
else:
sample_weight = np.multiply(sample_weight, class_sample_weight)
self._n_features = _X.shape[1]
def _construct_dataset(X, y, sample_weight, init_score, group, params):
return Dataset(X, label=y, weight=sample_weight, group=group,
init_score=init_score, params=params)
train_set = _construct_dataset(_X, _y, sample_weight, init_score, group, params)
valid_sets = []
if eval_set is not None:
def _get_meta_data(collection, name, i):
if collection is None:
return None
elif isinstance(collection, list):
return collection[i] if len(collection) > i else None
elif isinstance(collection, dict):
return collection.get(i, None)
else:
raise TypeError('{} should be dict or list'.format(name))
if isinstance(eval_set, tuple):
eval_set = [eval_set]
for i, valid_data in enumerate(eval_set):
# reduce cost for prediction training data
if valid_data[0] is X and valid_data[1] is y:
valid_set = train_set
else:
valid_weight = _get_meta_data(eval_sample_weight, 'eval_sample_weight', i)
valid_class_weight = _get_meta_data(eval_class_weight, 'eval_class_weight', i)
if valid_class_weight is not None:
if isinstance(valid_class_weight, dict) and self._class_map is not None:
valid_class_weight = {self._class_map[k]: v for k, v in valid_class_weight.items()}
valid_class_sample_weight = _LGBMComputeSampleWeight(valid_class_weight, valid_data[1])
if valid_weight is None or len(valid_weight) == 0:
valid_weight = valid_class_sample_weight
else:
valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
valid_init_score = _get_meta_data(eval_init_score, 'eval_init_score', i)
valid_group = _get_meta_data(eval_group, 'eval_group', i)
valid_set = _construct_dataset(valid_data[0], valid_data[1],
valid_weight, valid_init_score, valid_group, params)
valid_sets.append(valid_set)
if isinstance(init_model, LGBMModel):
init_model = init_model.booster_
self._Booster = train(params, train_set,
self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, fobj=self._fobj, feval=feval,
verbose_eval=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks, init_model=init_model)
if evals_result:
self._evals_result = evals_result
if early_stopping_rounds is not None:
self._best_iteration = self._Booster.best_iteration
self._best_score = self._Booster.best_score
# free dataset
self._Booster.free_dataset()
del train_set, valid_sets
return self
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
X_leaves : array-like of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not isinstance(X, (DataFrame, DataTable)):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self._Booster.predict(X, raw_score=raw_score, num_iteration=num_iteration,
pred_leaf=pred_leaf, pred_contrib=pred_contrib, **kwargs)
@property
def n_features_(self):
"""Get the number of features of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
return self._n_features
@property
def best_score_(self):
"""Get the best score of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
return self._best_score
@property
def best_iteration_(self):
"""Get the best iteration of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping_rounds beforehand.')
return self._best_iteration
@property
def objective_(self):
"""Get the concrete objective used while fitting this model."""
if self._n_features is None:
raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
return self._objective
@property
def booster_(self):
"""Get the underlying lightgbm Booster of this model."""
if self._Booster is None:
raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
return self._Booster
@property
def evals_result_(self):
"""Get the evaluation results."""
if self._n_features is None:
raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
return self._evals_result
@property
def feature_importances_(self):
"""Get feature importances.
.. note::
``importance_type`` attribute is passed to the function
to configure the type of importance values to be extracted.
"""
if self._n_features is None:
raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
return self._Booster.feature_importance(importance_type=self.importance_type)
class LGBMRegressor(LGBMModel, _LGBMRegressorBase):
"""LightGBM regressor."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric=None, early_stopping_rounds=None,
verbose=True, feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, eval_set=eval_set,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks, init_model=init_model)
return self
_base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]
+ _base_doc[_base_doc.find('eval_init_score :'):])
class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
"""LightGBM classifier."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric=None,
early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
_LGBMAssertAllFinite(y)
_LGBMCheckClassificationTargets(y)
self._le = _LGBMLabelEncoder().fit(y)
_y = self._le.transform(y)
self._class_map = dict(zip_(self._le.classes_, self._le.transform(self._le.classes_)))
if isinstance(self.class_weight, dict):
self._class_weight = {self._class_map[k]: v for k, v in self.class_weight.items()}
self._classes = self._le.classes_
self._n_classes = len(self._classes)
if self._n_classes > 2:
# Switch to using a multiclass objective in the underlying LGBM instance
ova_aliases = {"multiclassova", "multiclass_ova", "ova", "ovr"}
if self._objective not in ova_aliases and not callable(self._objective):
self._objective = "multiclass"
if eval_metric in {'logloss', 'binary_logloss'}:
eval_metric = "multi_logloss"
elif eval_metric in {'error', 'binary_error'}:
eval_metric = "multi_error"
else:
if eval_metric in {'logloss', 'multi_logloss'}:
eval_metric = 'binary_logloss'
elif eval_metric in {'error', 'multi_error'}:
eval_metric = 'binary_error'
# do not modify args, as it causes errors in model selection tools
valid_sets = None
if eval_set is not None:
if isinstance(eval_set, tuple):
eval_set = [eval_set]
valid_sets = [None] * len(eval_set)
for i, (valid_x, valid_y) in enumerate(eval_set):
if valid_x is X and valid_y is y:
valid_sets[i] = (valid_x, _y)
else:
valid_sets[i] = (valid_x, self._le.transform(valid_y))
super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,
init_score=init_score, eval_set=valid_sets,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks, init_model=init_model)
return self
fit.__doc__ = LGBMModel.fit.__doc__
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Docstring is inherited from the LGBMModel."""
result = self.predict_proba(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if callable(self._objective) or raw_score or pred_leaf or pred_contrib:
return result
else:
class_index = np.argmax(result, axis=1)
return self._le.inverse_transform(class_index)
predict.__doc__ = LGBMModel.predict.__doc__
def predict_proba(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Return the predicted probability for each class for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
**kwargs
Other parameters for the prediction.
Returns
-------
predicted_probability : array-like of shape = [n_samples, n_classes]
The predicted probability for each class for each sample.
X_leaves : array-like of shape = [n_samples, n_trees * n_classes]
If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
X_SHAP_values : array-like of shape = [n_samples, (n_features + 1) * n_classes]
If ``pred_contrib=True``, the feature contributions for each sample.
"""
result = super(LGBMClassifier, self).predict(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if callable(self._objective) and not (raw_score or pred_leaf or pred_contrib):
warnings.warn("Cannot compute class probabilities or labels "
"due to the usage of customized objective function.\n"
"Returning raw scores instead.")
return result
elif self._n_classes > 2 or raw_score or pred_leaf or pred_contrib:
return result
else:
return np.vstack((1. - result, result)).transpose()
@property
def classes_(self):
"""Get the class label array."""
if self._classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._classes
@property
def n_classes_(self):
"""Get the number of classes."""
if self._n_classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._n_classes
class LGBMRanker(LGBMModel):
"""LightGBM ranker."""
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_group=None, eval_metric=None,
eval_at=[1, 2, 3, 4, 5], early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto',
callbacks=None, init_model=None):
"""Docstring is inherited from the LGBMModel."""
# check group data
if group is None:
raise ValueError("Should set group for ranking task")
if eval_set is not None:
if eval_group is None:
raise ValueError("Eval_group cannot be None when eval_set is not None")
elif len(eval_group) != len(eval_set):
raise ValueError("Length of eval_group should be equal to eval_set")
elif (isinstance(eval_group, dict)
and any(i not in eval_group or eval_group[i] is None for i in range_(len(eval_group)))
or isinstance(eval_group, list)
and any(group is None for group in eval_group)):
raise ValueError("Should set group for all eval datasets for ranking task; "
"if you use dict, the index should start from 0")
self._eval_at = eval_at
super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_group=eval_group,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks, init_model=init_model)
return self
_base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (_base_doc[:_base_doc.find('eval_class_weight :')]
+ _base_doc[_base_doc.find('eval_init_score :'):])
_base_doc = fit.__doc__
_before_early_stop, _early_stop, _after_early_stop = _base_doc.partition('early_stopping_rounds :')
fit.__doc__ = (_before_early_stop
+ 'eval_at : list of int, optional (default=[1, 2, 3, 4, 5])\n'
+ ' ' * 12 + 'The evaluation positions of the specified metric.\n'
+ ' ' * 8 + _early_stop + _after_early_stop)
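# Illustrative end-to-end sketch of the scikit-learn style workflow exposed by
# the wrappers above, written against the public `lightgbm` package import
# because this module itself uses relative imports. The dataset and the
# hyper-parameters are assumptions chosen only for the demo.
#
#   import numpy as np
#   from sklearn.datasets import make_classification
#   from sklearn.model_selection import train_test_split
#   from lightgbm import LGBMClassifier
#
#   X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
#   X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=0)
#   clf = LGBMClassifier(n_estimators=200, learning_rate=0.05, num_leaves=31)
#   clf.fit(X_tr, y_tr, eval_set=[(X_val, y_val)], eval_metric='binary_logloss',
#           early_stopping_rounds=20, verbose=20)
#   print(clf.best_iteration_, clf.predict_proba(X_val).shape)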
|
the-stack_106_29960 | """
Django settings for django_get_started project.
"""
from os import path
PROJECT_ROOT = path.dirname(path.abspath(path.dirname(__file__)))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = (
'localhost',
)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_ROOT, 'db.sqlite3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
LOGIN_URL = '/login'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path.join(PROJECT_ROOT, 'static').replace('\\', '/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_get_started.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django_get_started.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
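# Illustrative only (not part of the original template): during local development
# a console handler could be added alongside 'mail_admins', for example:
#
#     LOGGING['handlers']['console'] = {
#         'level': 'DEBUG',
#         'class': 'logging.StreamHandler',
#     }
#     LOGGING['loggers']['django.request']['handlers'].append('console')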
# Specify the default test runner.
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
the-stack_106_29962 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import functools
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils import _safe_indexing
from ..pairwise import pairwise_distances_chunked
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
"""Check that number of labels are valid.
Parameters
----------
n_labels : int
Number of labels.
n_samples : int
Number of samples.
"""
if not 1 < n_labels < n_samples:
raise ValueError(
"Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)"
% n_labels
)
def silhouette_score(
X, labels, *, metric="euclidean", sample_size=None, random_state=None, **kwds
):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If ``X`` is
the distance array itself, use ``metric="precomputed"``.
sample_size : int, default=None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, default=None
Determines random number generation for selecting a subset of samples.
Used when ``sample_size is not None``.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : array-like of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)), dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(
labels, weights=D_chunk[i], minlength=len(label_freqs)
)
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start : start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
def silhouette_samples(X, labels, *, metric="euclidean", **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
    is ``2 <= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=["csc", "csr"])
# Check for non-zero diagonal entries in precomputed distance matrix
if metric == "precomputed":
error_msg = ValueError(
"The precomputed distance matrix contains non-zero "
"elements on the diagonal. Use np.fill_diagonal(X, 0)."
)
if X.dtype.kind == "f":
atol = np.finfo(X.dtype).eps * 100
if np.any(np.abs(np.diagonal(X)) > atol):
raise ValueError(error_msg)
elif np.any(np.diagonal(X) != 0): # integral dtype
raise ValueError(error_msg)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
check_number_of_labels(len(le.classes_), n_samples)
kwds["metric"] = metric
reduce_func = functools.partial(
_silhouette_reduce, labels=labels, label_freqs=label_freqs
)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
intra_clust_dists, inter_clust_dists = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode="clip")
with np.errstate(divide="ignore", invalid="ignore"):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide="ignore", invalid="ignore"):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0.0, 0.0
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (
1.0
if intra_disp == 0.0
else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
)
def davies_bouldin_score(X, labels):
"""Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
intra_dists = np.zeros(n_labels)
centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = _safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(pairwise_distances(cluster_k, [centroid]))
centroid_distances = pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
centroid_distances[centroid_distances == 0] = np.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = np.max(combined_intra_dists / centroid_distances, axis=1)
return np.mean(scores)
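# Usage sketch (not part of the original module). With a regular scikit-learn
# installation the three metrics above are exposed through ``sklearn.metrics``;
# the snippet below is illustrative:
#
#     >>> from sklearn.cluster import KMeans
#     >>> from sklearn.datasets import make_blobs
#     >>> from sklearn.metrics import (silhouette_score,
#     ...                              calinski_harabasz_score,
#     ...                              davies_bouldin_score)
#     >>> X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#     >>> labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
#     >>> silhouette_score(X, labels)         # near 1 for well-separated clusters
#     >>> calinski_harabasz_score(X, labels)  # higher is better
#     >>> davies_bouldin_score(X, labels)     # near 0 for well-separated clusters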
|
the-stack_106_29964 | # ===============================================================================
#
# ===============================================================================
# Both Pi pins for each LED (its "segment" pin and its "digit" pin) are set as outputs.
# To light a segment, the "segment" pin is driven high ('1') while the corresponding
# "digit" pin is pulled low ('0') to select that display position.
import RPi.GPIO as GPIO # Import 'RPi.GPIO' module as 'GPIO'.
# Module contents can be called by 'GPIO.<var>'
# instead of 'RPi.GPIO.<var>'
GPIO.setmode(GPIO.BCM) # Set pin mode to BCM
GPIO.setwarnings(False) # Suppress display of warnings
import Segments # Import 'Segments' and 'Digits' modules
import Digits # These provide pin to segment/digit mapping
# ===============================================================================
for segment in Segments.segments: # Set both 'segment' and 'digit' pins as GPIO.OUT.
GPIO.setup(segment,GPIO.OUT) # Only need to do this once.
for digit in Digits.digits: # Used to do this multiple times with each:
GPIO.setup(digit,GPIO.OUT) # 'def Zero()' etc
# ===============================================================================
def DisplayClear(): # Reset display by making input Lo and output Hi. No segments display.
for segment in Segments.segments:
GPIO.output(segment,False)
for digit in Digits.digits:
GPIO.output(digit,True)
# ===============================================================================
# ToDo: Is there a way to loop and compact 'Segments' and 'Digits' sections?
# ===============================================================================
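# One possible compaction (re: the ToDo above; an illustrative sketch, not wired
# into Display() below, assuming Segments/Digits expose the same attribute names
# already used in this file): look the numeral and place pin lists up from dicts.
NUMERAL_SEGMENTS = {0: Segments.Zero, 1: Segments.One, 2: Segments.Two,
                    3: Segments.Three, 4: Segments.Four, 5: Segments.Five,
                    6: Segments.Six, 7: Segments.Seven, 8: Segments.Eight,
                    9: Segments.Nine}
PLACE_DIGITS = {1: Digits.One, 2: Digits.Two, 3: Digits.Three, 4: Digits.Four}
def DisplayCompact(Position, Number):
    DisplayClear()
    for segment in NUMERAL_SEGMENTS[Number]:   # drive segment pins high
        GPIO.output(segment, True)
    for digit in PLACE_DIGITS[Position]:       # pull the digit pin low to select it
        GPIO.output(digit, False)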
# ===============================================================================
# 'Segments' loop
def Decimal():
for segment in Segments.Decimal:
GPIO.output(segment,True)
def Zero():
for segment in Segments.Zero:
GPIO.output(segment,True)
def One():
for segment in Segments.One:
GPIO.output(segment,True)
def Two():
for segment in Segments.Two:
GPIO.output(segment,True)
def Three():
for segment in Segments.Three:
GPIO.output(segment,True)
def Four():
for segment in Segments.Four:
GPIO.output(segment,True)
def Five():
for segment in Segments.Five:
GPIO.output(segment,True)
def Six():
for segment in Segments.Six:
GPIO.output(segment,True)
def Seven():
for segment in Segments.Seven:
GPIO.output(segment,True)
def Eight():
for segment in Segments.Eight:
GPIO.output(segment,True)
def Nine():
for segment in Segments.Nine:
GPIO.output(segment,True)
# ===============================================================================
# 'Digits' loop
def PlaceOne():
for digit in Digits.One:
GPIO.output(digit,False)
def PlaceTwo():
for digit in Digits.Two:
GPIO.output(digit,False)
def PlaceThree():
for digit in Digits.Three:
GPIO.output(digit,False)
def PlaceFour():
for digit in Digits.Four:
GPIO.output(digit,False)
# ===============================================================================
# Map a numeric value (0-9) and a display position (1-4) to the segment/digit functions above.
def Display(Position,Number):
DisplayClear()
if Number == 0:
Zero()
if Number == 1:
One()
if Number == 2:
Two()
if Number == 3:
Three()
if Number == 4:
Four()
if Number == 5:
Five()
if Number == 6:
Six()
if Number == 7:
Seven()
if Number == 8:
Eight()
if Number == 9:
Nine()
if Position == 1:
PlaceOne()
if Position == 2:
PlaceTwo()
if Position == 3:
PlaceThree()
if Position == 4:
PlaceFour()
# ===============================================================================
#
# ===============================================================================
|
the-stack_106_29966 | # SPDX-License-Identifier: Apache-2.0
# Copyright 2021 IBM Corp.
import unittest
import subprocess
import os
import filecmp
import sys
MEM_ERR = 101
SECTOOLS="../secvarctl-cov"
SECVARPATH="/sys/firmware/secvar/vars/"
goodAuths=[]
badAuths=[]
goodESLs=[]
goodCRTs=[]
brokenAuths=[]
brokenESLs=[]
brokenCrts=[]
brokenPkcs7s=[]
secvarctlCommands=[
[["--usage"], True],
[["--help"], True],
[["-v"], False], #no command
[[], False],#no commands
[["foobar"], False]#bad command
]
ppcSecVarsRead=[
[["read"], True],
[["read", "-v"], True], [["read", "-r"], True], #verbose and raw
[["read", "badVarname"], False], #bad var name
]
#=[[command], return expected]
readCommands=[
[["-p","./testenv/"], True], #no args and change path
[["-f", "./testenv/PK/data"], True], #give esl file
[["-p","./testenv/","PK"], True],[["-p","./testenv/","KEK"], True],
[["-p","./testenv/","db"], True],[["-p","./testenv/","dbx"], True],#print only PK, KEK,and dbx
[["--usage"], True],[["--help"], True], #usage and help
[["-f", "./testenv/db/data", "-r"], True],#print raw data from file
[["-p", "./testenv/", "-r"], True], #print raw data from current vars
[["-p", "."], False],#bad path
[["-f", "./testdata/db_by_PK.auth"], False],#given authfile instead of esl
[["-p"], False], #only -p no path
[["-f"], False],#only -f no file
[["-f","-p","-f"], False], #idek but should fail
[["-f", "foo"], False], #fake file should fail
]
verifyCommands=[
[["--usage"], True],[["--help"], True],
[["-c", "PK","./testenv/PK/data", "-u", "db","./testdata/db_by_PK.auth"], True],#update with current vars set and update set
[["-c", "PK","./testenv/PK/data","KEK","./testenv/KEK/data","db","./testenv/db/data","-u", "db","./testdata/db_by_PK.auth", "KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/PK_by_PK.auth" ], True], #update chain with given current vars set
[["-p","./testenv/","-u", "db","./testdata/db_by_PK.auth", "KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/PK_by_PK.auth" ], True], #update chain with path set
[["-p", "./testenv/", "-u", "db", "./testdata/db_by_PK.auth", "db", "./testdata/db_by_KEK.auth"], True], #submit newer update after older
[["-c", "PK","./testenv/PK/data", "KEK", "./testenv/KEK/foo", "-u", "db","./testdata/db_by_PK.auth"], True],#KEK bad path, should continue
[["-p","./testenv/", "-u", "db", "./testdata/brokenFiles/1db_by_PK.auth","KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/PK_by_PK.auth" ], False], #update chain with one broken auth file should fail
[["-p","./testenv/", "-u", "db", "./testdata/db_by_PK.auth","KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/bad_PK_by_db.auth" ], False], #update chain with one improperly signed auth file should fail
[["-u" ,"db", "./testdata/db_by_PK.auth","-p"], False], #no path given, should fail
[["-c","-u"], False],#no vars given
[["-c","PK","./testenv/PK/data"], False],#no updates given
[["-p", "./testenv", "-u", "db", "./testdata/db_by_KEK.auth", "db", "./testdata/db_by_PK.auth"], False], #submit older update after newer
[["-p","./testenv/","-u", "KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/PK_by_PK.auth","db","./testdata/db_by_PK.auth" ], False],#update chain with bad order
[["-v"], False], #verify no args
[["-u", "db", "notRealFile.auth"], False], #not real file
[["-u", "./testenv/db_by_PK.auth", "db"], False],#wrong order
[["-u", "PK"], False],#no file given
[["-v", "-w", "-c", "PK","./testenv/PK/data","KEK","./testenv/KEK/data","db","./testenv/db/data","-u", "db","./testdata/db_by_PK.auth", "KEK", "./testdata/KEK_by_PK.auth", "PK", "./testdata/PK_by_PK.auth" ], False],#no where to write it too so should fail
[["-p", "./testenv/", "-u", "TS", "testdata/db_by_KEK.auth"], False], #cannot update TS variable, Its illegal, dont do it...ever
[["-c", "PK","./testenv/PK/data", "-u", "db","./testdata/db_by_PK.auth","-u", "KEK","./testdata/KEK_by_PK.auth"], False],#update vars set twice
[["-c", "PK","./testenv/PK/data", "./testenv/KEK/data", "KEK", "-u", "db","./testdata/db_by_PK.auth"], False],#current vars bad format
[["-c", "PK", "KEK", "./testenv/PK/data", "./testenv/KEK/data", "-u", "db","./testdata/db_by_PK.auth"], False],#current vars bad format
[["-c", "PK", "./testenv/PK/data", "KEK", "-u", "db","./testdata/db_by_PK.auth"], False],#current vars bad format
[["-c", "KEK", "./testenv/KEK/data", "-u", "PK", "./testdata/PK_by_PK.auth", "db", "./testdata/bad_db_by_db.auth"], False]
]
writeCommands=[
[["--usage"], True],[["--help"], True],
[["KEK","./testdata/KEK_by_PK.auth", "-p", "./testenv/"], True], #different ordering should still work
[["KEK","./testdata/KEK_by_PK.auth", "-p", "./testenv/","-v"], True], #different ordering should still work with verbose
[["TS", "./testdata/KEK_by_PK.auth", "-p", "./testenv/"], False], #no TS varible updates allowed
[["db", "foo.file"], False], #bad auth file
[["KEK","-v", "-p", "./testenv/"], False], #should fail, no file
[["KEK","./testdata/KEK_by_PK.auth", "-p"], False],#no path should fail
[["KeK","./testdata/KEK_by_PK.auth", "-p", "./testenv/"], False], #bad var name should fail
[["KEK","./testdata/KEK_by_PK.auth", "-p", "./testenvironement/"], False], #bad path should fail
[["db"], False], #no authfile
[[], False]#no auth or var
]
validateCommands=[
[["--usage"], True],[["--help"], True],
[["-v"], False],#no input file
[["thisDontExist.auth"], False],#nonexistent file
[["-e"], False], #no esl
[["-c"], False], # no crt
[["-p"], False],#no pkcs7
[["-p","./testdata/db_by_PK.auth"], False],#give auth as pkcs7
]
# These aren't really tests with bad envs; rather, they are tests that use two commands to run.
badEnvCommands=[ #[arr command to skew env, output of first command, arr command for sectool, expected result]
[["rm", "./testenv/KEK/size"],None,["read", "-p", "./testenv/", "KEK"], False], #remove size and it should fail
[["rm", "./testenv/KEK/size"],None,["read", "-p", "./testenv/"], True], #remove size but as long as one is readable then it is ok
[['echo', '"hey fail!"'],"./testenv/db/size",["read", "-p", "./testenv/", "db"], False], #read from ascii size file should fail
[["dd" , "if=./testdata/goldenKeys/KEK/data", "of=./testenv/KEK/data", "count=100", "bs=1"],"log.txt",["verify", "-v","-p", "./testenv/", "-u","db", "./testdata/db_by_KEK.auth"], False], #verify against path with bad esl files should fail, modified THAT SHOULD NEVER HAPPEN!
[["rm", "-r", "./testenv/db","./testenv/dbx","./testenv/KEK","./testenv/PK", "./testenv/TS" ],None,["verify","-v","-p", "./testenv/","-u","PK", "./testdata/PK_by_PK.auth"], True],# no data in path should enter setup mode
[["cp","./testdata/brokenFiles/empty.esl" ,"./testenv/PK/data" ],None,["verify","-v","-p", "./testenv/","-u","PK", "./testdata/PK_by_PK.auth"], True],# no data in pk ==setup mode
[["rm","./testenv/db/update"],None,["verify","-v","-w","-p", "./testenv/","-u","db", "./testdata/db_by_PK.auth"], False],# no update file should exit
[["cp","./testdata/brokenFiles/empty.esl" ,"./testenv/PK/data" ],None,["read","-p", "./testenv/"], True],# Pk will be empty but other files will have things
[["cp","./testdata/brokenFiles/empty.esl" ,"./testenv/PK/data" ],None,["read","-p", "./testenv/", "PK"], False],# Pk will be empty, nothing else read so overall failure
[["echo", "16"], "./testenv/TS/size", ["verify", "-v" , "-p", "./testenv/", "-u", "PK", "./testdata/PK_by_PK.auth"], False],
[["dd", "if=/dev/zero", "of=./testenv/TS/data", "count=4", "bs=16"], None, ["verify", "-p", "./testenv/", "-u", "PK", "testdata/PK_by_PK.auth"], True], #If timestamp entry for a variable is empty than thats okay
[["echo", "0"], "./testenv/KEK/size", ["verify", "-p", "./testenv/", "-u", "db", "./testdata/db_by_PK.auth"], True] #an empty KEK should not interupt db by PK verification
]
def command(args, out=None):#stores last log of function into log file
if out:
#if memory tests being done, use valgrind as well
with open(out, "w") as f:
f.write("\n\n**********COMMAND RAN: $"+ ' '.join(args) +"\n")
result = subprocess.call(args, stdout=f , stderr=f)
f.close()
return result
return subprocess.call(args,stdout=out , stderr=out)
def getCmdResult(args, out, self):
if MEMCHECK:
mem_cmd = ["valgrind", "-q", "--error-exitcode="+str(MEM_ERR), "--leak-check=full"] + args
with open(out, "w") as f:
f.write("\n\n**********COMMAND RAN: $"+ ' '.join(mem_cmd) +"\n")
result = subprocess.call(mem_cmd, stdout=f , stderr=f)
f.close()
self.assertNotEqual(result, MEM_ERR)
#we run twice because valgrind interprets a -1 return code as a 0, which stinks
rc = command(args, out)
if rc == 0:
return True
else:
return False
def setupTestEnv():
out="log.txt"
command(["cp", "-a", "./testdata/goldenKeys/.", "testenv/"], out)
def setupArrays():
for file in os.listdir("./testdata"):
if file.endswith(".auth"):
if file.startswith("bad_"):
fileName=file[4:-5];
arr=fileName.split("_")
badAuths.append([file,arr[0],arr[2]]) #[filename, keyname,keysigner]
elif file.startswith("empty_"):
#auths with noESL are key delete updates, perfectly valid, add to goodauths
fileName = file[6:-5]
arr=fileName.split("_")
goodAuths.append([file, arr[0],arr[2]])
else:
fileName=file[:-5];
arr=fileName.split("_")
goodAuths.append([file,arr[0],arr[2]])
elif file.endswith(".esl") and not file.startswith("empty"):
if not file.startswith("bad"):
fileName=file[:-4];
arr=fileName.split("_")
goodESLs.append([file,arr[0],arr[2]])
elif file.endswith(".der"):
if not file.startswith("bad"):
fileName=file[:-4]
arr=fileName.split("_")
goodCRTs.append([file,arr[0],arr[2]])
elif file.endswith(".crt"):
if not file.startswith("bad"):
fileName=file[:-4]
arr=fileName.split("_")
goodCRTs.append([file,arr[0],arr[2]])
for file in os.listdir("./testdata/brokenFiles"): #sort broken files into esl's crts and auths
if file.endswith(".esl"):
brokenESLs.append("./testdata/brokenFiles/"+file)
elif file.endswith(".der") or file.endswith(".crt"):
brokenCrts.append("./testdata/brokenFiles/"+file)
elif file.endswith(".auth"):
brokenAuths.append("./testdata/brokenFiles/"+file)
elif file.endswith(".pkcs7"):
brokenPkcs7s.append("./testdata/brokenFiles/"+file)
def compareFiles(a,b):
if filecmp.cmp(a,b):
return True
return False
class Test(unittest.TestCase):
def test_secvarctl_basic(self):
out="secvarctlBasiclog.txt"
cmd=[SECTOOLS]
for i in secvarctlCommands:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
def test_ppcSecVarsRead(self):
out="ppcSecVarsReadlog.txt"
cmd=[SECTOOLS]
#if power sysfs exists read current keys
if os.path.isdir(SECVARPATH):
for i in ppcSecVarsRead:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
else:
with open(out, "w") as f:
f.write("POWER SECVAR LOCATION ( "+ SECVARPATH + " ) DOES NOT EXIST SO NO TESTS RAN\n")
f.close();
def test_verify(self):
out="verifylog.txt"
cmd=[SECTOOLS, "verify"]
for fileInfo in goodAuths:
file="./testdata/"+fileInfo[0]
self.assertEqual( getCmdResult(cmd+[ "-w", "-p", "testenv/","-u",fileInfo[1],file],out, self), True)#verify all auths are signed by keys in testenv
self.assertEqual(compareFiles("testenv/"+fileInfo[1]+"/update", file), True)#assert files wrote correctly
for fileInfo in badAuths:
file="./testdata/"+fileInfo[0]
self.assertEqual( getCmdResult(cmd+[ "-p", "testenv/","-u",fileInfo[1],file],out, self), False)#verify all bad auths are not signed correctly
for i in verifyCommands:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
def test_validate(self):
out="validatelog.txt"
cmd=[SECTOOLS, "validate"]
for i in validateCommands:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
for i in goodAuths: #validate all auths
file="./testdata/"+i[0]
if i[1] != "dbx":
self.assertEqual( getCmdResult(cmd+[file],out, self), True)
else:
self.assertEqual( getCmdResult(cmd+[file, "-x"],out, self), True)
for i in goodESLs:
file="./testdata/"+i[0]
if i[1] != "dbx":
file="./testdata/"+i[0]
self.assertEqual( getCmdResult(cmd+["-e",file],out, self), True)
else:
self.assertEqual( getCmdResult(cmd+["-e", file, "-x"],out, self), True)
for i in goodCRTs:
file="./testdata/"+i[0]
self.assertEqual( getCmdResult(cmd+["-v","-c",file],out, self), True)
for i in brokenAuths:
self.assertEqual( getCmdResult(cmd+["-v", i],out, self), False)
for i in brokenESLs:
self.assertEqual( getCmdResult(cmd+["-v", "-e", i],out, self), False)
for i in brokenCrts:
self.assertEqual( getCmdResult(cmd+["-v", "-c", i],out, self), False)
for i in brokenPkcs7s:
self.assertEqual( getCmdResult(cmd+["-v", "-p", i],out, self), False)
def test_read(self):
out="readlog.txt"
cmd=[SECTOOLS, "read"]
#self.assertEqual(not not not command(cmd,out, self), True) #no args
for i in readCommands:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
for i in brokenESLs:
#read should read sha and rsa esl's w no problem
if i.startswith("./testdata/brokenFiles/sha") or i.startswith("./testdata/brokenFiles/rsa"):
self.assertEqual( getCmdResult(cmd+["-f", i],out, self), True)
else:
self.assertEqual( getCmdResult(cmd+["-f", i],out, self), False) #all truncated esls should fail to print human readable info
def test_write(self):
out="writelog.txt"
cmd=[SECTOOLS,"write"]
path="./testenv/"
for i in writeCommands:
self.assertEqual( getCmdResult(cmd+i[0],out, self),i[1])
for i in goodAuths: #try write with good auths, validation included
file="./testdata/"+i[0]
preUpdate=file#get auth
postUpdate=path+i[1]+"/update" #./testenv/<varname>/update
self.assertEqual( getCmdResult(cmd+[ "-p", path,i[1],file],out, self), True)#assert command runs
self.assertEqual(compareFiles(preUpdate,postUpdate), True)# assert auths esl is equal to data written to update file
for i in brokenAuths:
self.assertEqual( getCmdResult(cmd+["-p", path, "KEK",i],out, self), False)#broken auths should fail
self.assertEqual( getCmdResult(cmd+["-p", path ,"-f", "KEK",i],out, self), True)#if forced, they should work
self.assertEqual(compareFiles(i,path+"KEK/update"), True)
def test_badenv(self):
out="badEnvLog.txt"
for i in badEnvCommands:
setupTestEnv()
command(i[0],i[1])
self.assertEqual( getCmdResult([SECTOOLS]+i[2],out, self),i[3])
setupTestEnv()
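# How to run (assuming the testdata/ and testenv/ trees described above exist):
# execute this file directly for a functional pass, or pass the single argument
# MEMCHECK to re-run every secvarctl command under valgrind (see getCmdResult).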
if __name__ == '__main__':
if "MEMCHECK" in sys.argv:
MEMCHECK = True
else:
MEMCHECK = False
del sys.argv[1:]
setupArrays()
setupTestEnv()
unittest.main()
|
the-stack_106_29968 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
_MEMBER = "role:member"
_PROJECT_MEMBER = f"{_MEMBER} and project_id:%(target.order.project_id)s"
rules = [
policy.DocumentedRuleDefault(
name='orders:get',
check_str=f'rule:all_but_audit or {_MEMBER}',
scope_types=['project'],
description='Gets list of all orders associated with a project.',
operations=[
{
'path': '/v1/orders',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='orders:post',
check_str=f'rule:admin_or_creator or {_MEMBER}',
scope_types=['project'],
description='Creates an order.',
operations=[
{
'path': '/v1/orders',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name='orders:put',
check_str=f'rule:admin_or_creator or {_MEMBER}',
scope_types=['project'],
description='Unsupported method for the orders API.',
operations=[
{
'path': '/v1/orders',
'method': 'PUT'
}
]
),
policy.DocumentedRuleDefault(
name='order:get',
check_str='rule:all_users and project_id:%(target.order.project_id)s '
f'or {_PROJECT_MEMBER}',
scope_types=['project'],
description='Retrieves an orders metadata.',
operations=[
{
'path': '/v1/orders/{order-id}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='order:delete',
check_str='rule:admin and project_id:%(target.order.project_id)s or '
f'{_PROJECT_MEMBER}',
scope_types=['project'],
description='Deletes an order.',
operations=[
{
'path': '/v1/orders/{order-id}',
'method': 'DELETE'
}
],
)
]
def list_rules():
return rules
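# Minimal sketch of how these defaults could be registered on an oslo.policy
# Enforcer (illustrative only; the base rules referenced by the check strings,
# e.g. ``all_but_audit`` and ``admin_or_creator``, are defined elsewhere in
# barbican's policy package and would also need to be registered).
if __name__ == "__main__":
    from oslo_config import cfg

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(list_rules())
    for rule in list_rules():
        print(rule.name, "->", rule.check_str)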
|
the-stack_106_29970 | import logging
from ledfx.devices import Device
import voluptuous as vol
import numpy as np
import sacn
import time
_LOGGER = logging.getLogger(__name__)
class E131Device(Device):
"""E1.31 device support"""
CONFIG_SCHEMA = vol.Schema({
vol.Required('ip_address', description='Hostname or IP address of the device'): str,
vol.Optional('universe', description='DMX universe for the device', default=1): vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional('universe_size', description='Size of each DMX universe', default=512): vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional('channel_offset', description='Channel offset within the DMX universe', default=0): vol.All(vol.Coerce(int), vol.Range(min=0))
})
def __init__(self, ledfx, config):
super().__init__(ledfx, config)
# Allow for configuring in terms of "pixels" or "channels"
self._config['channel_count'] = self.pixel_count * 3
span = self._config['channel_offset'] + self._config['channel_count'] - 1
self._config['universe_end'] = self._config['universe'] + int(span / self._config['universe_size'])
if span % self._config['universe_size'] == 0:
self._config['universe_end'] -= 1
self._sacn = None
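        # Worked example (illustrative numbers): 200 pixels -> 600 channels.
        # With channel_offset=0 and universe_size=512, span = 599, so the data
        # occupies ceil(600 / 512) = 2 universes and universe_end = universe + 1.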
def activate(self):
if self._sacn:
raise Exception('sACN sender already started.')
# Configure sACN and start the dedicated thread to flush the buffer
self._sacn = sacn.sACNsender()
for universe in range(self._config['universe'], self._config['universe_end'] + 1):
_LOGGER.info("sACN activating universe {}".format(universe))
self._sacn.activate_output(universe)
            if self._config['ip_address'] is None:
self._sacn[universe].multicast = True
else:
self._sacn[universe].destination = self._config['ip_address']
self._sacn[universe].multicast = False
#self._sacn.fps = 60
self._sacn.start()
_LOGGER.info("sACN sender started.")
super().activate()
def deactivate(self):
super().deactivate()
if not self._sacn:
raise Exception('sACN sender not started.')
# Turn off all the LEDs when deactivating. With how the sender
# works currently we need to sleep to ensure the pixels actually
# get updated. Need to replace the sACN sender such that flush
# directly writes the pixels.
self.flush(np.zeros(self._config['channel_count']))
time.sleep(1.5)
self._sacn.stop()
self._sacn = None
_LOGGER.info("sACN sender stopped.")
def flush(self, data):
"""Flush the data to all the E1.31 channels account for spanning universes"""
if not self._sacn:
raise Exception('sACN sender not started.')
if data.size != self._config['channel_count']:
raise Exception('Invalid buffer size. ({} != {})'.format(
data.size, self._config['channel_count']))
data = data.flatten()
current_index = 0
for universe in range(self._config['universe'], self._config['universe_end'] + 1):
# Calculate offset into the provide input buffer for the channel. There are some
# cleaner ways this can be done... This is just the quick and dirty
universe_start = (universe - self._config['universe']) * self._config['universe_size']
universe_end = (universe - self._config['universe'] + 1) * self._config['universe_size']
dmx_start = max(universe_start, self._config['channel_offset']) % self._config['universe_size']
dmx_end = min(universe_end, self._config['channel_offset'] + self._config['channel_count']) % self._config['universe_size']
if dmx_end == 0:
dmx_end = self._config['universe_size']
input_start = current_index
input_end = current_index + dmx_end - dmx_start
current_index = input_end
dmx_data = np.array(self._sacn[universe].dmx_data)
dmx_data[dmx_start:dmx_end] = data[input_start:input_end]
self._sacn[universe].dmx_data = dmx_data.clip(0,255)
# # Hack up a manual flush of the E1.31 data vs having a background thread
# if self._sacn._output_thread._socket:
# for output in list(self._sacn._output_thread._outputs.values()):
# self._sacn._output_thread.send_out(output) |
the-stack_106_29971 | """This module includes class to calculate the probability of the ideal hand."""
from dataclasses import dataclass
from math import comb
from typing import List, Tuple
@dataclass
class Hand:
"""Stores hand data, and calculate the probability of given hand.
Let d be # of cards in deck, h be # of cards in hand,
n be # of all cards and m be # of all hands, the probability of hand is written as
comb(d, h) * comb(n - d, m - h) / comb(n, m)
"""
num_deck: int
num_hand: int
nums_in_deck: List[int]
combinations: List[Tuple[int, ...]]
def _calc_each_prob(self, hand_comb: Tuple[int, ...]) -> float:
"""Return probability of hand.
        The input is a combination of cards in hand. For example, if two copies of
        card X and one copy of card Y are in hand, hand_comb becomes (2, 1).

        The deck-side counts come from the instance attributes ``num_deck`` (total
        cards in the deck), ``num_hand`` (hand size) and ``nums_in_deck`` (copies
        of each card in the deck).

        :param hand_comb: Combination of cards in hand. See the example above.
"""
deck_rest = self.num_deck
hand_rest = self.num_hand
num_comb = 1 # Count # of hand combination.
for num_in_hand, num_in_deck in zip(hand_comb, self.nums_in_deck):
num_comb *= comb(num_in_deck, num_in_hand)
deck_rest -= num_in_deck
hand_rest -= num_in_hand
# If a hand remains, fill it with other cards.
if hand_rest > 0:
num_comb *= comb(deck_rest, hand_rest)
return num_comb / comb(self.num_deck, self.num_hand)
def calc_prob(self) -> float:
"""Return the probability for each combination of hands.
The probability of all possible combination is calculated
simply by adding all probability of each combination, because
they are exclusive each other.
"""
prob = 0.0
for hand_comb in self.combinations:
prob += self._calc_each_prob(hand_comb)
return prob
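# Example (illustrative numbers): probability of opening with at least one copy
# of a card you run three of, in a 40-card deck with a 5-card hand. Each tuple
# in ``combinations`` is one mutually exclusive way the hand can hold the card.
if __name__ == "__main__":
    hand = Hand(
        num_deck=40,
        num_hand=5,
        nums_in_deck=[3],
        combinations=[(1,), (2,), (3,)],
    )
    print(f"P(at least one copy) = {hand.calc_prob():.4f}")  # ~0.3376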
|
the-stack_106_29972 | from django import forms
from django.db.models import Q
from django.utils.translation import npgettext, pgettext_lazy
from django_filters import (
CharFilter, ChoiceFilter, DateFromToRangeFilter, ModelMultipleChoiceFilter,
OrderingFilter, RangeFilter)
from ...core.filters import SortedFilterSet
from ...discount.models import Sale, Voucher
from ...product.models import Category
from ..widgets import DateRangeWidget
SORT_BY_FIELDS_SALE = {
'name': pgettext_lazy('Sale list sorting option', 'name'),
'value': pgettext_lazy('Sale list sorting option', 'value'),
'start_date': pgettext_lazy('Sale list sorting option', 'start_date'),
'end_date': pgettext_lazy('Sale list sorting option', 'end_date')}
SORT_BY_FIELDS_LABELS_VOUCHER = {
'name': pgettext_lazy('Voucher list sorting option', 'name'),
'discount_value': pgettext_lazy(
'Voucher list sorting option', 'discount_value'),
'countries': pgettext_lazy('Voucher list sorting option', 'countries'),
'start_date': pgettext_lazy('Voucher list sorting option', 'start_date'),
'end_date': pgettext_lazy('Voucher list sorting option', 'end_date'),
'used': pgettext_lazy('Voucher list sorting option', 'used'),
'min_amount_spent': pgettext_lazy(
'Voucher list sorting option', 'min_amount_spent')}
DISCOUNT_TYPE_CHOICES = (
('fixed', pgettext_lazy('Sale type filter choice', 'USD')),
('percentage', pgettext_lazy('Sale type filter choice', '%')))
def filter_by_date_range(queryset, name, value):
q = Q()
if value.start:
q = Q(start_date__gte=value.start)
if value.stop:
if value.start:
q |= Q(end_date__lte=value.stop)
else:
q = Q(end_date__lte=value.stop)
return queryset.filter(q)
class SaleFilter(SortedFilterSet):
name = CharFilter(
label=pgettext_lazy('Sale list filter label', 'Name'),
lookup_expr='icontains')
categories = ModelMultipleChoiceFilter(
label=pgettext_lazy('Sale list filter label', 'Categories'),
field_name='categories',
queryset=Category.objects.all())
type = ChoiceFilter(
label=pgettext_lazy('Sale list filter label', 'Discount type'),
choices=DISCOUNT_TYPE_CHOICES,
empty_label=pgettext_lazy('Filter empty choice label', 'All'),
widget=forms.Select)
value = RangeFilter(
label=pgettext_lazy('Sale list filter label', 'Value'))
date = DateFromToRangeFilter(
label=pgettext_lazy(
'Sale list sorting filter label', 'Period of validity'),
field_name='created', widget=DateRangeWidget,
method=filter_by_date_range)
sort_by = OrderingFilter(
label=pgettext_lazy('Sale list filter label', 'Sort by'),
fields=SORT_BY_FIELDS_SALE.keys(),
field_labels=SORT_BY_FIELDS_SALE)
class Meta:
model = Sale
fields = []
def get_summary_message(self):
counter = self.qs.count()
return npgettext(
'Number of matching records in the dashboard sales list',
'Found %(counter)d matching sale',
'Found %(counter)d matching sales',
number=counter) % {'counter': counter}
class VoucherFilter(SortedFilterSet):
name = CharFilter(
label=pgettext_lazy('Voucher list name filter label', 'Name'),
lookup_expr='icontains')
type = ChoiceFilter(
field_name='discount_value_type',
label=pgettext_lazy(
'Sale list is sale type filter label', 'Discount type'),
choices=DISCOUNT_TYPE_CHOICES,
empty_label=pgettext_lazy('Filter empty choice label', 'All'),
widget=forms.Select)
discount_value = RangeFilter(
label=pgettext_lazy('Sale list filter label', 'Discount_value'))
date = DateFromToRangeFilter(
label=pgettext_lazy(
'Voucher list sorting filter label', 'Period of validity'),
field_name='created', widget=DateRangeWidget,
method=filter_by_date_range)
min_amount_spent = RangeFilter(
label=pgettext_lazy(
'Voucher list sorting filter', 'Minimum amount spent'),
field_name='min_amount_spent')
sort_by = OrderingFilter(
label=pgettext_lazy('Voucher list sorting filter label', 'Sort by'),
fields=SORT_BY_FIELDS_LABELS_VOUCHER.keys(),
field_labels=SORT_BY_FIELDS_LABELS_VOUCHER)
class Meta:
model = Voucher
fields = []
def get_summary_message(self):
counter = self.qs.count()
return npgettext(
'Number of matching records in the dashboard vouchers list',
'Found %(counter)d matching voucher',
'Found %(counter)d matching vouchers',
number=counter) % {'counter': counter}
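# Usage sketch (illustrative; in saleor these FilterSets are normally driven by
# the dashboard list views): bind the querystring to a FilterSet and read ``qs``.
#
#     sale_filter = SaleFilter(request.GET, queryset=Sale.objects.all())
#     sales = sale_filter.qs
#     summary = sale_filter.get_summary_message()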
|
the-stack_106_29973 | import os
import mock
import hypothesis as h
import hypothesis.strategies as hs
import pytest
from .. import manifest, item, utils
def SourceFileWithTest(path, hash, cls, *args):
s = mock.Mock(rel_path=path, hash=hash)
test = cls(s, utils.rel_path_to_url(path), *args)
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
def SourceFileWithTests(path, hash, cls, variants):
s = mock.Mock(rel_path=path, hash=hash)
tests = [cls(s, item[0], *item[1:]) for item in variants]
s.manifest_items = mock.Mock(return_value=(cls.item_type, tests))
return s
@hs.composite
def rel_dir_file_path(draw):
length = draw(hs.integers(min_value=1, max_value=20))
if length == 1:
return "a"
else:
remaining = length - 2
if os.path.sep == "/":
alphabet = "a/"
elif os.path.sep == "\\":
alphabet = "a/\\"
else:
assert False, "uhhhh, this platform is weird"
mid = draw(hs.text(alphabet=alphabet, min_size=remaining, max_size=remaining))
return os.path.normcase("a" + mid + "a")
@hs.composite
def sourcefile_strategy(draw):
item_classes = [item.TestharnessTest, item.RefTest, item.RefTestNode,
item.ManualTest, item.Stub, item.WebdriverSpecTest,
item.ConformanceCheckerTest, item.SupportFile]
cls = draw(hs.sampled_from(item_classes))
path = draw(rel_dir_file_path())
hash = draw(hs.text(alphabet="0123456789abcdef", min_size=40, max_size=40))
s = mock.Mock(rel_path=path, hash=hash)
if cls in (item.RefTest, item.RefTestNode):
ref_path = draw(rel_dir_file_path())
h.assume(path != ref_path)
ref_eq = draw(hs.sampled_from(["==", "!="]))
test = cls(s, utils.rel_path_to_url(path), [(utils.rel_path_to_url(ref_path), ref_eq)])
elif cls is item.SupportFile:
test = cls(s)
else:
test = cls(s, utils.rel_path_to_url(path))
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10, max_size=1000,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.ConformanceCheckerTest)])
def test_manifest_to_json(s):
m = manifest.Manifest()
assert m.update(s) is True
json_str = m.to_json()
loaded = manifest.Manifest.from_json("/", json_str)
assert list(loaded) == list(m)
assert loaded.to_json() == json_str
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.TestharnessTest)])
@h.example([SourceFileWithTest("a", "0"*40, item.RefTest, [("/aa", "==")])])
def test_manifest_idempotent(s):
m = manifest.Manifest()
assert m.update(s) is True
m1 = list(m)
assert m.update(s) is False
assert list(m) == m1
def test_manifest_to_json_forwardslash():
m = manifest.Manifest()
s = SourceFileWithTest("a/b", "0"*40, item.TestharnessTest)
assert m.update([s]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
def test_manifest_to_json_backslash():
m = manifest.Manifest()
s = SourceFileWithTest("a\\b", "0"*40, item.TestharnessTest)
if os.path.sep == "\\":
assert m.update([s]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
else:
with pytest.raises(ValueError):
# one of these must raise ValueError
# the first must return True if it doesn't raise
assert m.update([s]) is True
m.to_json()
def test_manifest_from_json_backslash():
json_obj = {
'paths': {
'a\\b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a\\b': [['/a/b', {}]]
}
}
}
with pytest.raises(ValueError):
manifest.Manifest.from_json("/", json_obj)
def test_reftest_computation_chain():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
m.update([s1, s2])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_add():
m = manifest.Manifest()
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
test2 = s2.manifest_items()[1][0]
assert m.update([s2]) is True
assert list(m) == [("reftest", test2.path, {test2})]
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
test1 = s1.manifest_items()[1][0]
# s2's hash is unchanged, but it has gone from a test to a node
assert m.update([s1, s2]) is True
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_remove():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
assert m.update([s1, s2]) is True
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
# s2's hash is unchanged, but it has gone from a node to a test
assert m.update([s2]) is True
assert list(m) == [("reftest", test2.path, {test2})]
def test_reftest_computation_chain_update_test_type():
m = manifest.Manifest()
s1 = SourceFileWithTest("test", "0"*40, item.RefTest, [("/test-ref", "==")])
assert m.update([s1]) is True
test1 = s1.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1})]
# test becomes a testharness test (hash change because that is determined
# based on the file contents). The updated manifest should not includes the
# old reftest.
s2 = SourceFileWithTest("test", "1"*40, item.TestharnessTest)
assert m.update([s2]) is True
test2 = s2.manifest_items()[1][0]
assert list(m) == [("testharness", test2.path, {test2})]
def test_reftest_computation_chain_update_node_change():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTestNode, [("/test3", "==")])
assert m.update([s1, s2]) is True
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2})]
#test2 changes to support type
s2 = SourceFileWithTest("test2", "1"*40, item.SupportFile)
assert m.update([s1,s2]) is True
test3 = s2.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1}),
("support", test3.path, {test3})]
def test_iterpath():
m = manifest.Manifest()
sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test2-ref", "==")]),
SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("/test2-1.html",),
("/test2-2.html",)]),
SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
m.update(sources)
assert set(item.url for item in m.iterpath("test2")) == set(["/test2",
"/test2-1.html",
"/test2-2.html"])
assert set(m.iterpath("missing")) == set()
def test_filter():
m = manifest.Manifest()
sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test2-ref", "==")]),
SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("/test2-1.html",),
("/test2-2.html",)]),
SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
m.update(sources)
json = m.to_json()
def filter(it):
for test in it:
if test[0] in ["/test2-2.html", "/test3"]:
yield test
filtered_manifest = manifest.Manifest.from_json("/", json, types=["testharness"], meta_filters=[filter])
actual = [
(ty, path, [test.id for test in tests])
for (ty, path, tests) in filtered_manifest
]
assert actual == [
("testharness", "test2", ["/test2-2.html"]),
("testharness", "test3", ["/test3"]),
]
def test_reftest_node_by_url():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
m.update([s1, s2])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert m.reftest_nodes_by_url == {"/test1": test1,
"/test2": test2_node}
m._reftest_nodes_by_url = None
assert m.reftest_nodes_by_url == {"/test1": test1,
"/test2": test2_node}
|
the-stack_106_29974 | from .base import AuthenticationBase
class GetToken(AuthenticationBase):
"""/oauth/token related endpoints
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def authorization_code(self, client_id, client_secret, code,
redirect_uri, grant_type='authorization_code'):
"""Authorization code grant
This is the OAuth 2.0 grant that regular web apps utilize in order
to access an API. Use this endpoint to exchange an Authorization Code
for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code
use authorization_code
client_id (str): your application's client Id
client_secret (str): your application's client Secret
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
"""
return self.post(
'{}://{}/oauth/token'.format(self.protocol, self.domain),
data={
'client_id': client_id,
'client_secret': client_secret,
'code': code,
'grant_type': grant_type,
'redirect_uri': redirect_uri,
}
)
def authorization_code_pkce(self, client_id, code_verifier, code,
redirect_uri, grant_type='authorization_code'):
"""Authorization code pkce grant
This is the OAuth 2.0 grant that mobile apps utilize in order to access an API.
Use this endpoint to exchange an Authorization Code for a Token.
Args:
grant_type (str): Denotes the flow you're using. For authorization code pkce
use authorization_code
client_id (str): your application's client Id
code_verifier (str): Cryptographically random key that was used to generate
the code_challenge passed to /authorize.
code (str): The Authorization Code received from the /authorize Calls
redirect_uri (str, optional): This is required only if it was set at
the GET /authorize endpoint. The values must match
Returns:
access_token, id_token
"""
return self.post(
'{}://{}/oauth/token'.format(self.protocol, self.domain),
data={
'client_id': client_id,
'code_verifier': code_verifier,
'code': code,
'grant_type': grant_type,
'redirect_uri': redirect_uri,
}
)
def client_credentials(self, client_id, client_secret, audience,
grant_type='client_credentials'):
"""Client credentials grant
This is the OAuth 2.0 grant that server processes utilize in
order to access an API. Use this endpoint to directly request
an access_token by using the Application Credentials (a Client Id and
a Client Secret).
Args:
grant_type (str): Denotes the flow you're using. For client credentials
use client_credentials
client_id (str): your application's client Id
client_secret (str): your application's client Secret
audience (str): The unique identifier of the target API you want to access.
Returns:
access_token
"""
return self.post(
'{}://{}/oauth/token'.format(self.protocol, self.domain),
data={
'client_id': client_id,
'client_secret': client_secret,
'audience': audience,
'grant_type': grant_type,
}
)
def login(self, client_id, client_secret, username, password, scope, realm,
audience, grant_type='http://auth0.com/oauth/grant-type/password-realm'):
"""Calls /oauth/token endpoint with password-realm grant type
This is the OAuth 2.0 grant that highly trusted apps utilize in order
to access an API. In this flow the end-user is asked to fill in credentials
(username/password) typically using an interactive form in the user-agent
(browser). This information is later on sent to the client and Auth0.
It is therefore imperative that the client is absolutely trusted with
this information.
Args:
grant_type (str): Denotes the flow you're using. For password realm
use http://auth0.com/oauth/grant-type/password-realm
client_id (str): your application's client Id
client_secret (str): your application's client Secret
audience (str): The unique identifier of the target API you want to access.
username (str): Resource owner's identifier
password (str): resource owner's Secret
scope(str): String value of the different scopes the client is asking for.
Multiple scopes are separated with whitespace.
realm (str): String value of the realm the user belongs.
Set this if you want to add realm support at this grant.
Returns:
access_token, id_token
"""
return self.post(
'{}://{}/oauth/token'.format(self.protocol, self.domain),
data={
'client_id': client_id,
'username': username,
'password': password,
'realm': realm,
'client_secret': client_secret,
'scope': scope,
'audience': audience,
'grant_type': grant_type
}
)
def refresh_token(self, client_id, client_secret, refresh_token, grant_type='refresh_token', scope=''):
"""Calls /oauth/token endpoint with refresh token grant type
Use this endpoint to refresh an access token, using the refresh token you got during authorization.
Args:
grant_type (str): Denotes the flow you're using. For refresh token
use refresh_token
client_id (str): your application's client Id
client_secret (str): your application's client Secret
refresh_token (str): The refresh token returned from the initial token request.
scope (str): String value of the different scopes the client is asking for.
Multiple scopes are separated with whitespace.
Returns:
access_token, id_token
"""
return self.post(
'{}://{}/oauth/token'.format(self.protocol, self.domain),
data={
'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'scope': scope,
'grant_type': grant_type
}
)
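# Usage sketch (placeholder domain and credentials; with the packaged client
# this class is typically imported from ``auth0.v3.authentication``):
#
#     token = GetToken('YOUR_TENANT.auth0.com')
#     response = token.client_credentials('YOUR_CLIENT_ID', 'YOUR_CLIENT_SECRET',
#                                         'https://YOUR_TENANT.auth0.com/api/v2/')
#     access_token = response['access_token']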
|
the-stack_106_29975 | # https://www.codewars.com/kata/strings-mix/train/python
# My solution
from collections import Counter
from operator import itemgetter
def mix(s1, s2):
remove_ones = lambda dict_: {k:v for k,v in dict_.items() if v > 1 }
s1 = remove_ones(Counter(filter(str.islower, s1)))
s2 = remove_ones(Counter(filter(str.islower, s2)))
letters = set(s1.keys()).union(s2.keys())
r = sorted((('1',l,s1.get(l,0)) if s1.get(l,0) > s2.get(l,0) else
('2',l,s2.get(l,0)) if s2.get(l,0) > s1.get(l,0) else
('=',l,s1.get(l,0)) for l in letters), key=itemgetter(0,1))
r.sort(key=itemgetter(2), reverse=True)
return "/".join(map(lambda v: "{winner}:{null:{letter}^{times}s}".format(winner=v[0], letter=v[1], times=v[2], null='') ,r))
# ...
from collections import Counter
def mix(s1, s2):
c1 = Counter(filter(str.islower, s1))
c2 = Counter(filter(str.islower, s2))
res = []
    for c in set(c1) | set(c2):  # union of letters seen in either string
n1, n2 = c1.get(c, 0), c2.get(c, 0)
if n1 > 1 or n2 > 1:
res.append(('1', c, n1) if n1 > n2 else
('2', c, n2) if n2 > n1 else ('=', c, n1))
res = ['{}:{}'.format(i, c * n) for i, c, n in res]
return '/'.join(sorted(res, key=lambda s: (-len(s), s)))
# ...
def mix(s1, s2):
hist = {}
for ch in "abcdefghijklmnopqrstuvwxyz":
val1, val2 = s1.count(ch), s2.count(ch)
if max(val1, val2) > 1:
which = "1" if val1 > val2 else "2" if val2 > val1 else "="
hist[ch] = (-max(val1, val2), which + ":" + ch * max(val1, val2))
return "/".join(hist[ch][1] for ch in sorted(hist, key=lambda x: hist[x]))
# ... (note: the next solution relies on Python 2's built-in cmp(), removed in Python 3)
def mix(s1, s2):
output = []
for char in {c for c in s1 + s2 if c.islower()}:
check = s1.count(char), s2.count(char)
m = max(check)
if m > 1:
output += ["=12"[cmp(*check)] + ":" + m * char]
output.sort(key = lambda x: (-len(x), x))
return '/'.join(output)
# ...
from collections import Counter
def mix(s1, s2):
c1, c2 = [Counter({s: n for s, n in Counter(c).items() if n > 1 and s.islower()}) for c in (s1, s2)]
return '/'.join(c + ':' + -n * s for n, c, s in
sorted((-n, '=12'[(c1[s] == n) - (c2[s] == n)], s) for s, n in (c1 | c2).items()))
# ... (note: the next solution also relies on Python 2's built-in cmp())
mix=lambda a,b:'/'.join(sorted(("=12"[cmp(*k)]+":"+max(k)*r
for r,k in{(c, (a.count(c), b.count(c)))
for c in a+b if c.islower()}if max(k)>1),
key=lambda x:(-len(x),x)))
# ...
from collections import Counter
def mix(s1, s2):
res = []
c1 = Counter([c for c in s1 if c.islower()])
c2 = Counter([c for c in s2 if c.islower()])
for c in c1 | c2:
if c1[c] > 1 and c1[c] > c2[c]: res += ['1:' + c * c1[c]]
if c2[c] > 1 and c2[c] > c1[c]: res += ['2:' + c * c2[c]]
if c1[c] > 1 and c1[c] == c2[c]: res += ['=:' + c * c1[c]]
return '/'.join(sorted(res, key = lambda a : [-len(a), a])) |
the-stack_106_29977 | # Copyright (c) 2013 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
opts = [
cfg.StrOpt('aggregate_image_properties_isolation_namespace',
help='Force the filter to consider only keys matching '
'the given namespace.'),
cfg.StrOpt('aggregate_image_properties_isolation_separator',
default=".",
help='The separator used between the namespace and keys'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
"""AggregateImagePropertiesIsolation works with image properties."""
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
"""Checks a host in an aggregate that metadata key/value match
with image properties.
"""
cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
cfg_separator = CONF.aggregate_image_properties_isolation_separator
spec = filter_properties.get('request_spec', {})
image_props = spec.get('image', {}).get('properties', {})
context = filter_properties['context']
metadata = utils.aggregate_metadata_get_by_host(context,
host_state.host)
        for key, options in metadata.items():
if (cfg_namespace and
not key.startswith(cfg_namespace + cfg_separator)):
continue
prop = image_props.get(key)
if prop and prop not in options:
LOG.debug("%(host_state)s fails image aggregate properties "
"requirements. Property %(prop)s does not "
"match %(options)s.",
{'host_state': host_state,
'prop': prop,
'options': options})
return False
return True
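# Hedged illustration of host_passes(): with the namespace option set to 'hw'
# and the separator left at '.', only aggregate metadata keys such as
# 'hw.cpu_policy' are compared against the image properties; a key like
# 'owner' is skipped entirely, and the host fails only when the image sets a
# namespaced property whose value is not among the aggregate's allowed options.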
|
the-stack_106_29978 | # 4.3 Using the Data Step to Create an .xdf File from a Data Frame
import os
import settings as st
import pandas as pd
import numpy as np
from revoscalepy import rx_data_step, rx_get_info
np.random.seed(39)
x1 = np.random.normal(size = 10000)
x2 = np.random.uniform(size = 10000)
x3 = x1 + x2
s = np.stack((x1,x2,x3))
s = np.transpose(s)
myData = pd.DataFrame(s, columns = ['x1','x2','x3']).query("x2 > .1")
# Export files directory
outFile = os.path.join(st.RESULTS_LOCATION, 'testFile.xdf')
rx_data_step(input_data = myData,
output_file = outFile,
rows_per_read = 5000,
overwrite = True)
print(rx_get_info(outFile))
|
the-stack_106_29981 | import demistomock as demisto
from CommonServerPython import *
import traceback
def main():
try:
from_date = demisto.args().get('from', '')
to_date = demisto.args().get('to', '')
query = 'type:"MITRE ATT&CK" and investigationsCount:>0 and -incident.type:"MITRE ATT&CK CoA"'
search_indicators = IndicatorsSearcher()
res = search_indicators.search_indicators_by_version(query=query, from_date=from_date, to_date=to_date)
indicators = []
for ind in res.get('iocs', []):
indicators.append({
'Value': dict_safe_get(ind, ['value']),
'Name': dict_safe_get(ind, ['CustomFields', 'mitrename']),
'Phase Name': dict_safe_get(ind, ['CustomFields', 'mitrekillchainphases', 0, 'phase_name']),
'Description': dict_safe_get(ind, ['CustomFields', 'mitredescription']),
})
incidents_table = tableToMarkdown('MITRE ATT&CK techniques by related Incidents', indicators,
headers=['Value', 'Name', 'Phase Name', 'Description'])
return_outputs(incidents_table)
except Exception:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute MITREIndicatorsByOpenIncidents script. Error: {traceback.format_exc()}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
the-stack_106_29982 | # Software released under the MIT license (see project root for license file)
from pyamf import amf3
# ------------------------------------------------------------------------------
suit_arr = [ 'Diamonds', 'Clubs', 'Hearts', 'Spades' ]
rank_arr = [ 'Ace', 'Two', 'Three', 'Four',
'Five', 'Six', 'Seven',
'Eight', 'Nine', 'Ten',
'Jack', 'Queen', 'King' ]
class Header():
def __init__(self, version = 1, tag = "not set"):
self.version = version
self.tag = tag
class Card:
def __init__(self, name = "", id = -1):
self.name = name
self.id = id
    def get_name(self):
        # map the 1-based card id onto a rank/suit pair (13 ranks per suit)
        i = self.id - 1
return (rank_arr[i % len(rank_arr)]
+ ' of ' + suit_arr[i // len(rank_arr)])
class Deck:
def __init__(self):
self.cards = []
# ------------------------------------------------------------------------------
def buffer_to_file(file_name, encoder):
output_buffer = amf3.DataOutput(encoder)
f = open(file_name, "wb")
bytes = output_buffer.stream.getvalue()
f.write(bytes)
f.close()
return len(bytes)
def file_to_buffer(file_name):
f_in = open(file_name, "rb")
istream = amf3.util.BufferedByteStream(f_in)
f_in.close()
return istream
# ------------------------------------------------------------------------------
class write_context:
def __init__(self, ver = 1):
self.stream = amf3.util.BufferedByteStream()
self.encoder = amf3.Encoder(self.stream)
self.encoder.string_references = False # disables string caching
self.reorder_map = {}
self.ver = ver
self.set_version(ver)
def set_version(self, ver):
self.ver = ver
class read_context:
def __init__(self, file_name, ver = 1):
self.istream = file_to_buffer(file_name)
self.decoder = amf3.Decoder(self.istream)
self.bytes_read = len(self.istream)
print(file_name, len(self.istream), 'bytes read')
self.reorder_map = {}
self.ver = ver
self.set_version(ver)
def set_version(self, ver):
self.ver = ver
# ------------------------------------------------------------------------------
from vp_poker.vp_poker import *
def save_deck(file_name, deck):
wc = write_context(1) # header version is always 1
h = Header(get_high_version(), 'VP_POKER')
write_Header(wc, h)
wc.set_version(get_high_version())
write_Deck(wc, deck)
bytes_written = buffer_to_file(file_name, wc.encoder)
print('write: ', file_name,
', version=', get_high_version(), sep='', end='')
print(', cards=', len(deck.cards), sep='', end='')
print(', bytes=', bytes_written, sep='')
return(bytes_written)
# ------------------------------------------------------------------------------
def load_deck(file_name):
rc = read_context(file_name)
header = read_Header(rc)
if (not version_check(header.version)):
print('version test failed')
return None
rc.set_version(header.version)
deck = read_Deck(rc)
bytes_read = rc.bytes_read
print('read: ', file_name,
', version=', header.version, sep='', end='')
print(', cards=', len(deck.cards), sep='', end='')
print(', bytes=', bytes_read, sep='')
return(deck)
# ------------------------------------------------------------------------------
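# Hedged usage sketch: build a standard 52-card deck and round-trip it through
# save_deck()/load_deck(). It relies on the write_*/read_* helpers imported
# from vp_poker above; the output file name is arbitrary.
if __name__ == '__main__':
    deck = Deck()
    deck.cards = [Card(name='', id=i) for i in range(1, 53)]
    print(deck.cards[0].get_name()) # 'Ace of Diamonds' for id == 1
    save_deck('deck.bin', deck)
    reloaded = load_deck('deck.bin')
    print(len(reloaded.cards), 'cards after the round trip')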
|
the-stack_106_29983 | import pathlib
import numpy as np
import pytest
import determined as det
from determined.tensorboard import SharedFSTensorboardManager, get_base_path, get_sync_path
from determined.tensorboard.metric_writers import util as metric_writers_util
from determined.tensorboard.util import get_rank_aware_path
BASE_PATH = pathlib.Path(__file__).resolve().parent.joinpath("fixtures")
def get_dummy_env() -> det.EnvContext:
return det.EnvContext(
master_url="",
master_cert_file=None,
master_cert_name=None,
experiment_config={"resources": {"slots_per_trial": 1, "native_parallel": False}},
latest_checkpoint=None,
steps_completed=0,
use_gpu=False,
container_gpus=[],
slot_ids=[],
debug=False,
hparams={"global_batch_size": 1},
det_trial_unique_port_offset=0,
det_trial_id="1",
det_agent_id="1",
det_experiment_id="1",
det_cluster_id="uuid-123",
trial_seed=0,
trial_run_id=1,
allocation_id="",
managed_training=True,
test_mode=False,
on_cluster=False,
)
def test_is_not_numerical_scalar() -> None:
# Invalid types
assert not metric_writers_util.is_numerical_scalar("foo")
assert not metric_writers_util.is_numerical_scalar(np.array("foo"))
assert not metric_writers_util.is_numerical_scalar(object())
# Invalid shapes
assert not metric_writers_util.is_numerical_scalar([1])
assert not metric_writers_util.is_numerical_scalar(np.array([3.14]))
assert not metric_writers_util.is_numerical_scalar(np.ones(shape=(5, 5)))
def test_is_numerical_scalar() -> None:
assert metric_writers_util.is_numerical_scalar(1)
assert metric_writers_util.is_numerical_scalar(1.0)
assert metric_writers_util.is_numerical_scalar(-3.14)
assert metric_writers_util.is_numerical_scalar(np.ones(shape=()))
assert metric_writers_util.is_numerical_scalar(np.array(1))
assert metric_writers_util.is_numerical_scalar(np.array(-3.14))
assert metric_writers_util.is_numerical_scalar(np.array([1.0])[0])
def test_list_tb_files(tmp_path: pathlib.Path) -> None:
env = get_dummy_env()
base_path = get_base_path({"base_path": BASE_PATH})
sync_path = get_sync_path(env.det_cluster_id, env.det_experiment_id, env.det_trial_id)
manager = SharedFSTensorboardManager(str(tmp_path), base_path, sync_path)
test_files = [
"no_show.txt",
"79375caf89e9.kernel_stats.pb",
"79375caf89e9.memory_profile.json.gz",
"events.out.tfevents.example",
]
test_filepaths = [BASE_PATH.joinpath("tensorboard--0", test_file) for test_file in test_files]
tb_files = manager.list_tb_files(0, lambda _: True)
assert set(test_filepaths) == set(tb_files)
def test_list_tb_files_nonexistent_directory(tmp_path: pathlib.Path) -> None:
env = get_dummy_env()
base_path = pathlib.Path("/non-existent-directory")
sync_path = get_sync_path(env.det_cluster_id, env.det_experiment_id, env.det_trial_id)
manager = SharedFSTensorboardManager(str(tmp_path), base_path, sync_path)
assert not pathlib.Path(base_path).exists()
assert manager.list_tb_files(0, lambda _: True) == []
test_data = [
(
"/home/bob/tensorboard/the-host-name.memory_profile.json.gz",
3,
"/home/bob/tensorboard/the-host-name#3.memory_profile.json.gz",
),
(
"/home/bob/tensorboard/the-host-name.some-extension.gz",
2,
"/home/bob/tensorboard/the-host-name.some-extension.gz",
),
]
@pytest.mark.parametrize("path,rank,expected", test_data)
def test_get_rank_aware_path(path: str, rank: int, expected: str) -> None:
actual = get_rank_aware_path(pathlib.Path(path), rank)
assert pathlib.Path(expected) == actual, (expected, actual)
|
the-stack_106_29986 | import numpy as np
def get_hop_distance(num_node, edge, max_hop=1):
    """Return a (num_node, num_node) matrix of hop distances (inf beyond max_hop)."""
    adj_mat = np.zeros((num_node, num_node))
for i, j in edge:
adj_mat[i, j] = 1
adj_mat[j, i] = 1
# compute hop steps
hop_dis = np.zeros((num_node, num_node)) + np.inf
transfer_mat = [
np.linalg.matrix_power(adj_mat, d) for d in range(max_hop + 1)
]
arrive_mat = (np.stack(transfer_mat) > 0)
for d in range(max_hop, -1, -1):
hop_dis[arrive_mat[d]] = d
return hop_dis
def normalize_digraph(adj_matrix):
    """Column-normalize the adjacency matrix (each column is divided by its degree)."""
    Dl = np.sum(adj_matrix, 0)
num_nodes = adj_matrix.shape[0]
Dn = np.zeros((num_nodes, num_nodes))
for i in range(num_nodes):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-1)
norm_matrix = np.dot(adj_matrix, Dn)
return norm_matrix
class Graph:
"""The Graph to model the skeletons extracted by the openpose.
Args:
layout (str): must be one of the following candidates
        - openpose: Consists of 18 joints. For more information, please
refer to
https://github.com/CMU-Perceptual-Computing-Lab/openpose#output
        - ntu-rgb+d: Consists of 25 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
        strategy (str): must be one of the following candidates
- uniform: Uniform Labeling
- distance: Distance Partitioning
- spatial: Spatial Configuration
For more information, please refer to the section 'Partition
Strategies' in our paper (https://arxiv.org/abs/1801.07455).
max_hop (int): the maximal distance between two connected nodes.
            Default: 1
dilation (int): controls the spacing between the kernel points.
Default: 1
"""
def __init__(self,
layout='openpose',
strategy='uniform',
max_hop=1,
dilation=1):
self.max_hop = max_hop
self.dilation = dilation
assert layout in ['openpose', 'ntu-rgb+d', 'ntu_edge', 'coco']
assert strategy in ['uniform', 'distance', 'spatial']
self.get_edge(layout)
self.hop_dis = get_hop_distance(
self.num_node, self.edge, max_hop=max_hop)
self.get_adjacency(strategy)
    def __str__(self):
        return str(self.A)
def get_edge(self, layout):
"""This method returns the edge pairs of the layout."""
if layout == 'openpose':
self.num_node = 18
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5),
(13, 12), (12, 11), (10, 9), (9, 8), (11, 5),
(8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0),
(17, 15), (16, 14)]
self.edge = self_link + neighbor_link
self.center = 1
elif layout == 'ntu-rgb+d':
self.num_node = 25
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (2, 21), (3, 21),
(4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21),
(10, 9), (11, 10), (12, 11), (13, 1), (14, 13),
(15, 14), (16, 15), (17, 1), (18, 17), (19, 18),
(20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
elif layout == 'ntu_edge':
self.num_node = 24
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6),
(8, 7), (9, 2), (10, 9), (11, 10), (12, 11),
(13, 1), (14, 13), (15, 14), (16, 15), (17, 1),
(18, 17), (19, 18), (20, 19), (21, 22), (22, 8),
(23, 24), (24, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 2
elif layout == 'coco':
self.num_node = 17
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13],
[6, 12], [7, 13], [6, 7], [8, 6], [9, 7],
[10, 8], [11, 9], [2, 3], [2, 1], [3, 1], [4, 2],
[5, 3], [4, 6], [5, 7]]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 0
else:
            raise ValueError('This layout does not exist.')
def get_adjacency(self, strategy):
"""This method returns the adjacency matrix according to strategy."""
valid_hop = range(0, self.max_hop + 1, self.dilation)
adjacency = np.zeros((self.num_node, self.num_node))
for hop in valid_hop:
adjacency[self.hop_dis == hop] = 1
normalize_adjacency = normalize_digraph(adjacency)
if strategy == 'uniform':
A = np.zeros((1, self.num_node, self.num_node))
A[0] = normalize_adjacency
self.A = A
elif strategy == 'distance':
A = np.zeros((len(valid_hop), self.num_node, self.num_node))
for i, hop in enumerate(valid_hop):
A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis ==
hop]
self.A = A
elif strategy == 'spatial':
A = []
for hop in valid_hop:
a_root = np.zeros((self.num_node, self.num_node))
a_close = np.zeros((self.num_node, self.num_node))
a_further = np.zeros((self.num_node, self.num_node))
for i in range(self.num_node):
for j in range(self.num_node):
if self.hop_dis[j, i] == hop:
if self.hop_dis[j, self.center] == self.hop_dis[
i, self.center]:
a_root[j, i] = normalize_adjacency[j, i]
elif self.hop_dis[j, self.center] > self.hop_dis[
i, self.center]:
a_close[j, i] = normalize_adjacency[j, i]
else:
a_further[j, i] = normalize_adjacency[j, i]
if hop == 0:
A.append(a_root)
else:
A.append(a_root + a_close)
A.append(a_further)
A = np.stack(A)
self.A = A
else:
            raise ValueError('This strategy does not exist.')
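# Hedged usage sketch: build the COCO-layout graph with the spatial partition
# strategy and inspect the stacked adjacency tensor consumed by ST-GCN-style
# models; with max_hop=1 this yields 3 partitions of shape 17 x 17.
if __name__ == '__main__':
    graph = Graph(layout='coco', strategy='spatial', max_hop=1)
    print(graph.A.shape)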
|
the-stack_106_29988 | # -*- coding: utf-8 -*-
import logging
import pytest
from libtmux import exc
from libtmux.server import Server
from libtmux.test import TEST_SESSION_PREFIX, get_test_session_name, namer
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def server(request):
t = Server()
t.socket_name = 'tmuxp_test%s' % next(namer)
def fin():
t.kill_server()
request.addfinalizer(fin)
return t
@pytest.fixture(scope='function')
def session(request, server):
session_name = 'tmuxp'
if not server.has_session(session_name):
server.cmd('new-session', '-d', '-s', session_name)
# find current sessions prefixed with tmuxp
old_test_sessions = [
s.get('session_name')
for s in server._sessions
if s.get('session_name').startswith(TEST_SESSION_PREFIX)
]
TEST_SESSION_NAME = get_test_session_name(server=server)
try:
session = server.new_session(session_name=TEST_SESSION_NAME)
except exc.LibTmuxException as e:
raise e
"""
Make sure that tmuxp can :ref:`test_builder_visually` and switches to
the newly created session for that testcase.
"""
try:
server.switch_client(session.get('session_id'))
pass
except exc.LibTmuxException as e:
# server.attach_session(session.get('session_id'))
pass
for old_test_session in old_test_sessions:
        logger.debug('Old test session %s found. Killing it.' % old_test_session)
server.kill_session(old_test_session)
assert TEST_SESSION_NAME == session.get('session_name')
assert TEST_SESSION_NAME != 'tmuxp'
return session
@pytest.fixture()
def tmpdir(tmpdir_factory):
fn = tmpdir_factory.mktemp('tmuxp')
return fn
|
the-stack_106_29990 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import math
from sklearn.metrics import auc
import numpy as np
import cv2
import os, sys
int_ = lambda x: int(round(x))
def IoU( r1, r2 ):
x11, y11, w1, h1 = r1
x21, y21, w2, h2 = r2
x12 = x11 + w1; y12 = y11 + h1
x22 = x21 + w2; y22 = y21 + h2
x_overlap = max(0, min(x12,x22) - max(x11,x21) )
y_overlap = max(0, min(y12,y22) - max(y11,y21) )
I = 1. * x_overlap * y_overlap
U = (y12-y11)*(x12-x11) + (y22-y21)*(x22-x21) - I
J = I/U
return J
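# Worked example for IoU(): boxes (0, 0, 2, 2) and (1, 0, 2, 2) overlap in a
# 1x2 strip, so I = 2, U = 4 + 4 - 2 = 6 and the returned score is 1/3.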
def evaluate_iou( rect_gt, rect_pred ):
# score of iou
score = [ IoU(i, j) for i, j in zip(rect_gt, rect_pred) ]
return score
def compute_score( x, w, h ):
# score of response strength
k = np.ones( (h, w) )
score = cv2.filter2D(x, -1, k)
score[:, :w//2] = 0
score[:, math.ceil(-w/2):] = 0
score[:h//2, :] = 0
score[math.ceil(-h/2):, :] = 0
return score
def locate_bbox( a, w, h ):
    # return (x, y, w, h) for a w-by-h box centered at the peak of the score map
    row = np.argmax( np.max(a, axis=1) )
col = np.argmax( np.max(a, axis=0) )
x = col - 1. * w / 2
y = row - 1. * h / 2
return x, y, w, h
def score2curve( score, thres_delta = 0.01 ):
thres = np.linspace( 0, 1, int(1./thres_delta)+1 )
success_num = []
for th in thres:
success_num.append( np.sum(score >= (th+1e-6)) )
success_rate = np.array(success_num) / len(score)
return thres, success_rate
def all_sample_iou( score_list, gt_list):
num_samples = len(score_list)
iou_list = []
for idx in range(num_samples):
score, image_gt = score_list[idx], gt_list[idx]
w, h = image_gt[2:]
pred_rect = locate_bbox( score, w, h )
iou = IoU( image_gt, pred_rect )
iou_list.append( iou )
return iou_list
def plot_success_curve( iou_score, title='' ):
thres, success_rate = score2curve( iou_score, thres_delta = 0.05 )
    # same AUC protocol as used in previous template-matching papers
    auc_ = np.mean( success_rate[:-1] )
    # auc_ = auc( thres, success_rate )  # the exact area under the curve
plt.figure()
plt.grid(True)
plt.xticks(np.linspace(0,1,11))
plt.yticks(np.linspace(0,1,11))
plt.ylim(0, 1)
plt.title(title + 'auc={}'.format(auc_))
plt.plot( thres, success_rate )
plt.show()
|
the-stack_106_29991 | #!/usr/bin/env python
import os
# import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from joblib import Memory
from . import paths
from ..utils import files
from . import viz
_memory = Memory('./')
def _list_csvs(directory):
return files.listFilesInDir(directory, endswith='.csv', absPaths=True)
ELECTRIC_PATHS = _list_csvs(paths.AMPD2_POWER)
GAS_PATHS = _list_csvs(paths.AMPD2_GAS)
WATER_PATHS = _list_csvs(paths.AMPD2_WATER)
WEATHER_PATHS = _list_csvs(paths.AMPD2_WEATHER)
ELECTRIC_COLS = 'UNIX_TS,WHE,RSE,GRE,MHE,B1E,BME,CWE,DWE,EQE,FRE,HPE,OFE,' \
'UTE,WOE,B2E,CDE,DNE,EBE,FGE,HTE,OUE,TVE,UNE'.split(',')
ELECTRIC_DATA_COLS = ELECTRIC_COLS[1:]
# ELECTRIC_DATA_COLS.remove('MHE') # linear combo of other cols
# ELECTRIC_DATA_COLS.remove('UNE') # linear combo of other cols
GAS_DATA_COLS = ['counter', 'avg_rate', 'inst_rate']
WATER_DATA_COLS = ['counter', 'avg_rate']
WEATHER_TIME_COL = 'Date/Time'
WEATHER_DATA_COLS = ['Temp (C)', 'Dew Point Temp (C)', 'Rel Hum (%)',
'Wind Dir (10s deg)', 'Wind Spd (km/h)',
'Visibility (km)', 'Stn Press (kPa)']
WEATHER_ALL_COLS = [WEATHER_TIME_COL] + WEATHER_DATA_COLS
FIG_SAVE_DIR = os.path.join('figs', 'ampds')
# ================================================================ public
class HouseRecording(object):
def __init__(self, path, cols=None):
data = _read_file(path)
self.path = path
self.name = os.path.basename(path).split('.')[0]
self.col_names = cols
self.sampleTimes = data[:, 0]
self.data = data[:, 1:] # XXX have to use all cols after the first
# if 'power' in self.name:
# print "initial sample times: ", self.sampleTimes[:50]
# print
# hack to deal with DWW water not having inst_rate
# self.col_names = self.col_names[:self.data.shape[1]]
self.data = self.data[:, :len(self.col_names)]
class WeatherRecording(object):
def __init__(self):
df = _load_weather_data()
self.name = 'weather'
self.col_names = WEATHER_DATA_COLS
self.sampleTimes = _datetime_strs_to_unix_timestamps(df[WEATHER_TIME_COL])
self.data = df[WEATHER_DATA_COLS].values.astype(np.float32)
# ------------------------ top-level data loading functions
def all_power_recordings():
return [HouseRecording(path, cols=ELECTRIC_DATA_COLS) for path in ELECTRIC_PATHS]
def all_gas_recordings():
return [HouseRecording(path, cols=GAS_DATA_COLS) for path in GAS_PATHS]
def all_water_recordings():
return [HouseRecording(path, cols=WATER_DATA_COLS) for path in WATER_PATHS]
def all_weather_recordings():
return [WeatherRecording()] # just one data file, so just one recording
def all_timestamp_recordings():
all_recordings = all_power_recordings() + all_gas_recordings() + \
all_water_recordings() + all_weather_recordings()
# all_recordings = all_weather_recordings() # TODO rm
for r in all_recordings:
r.data = r.sampleTimes.astype(np.float64)
r.name += '_timestamps'
return all_recordings
# ================================================================ private
# def _read_file(path, cols=None):
@_memory.cache
def _read_file(path):
    df = pd.read_csv(path).fillna(method='backfill')  # backfill gaps with the next valid sample
# if cols is not None and len(cols) > 0:
# timestamps = df[df.columns[0]]
# return df.values.astype(np.int32)
return df.values.astype(np.float64) # need f64 to not lose timestamps
@_memory.cache
def _load_weather_data():
path = WEATHER_PATHS[0]
    df = pd.read_csv(path, sep=',').fillna(method='backfill')  # backfill gaps with the next valid sample
return df[WEATHER_ALL_COLS]
def _datetimes_to_unix_timestamps(datetimes):
# https://stackoverflow.com/q/34038273
return (datetimes.astype(np.int64) / 1e6).astype(np.uint64)
def _datetime_strs_to_unix_timestamps(strs):
return _datetimes_to_unix_timestamps(pd.to_datetime(strs))
# ================================================================ main
def main():
recordings = []
recordings += all_gas_recordings()
recordings += all_water_recordings()
recordings += all_power_recordings()
recordings += all_weather_recordings()
norm_means = False
# norm_means = True
mins_zero = True
viz.plot_recordings(recordings, norm_means=norm_means, mins_zero=mins_zero,
savedir=FIG_SAVE_DIR)
# plt.show()
if __name__ == '__main__':
main()
|
the-stack_106_29993 | import os
import psutil
import time
def GetOtherMainProcesses():
this_pid = psutil.Process().pid
pids = set()
for proc in psutil.process_iter():
pid = proc.pid
ppid = proc.ppid()
if pid == 1 or pid == this_pid or ppid != 0:
# ignore the pause container, our own pid, and non-root processes
continue
pids.add(pid)
return pids
def WaitPIDs(pids, poll_interval_seconds=1, timeout_seconds=0, is_wait_all=False, completed_marked_dir=""):
    """Poll /proc until the given PIDs exit (all of them when is_wait_all is set,
    otherwise just the first); optionally verify each exited PID wrote 'completed'
    to <completed_marked_dir>/<pid>.pid."""
    start = 0
pids = set(pids)
if poll_interval_seconds <= 0:
raise Exception("Poll interval seconds must be a positive integer")
while (timeout_seconds <= 0 or start < timeout_seconds) and len(pids) > 0:
stop_pids = set()
for pid in pids:
path = "/proc/%d" % pid
if os.path.isdir(path):
continue
else:
if completed_marked_dir:
mark_file = os.path.join(completed_marked_dir, "%d.pid" % pid)
with open(mark_file) as file_obj:
contents = file_obj.read()
if contents.strip() != "completed":
raise Exception("Pid %d hadn't completed" % pid)
if is_wait_all:
stop_pids.add(pid)
else:
return
if is_wait_all:
pids = pids - stop_pids
time.sleep(poll_interval_seconds)
start = start + poll_interval_seconds
def WaitOtherMainProcesses(poll_interval_seconds=1, timeout_seconds=0, is_wait_all=False, completed_marked_dir=""):
return WaitPIDs(GetOtherMainProcesses(), poll_interval_seconds, timeout_seconds, is_wait_all, completed_marked_dir)
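# Hedged usage sketch: block until every other root-parented main process in
# the pod has exited, polling once per second with no timeout and without
# checking completion-marker files.
if __name__ == '__main__':
    WaitOtherMainProcesses(poll_interval_seconds=1, timeout_seconds=0,
                           is_wait_all=True)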
|
the-stack_106_29994 | import os
import sys
import warnings
import logging as log
from typing import Union, Optional, List, Dict, Tuple, Iterable
from argparse import ArgumentParser
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from torch.optim.optimizer import Optimizer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.profiler.profiler import BaseProfiler
from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
from pytorch_lightning.trainer.distrib_parts import (
TrainerDPMixin,
parse_gpu_ids,
determine_root_gpu_device
)
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.training_io import TrainerIOMixin
from pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.utilities.debugging import MisconfigurationException
from pytorch_lightning.profiler import Profiler, PassThroughProfiler
from pytorch_lightning.callbacks import Callback
try:
from apex import amp
except ImportError:
APEX_AVAILABLE = False
else:
APEX_AVAILABLE = True
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class Trainer(TrainerIOMixin,
TrainerDPMixin,
TrainerDDPMixin,
TrainerLoggingMixin,
TrainerModelHooksMixin,
TrainerTrainingTricksMixin,
TrainerDataLoadingMixin,
TrainerAMPMixin,
TrainerEvaluationLoopMixin,
TrainerTrainLoopMixin,
TrainerCallbackConfigMixin,
TrainerCallbackHookMixin
):
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: Union[ModelCheckpoint, bool] = True,
early_stop_callback: Optional[Union[EarlyStopping, bool]] = False,
callbacks: List[Callback] = [],
default_save_path: Optional[str] = None,
gradient_clip_val: float = 0,
gradient_clip=None, # backward compatible, todo: remove in v0.8.0
process_position: int = 0,
nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0
num_nodes: int = 1,
gpus: Optional[Union[List[int], str, int]] = None,
num_tpu_cores: Optional[int] = None,
log_gpu_memory: Optional[str] = None,
show_progress_bar: bool = True,
progress_bar_refresh_rate: int = 50,
overfit_pct: float = 0.0,
track_grad_norm: int = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: bool = False,
accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,
max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
max_epochs: int = 1000,
min_epochs: int = 1,
max_steps: Optional[int] = None,
min_steps: Optional[int] = None,
train_percent_check: float = 1.0,
val_percent_check: float = 1.0,
test_percent_check: float = 1.0,
val_check_interval: float = 1.0,
log_save_interval: int = 100,
row_log_interval: int = 10,
add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0
distributed_backend: Optional[str] = None,
use_amp=False, # backward compatible, todo: remove in v0.8.0
precision: int = 32,
print_nan_grads: bool = False,
weights_summary: str = 'full',
weights_save_path: Optional[str] = None,
amp_level: str = 'O1',
nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0
num_sanity_val_steps: int = 5,
truncated_bptt_steps: Optional[int] = None,
resume_from_checkpoint: Optional[str] = None,
profiler: Optional[BaseProfiler] = None,
benchmark: bool = False,
reload_dataloaders_every_epoch: bool = False,
**kwargs
):
r"""
Customize every aspect of training via flags
Args:
logger: Logger (or iterable collection of loggers) for experiment tracking.
Example::
from pytorch_lightning.loggers import TensorBoardLogger
# default logger used by trainer
logger = TensorBoardLogger(
save_dir=os.getcwd(),
version=self.slurm_job_id,
name='lightning_logs'
)
Trainer(logger=logger)
checkpoint_callback: Callback for checkpointing.
Example::
from pytorch_lightning.callbacks import ModelCheckpoint
# default used by the Trainer
checkpoint_callback = ModelCheckpoint(
filepath=os.getcwd(),
save_best_only=True,
verbose=True,
monitor='val_loss',
mode='min',
prefix=''
)
trainer = Trainer(checkpoint_callback=checkpoint_callback)
early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):
Callback for early stopping.
If set to ``True``, then a default callback monitoring ``'val_loss'`` is created.
Will raise an error if ``'val_loss'`` is not found.
If set to ``False``, then early stopping will be disabled.
If set to ``None``, then the default callback monitoring ``'val_loss'`` is created.
                If ``'val_loss'`` is not found, it will behave as if early stopping is disabled.
                Default: ``False``.
Example::
from pytorch_lightning.callbacks import EarlyStopping
# default used by the Trainer
early_stop_callback = EarlyStopping(
monitor='val_loss',
patience=3,
strict=False,
verbose=False,
mode='min'
)
trainer = Trainer(early_stop_callback=early_stop_callback)
callbacks: Add a list of callbacks.
Example::
from pytorch_lightning.callbacks import Callback
class PrintCallback(Callback):
def on_train_start(self):
print("Training is started!")
def on_train_end(self):
print(f"Training is done. The logs are: {self.trainer.logs}")
# a list of callbacks
callbacks = [PrintCallback()]
trainer = Trainer(callbacks=callbacks)
default_save_path: Default path for logs and weights when no logger/ckpt_callback passed
Example::
# default used by the Trainer
trainer = Trainer(default_save_path=os.getcwd())
gradient_clip_val: 0 means don't clip.
Example::
# default used by the Trainer
trainer = Trainer(gradient_clip_val=0.0)
gradient_clip:
                .. warning:: .. deprecated:: 0.5.0
Use `gradient_clip_val` instead. Will remove 0.8.0.
process_position: orders the tqdm bar when running multiple models on same machine.
Example::
# default used by the Trainer
trainer = Trainer(process_position=0)
num_nodes: number of GPU nodes for distributed training.
Example::
# default used by the Trainer
trainer = Trainer(num_nodes=1)
# to train on 8 nodes
trainer = Trainer(num_nodes=8)
nb_gpu_nodes:
                .. warning:: .. deprecated:: 0.5.0
Use `num_nodes` instead. Will remove 0.8.0.
gpus: Which GPUs to train on.
Example::
# default used by the Trainer (ie: train on CPU)
trainer = Trainer(gpus=None)
# int: train on 2 gpus
trainer = Trainer(gpus=2)
# list: train on GPUs 1, 4 (by bus ordering)
trainer = Trainer(gpus=[1, 4])
trainer = Trainer(gpus='1, 4') # equivalent
# -1: train on all gpus
trainer = Trainer(gpus=-1)
trainer = Trainer(gpus='-1') # equivalent
# combine with num_nodes to train on multiple GPUs across nodes
trainer = Trainer(gpus=2, num_nodes=4) # uses 8 gpus in total
num_tpu_cores: How many TPU cores to train on (1 or 8).
A single TPU v2 or v3 has 8 cores. A TPU pod has
up to 2048 cores. A slice of a POD means you get as many cores
as you request.
You MUST use DistributedDataSampler with your dataloader for this
to work. Your effective batch size is batch_size * total tpu cores.
This parameter can be either 1 or 8.
Example::
# your_trainer_file.py
# default used by the Trainer (ie: train on CPU)
trainer = Trainer(num_tpu_cores=None)
# int: train on a single core
trainer = Trainer(num_tpu_cores=1)
                    # int: train on all 8 cores
trainer = Trainer(num_tpu_cores=8)
# for 8+ cores must submit via xla script with
# a max of 8 cores specified. The XLA script
# will duplicate script onto each TPU in the POD
trainer = Trainer(num_tpu_cores=8)
# -1: train on all available TPUs
trainer = Trainer(num_tpu_cores=-1)
To train on more than 8 cores (ie: a POD),
submit this script using the xla_dist script.
Example::
$ python -m torch_xla.distributed.xla_dist
--tpu=$TPU_POD_NAME
--conda-env=torch-xla-nightly
--env=XLA_USE_BF16=1
-- python your_trainer_file.py
log_gpu_memory: None, 'min_max', 'all'. Might slow performance
because it uses the output of nvidia-smi.
Example::
# default used by the Trainer
trainer = Trainer(log_gpu_memory=None)
# log all the GPUs (on master node only)
trainer = Trainer(log_gpu_memory='all')
# log only the min and max memory on the master node
trainer = Trainer(log_gpu_memory='min_max')
show_progress_bar: If true shows tqdm progress bar
Example::
# default used by the Trainer
trainer = Trainer(show_progress_bar=True)
progress_bar_refresh_rate: How often to refresh progress bar (in steps)
overfit_pct: uses this much data of all datasets.
Example::
# default used by the Trainer
trainer = Trainer(overfit_pct=0.0)
# use only 1% of the train, test, val datasets
trainer = Trainer(overfit_pct=0.01)
track_grad_norm: -1 no tracking. Otherwise tracks that norm
Example::
# default used by the Trainer
trainer = Trainer(track_grad_norm=-1)
# track the 2-norm
trainer = Trainer(track_grad_norm=2)
check_val_every_n_epoch: Check val every n train epochs.
Example::
# default used by the Trainer
trainer = Trainer(check_val_every_n_epoch=1)
# run val loop every 10 training epochs
trainer = Trainer(check_val_every_n_epoch=10)
fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).
Example::
# default used by the Trainer
trainer = Trainer(fast_dev_run=False)
# runs 1 train, val, test batch and program ends
trainer = Trainer(fast_dev_run=True)
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
Example::
# default used by the Trainer (no accumulation)
trainer = Trainer(accumulate_grad_batches=1)
# accumulate every 4 batches (effective batch size is batch*4)
trainer = Trainer(accumulate_grad_batches=4)
# no accumulation for epochs 1-4. accumulate 3 for epochs 5-10. accumulate 20 after that
trainer = Trainer(accumulate_grad_batches={5: 3, 10: 20})
max_epochs: Stop training once this number of epochs is reached.
Example::
# default used by the Trainer
trainer = Trainer(max_epochs=1000)
max_nb_epochs:
.. warning:: .. deprecated:: 0.5.0
Use `max_epochs` instead. Will remove 0.8.0.
min_epochs: Force training for at least these many epochs
Example::
# default used by the Trainer
trainer = Trainer(min_epochs=1)
min_nb_epochs:
.. warning:: .. deprecated:: 0.5.0
                Use `min_epochs` instead. Will remove 0.8.0.
max_steps: Stop training after this number of steps. Disabled by default (None).
Training will stop if max_steps or max_epochs have reached (earliest).
Example::
# Stop after 100 steps
trainer = Trainer(max_steps=100)
min_steps: Force training for at least these number of steps. Disabled by default (None).
Trainer will train model for at least min_steps or min_epochs (latest).
Example::
# Run at least for 100 steps (disable min_epochs)
trainer = Trainer(min_steps=100, min_epochs=0)
train_percent_check: How much of training dataset to check.
Useful when debugging or testing something that happens at the end of an epoch.
Example::
# default used by the Trainer
trainer = Trainer(train_percent_check=1.0)
# run through only 25% of the training set each epoch
trainer = Trainer(train_percent_check=0.25)
val_percent_check: How much of validation dataset to check.
Useful when debugging or testing something that happens at the end of an epoch.
Example::
# default used by the Trainer
trainer = Trainer(val_percent_check=1.0)
# run through only 25% of the validation set each epoch
trainer = Trainer(val_percent_check=0.25)
test_percent_check: How much of test dataset to check.
Useful when debugging or testing something that happens at the end of an epoch.
Example::
# default used by the Trainer
trainer = Trainer(test_percent_check=1.0)
# run through only 25% of the test set each epoch
trainer = Trainer(test_percent_check=0.25)
val_check_interval: How often within one training epoch to check the validation set
                If float, fraction of the training epoch. If int, check every n training batches.
Example::
# default used by the Trainer
trainer = Trainer(val_check_interval=1.0)
# check validation set 4 times during a training epoch
trainer = Trainer(val_check_interval=0.25)
# check validation set every 1000 training batches
# use this when using iterableDataset and your dataset has no length
# (ie: production cases with streaming data)
trainer = Trainer(val_check_interval=1000)
log_save_interval: Writes logs to disk this often
Example::
# default used by the Trainer
trainer = Trainer(log_save_interval=100)
row_log_interval: How often to add logging rows (does not write to disk)
Example::
# default used by the Trainer
trainer = Trainer(row_log_interval=10)
add_row_log_interval:
.. warning:: .. deprecated:: 0.5.0
Use `row_log_interval` instead. Will remove 0.8.0.
distributed_backend: The distributed backend to use.
Options: 'dp', 'ddp', 'ddp2'.
Example::
# default used by the Trainer
trainer = Trainer(distributed_backend=None)
# dp = DataParallel (split a batch onto k gpus on same machine).
trainer = Trainer(gpus=2, distributed_backend='dp')
# ddp = DistributedDataParallel
# Each gpu trains by itself on a subset of the data.
# Gradients sync across all gpus and all machines.
trainer = Trainer(gpus=2, num_nodes=2, distributed_backend='ddp')
# ddp2 = DistributedDataParallel + dp
# behaves like dp on every node
# syncs gradients across nodes like ddp
# useful for things like increasing the number of negative samples
trainer = Trainer(gpus=2, num_nodes=2, distributed_backend='ddp2')
use_amp:
.. warning:: .. deprecated:: 0.6.1
Use `precision` instead. Will remove 0.8.0.
precision: Full precision (32), half precision (16).
Can be used on CPU, GPU or TPUs.
If used on TPU will use torch.bfloat16 but tensor printing
will still show torch.float32.
Example::
# default used by the Trainer
trainer = Trainer(precision=32)
# 16-bit precision
trainer = Trainer(precision=16)
# one day
trainer = Trainer(precision=8|4|2)
print_nan_grads: Prints gradients with nan values
Example::
# default used by the Trainer
trainer = Trainer(print_nan_grads=False)
weights_summary: Prints a summary of the weights when training begins.
Options: 'full', 'top', None.
Example::
# default used by the Trainer (ie: print all weights)
trainer = Trainer(weights_summary='full')
# print only the top level modules
trainer = Trainer(weights_summary='top')
# don't print a summary
trainer = Trainer(weights_summary=None)
weights_save_path: Where to save weights if specified.
Example::
# default used by the Trainer
trainer = Trainer(weights_save_path=os.getcwd())
# save to your custom path
trainer = Trainer(weights_save_path='my/path')
# if checkpoint callback used, then overrides the weights path
# **NOTE: this saves weights to some/path NOT my/path
checkpoint_callback = ModelCheckpoint(filepath='some/path')
trainer = Trainer(
checkpoint_callback=checkpoint_callback,
weights_save_path='my/path'
)
amp_level: The optimization level to use (O1, O2, etc...).
Check nvidia docs for level (https://nvidia.github.io/apex/amp.html#opt-levels)
Example::
# default used by the Trainer
trainer = Trainer(amp_level='O1')
num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine.
This catches any bugs in your validation without having to wait for the first validation check.
The Trainer uses 5 steps by default. Turn it off or modify it here.
Example::
# default used by the Trainer
trainer = Trainer(num_sanity_val_steps=5)
# turn it off
trainer = Trainer(num_sanity_val_steps=0)
nb_sanity_val_steps:
.. warning:: .. deprecated:: 0.5.0
Use `num_sanity_val_steps` instead. Will remove 0.8.0.
            truncated_bptt_steps: Truncated backpropagation performs backprop every k steps of
                a much longer sequence. If this is enabled, your batches will automatically get truncated
and the trainer will apply Truncated Backprop to it. Make sure your batches have a sequence
dimension. (`Williams et al. "An efficient gradient-based algorithm for on-line training of
recurrent network trajectories."
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.56.7941&rep=rep1&type=pdf>`_)
Example::
# default used by the Trainer (ie: disabled)
trainer = Trainer(truncated_bptt_steps=None)
# backprop every 5 steps in a batch
trainer = Trainer(truncated_bptt_steps=5)
Lightning takes care to split your batch along the time-dimension.
.. note:: If you need to modify how the batch is split,
override :meth:`pytorch_lightning.core.LightningModule.tbptt_split_batch`.
.. note:: Using this feature requires updating your LightningModule's
:meth:`pytorch_lightning.core.LightningModule.training_step` to include a `hiddens` arg.
            resume_from_checkpoint: To resume training from a specific checkpoint, pass in the path here.
Example::
# default used by the Trainer
trainer = Trainer(resume_from_checkpoint=None)
# resume from a specific checkpoint
trainer = Trainer(resume_from_checkpoint='some/path/to/my_checkpoint.ckpt')
profiler: To profile individual steps during training and assist in
identifying bottlenecks.
Example::
from pytorch_lightning.profiler import Profiler, AdvancedProfiler
# default used by the Trainer
trainer = Trainer(profiler=None)
# to profile standard training events
trainer = Trainer(profiler=True)
# equivalent to profiler=True
profiler = Profiler()
trainer = Trainer(profiler=profiler)
# advanced profiler for function-level stats
profiler = AdvancedProfiler()
trainer = Trainer(profiler=profiler)
reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch
benchmark (bool): If true enables cudnn.benchmark.
This flag is likely to increase the speed of your system if your
input sizes don't change. However, if it does, then it will likely
make your system slower.
The speedup comes from allowing the cudnn auto-tuner to find the best
algorithm for the hardware `[see discussion here]
<https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936>`_.
        .. warning:: The following arguments are deprecated and will be removed in v0.8.0:
- `nb_sanity_val_steps`
"""
# Init callbacks
self.callbacks = callbacks
self.on_init_start()
# benchmarking
self.benchmark = benchmark
if benchmark:
torch.backends.cudnn.benchmark = True
# Transfer params
# Backward compatibility
self.num_nodes = num_nodes
if nb_gpu_nodes is not None:
warnings.warn("`nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not num_nodes: # in case you did not set the proper value
num_nodes = nb_gpu_nodes
self.num_gpu_nodes = num_nodes
self.log_gpu_memory = log_gpu_memory
# Backward compatibility
if gradient_clip is not None:
warnings.warn("`gradient_clip` has renamed to `gradient_clip_val` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not gradient_clip_val: # in case you did not set the proper value
gradient_clip_val = gradient_clip
self.gradient_clip_val = gradient_clip_val
self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch
self.progress_bar_refresh_rate = progress_bar_refresh_rate
self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
# tpu config
self.on_tpu = num_tpu_cores is not None
self.num_tpu_cores = num_tpu_cores
        assert num_tpu_cores in [1, 8, None], 'num_tpu_cores can only be 1, 8 or None'
self.process_position = process_position
self.weights_summary = weights_summary
# Backward compatibility
if max_nb_epochs is not None:
warnings.warn("`max_nb_epochs` has renamed to `max_epochs` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not max_epochs: # in case you did not set the proper value
max_epochs = max_nb_epochs
self.max_epochs = max_epochs
# Backward compatibility
if min_nb_epochs is not None:
warnings.warn("`min_nb_epochs` has renamed to `min_epochs` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not min_epochs: # in case you did not set the proper value
min_epochs = min_nb_epochs
self.min_epochs = min_epochs
self.max_steps = max_steps
self.min_steps = min_steps
# Backward compatibility
if nb_sanity_val_steps is not None:
warnings.warn("`nb_sanity_val_steps` has renamed to `num_sanity_val_steps` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not num_sanity_val_steps: # in case you did not set the proper value
num_sanity_val_steps = nb_sanity_val_steps
self.num_sanity_val_steps = num_sanity_val_steps
self.print_nan_grads = print_nan_grads
self.truncated_bptt_steps = truncated_bptt_steps
self.resume_from_checkpoint = resume_from_checkpoint
self.shown_warnings = set()
self.fast_dev_run = fast_dev_run
if self.fast_dev_run:
self.num_sanity_val_steps = 1
self.max_epochs = 1
m = '''
Running in fast_dev_run mode: will run a full train,
val loop using a single batch
'''
log.info(m)
# set default save path if user didn't provide one
self.default_save_path = default_save_path
if self.default_save_path is None:
self.default_save_path = os.getcwd()
        # training bookkeeping
self.total_batch_idx = 0
self.running_loss = []
self.avg_loss = 0
self.batch_idx = 0
self.tqdm_metrics = {}
self.callback_metrics = {}
self.num_val_batches = 0
self.num_training_batches = 0
self.num_test_batches = 0
self.train_dataloader = None
self.test_dataloaders = None
self.val_dataloaders = None
# training state
self.model = None
self.testing = False
self.disable_validation = False
self.lr_schedulers = []
self.optimizers = None
self.global_step = 0
self.current_epoch = 0
self.total_batches = 0
# configure logger
self.configure_logger(logger)
# configure profiler
if profiler is True:
profiler = Profiler()
self.profiler = profiler or PassThroughProfiler()
# configure early stop callback
# creates a default one if none passed in
self.configure_early_stopping(early_stop_callback)
self.reduce_lr_on_plateau_scheduler = None
# configure checkpoint callback
self.checkpoint_callback = checkpoint_callback
self.weights_save_path = weights_save_path
# accumulated grads
self.accumulate_grad_batches = accumulate_grad_batches
self.configure_accumulated_gradients(accumulate_grad_batches)
# allow int, string and gpu list
self.gpus = gpus
self.data_parallel_device_ids = parse_gpu_ids(self.gpus)
self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)
# tpu state flags
self.use_tpu = False
self.tpu_local_core_rank = None
self.tpu_global_core_rank = None
# distributed backend choice
self.use_ddp = False
self.use_ddp2 = False
self.use_dp = False
self.single_gpu = False
self.distributed_backend = distributed_backend
self.set_distributed_mode(distributed_backend, num_nodes)
# override dist backend when using tpus
if self.on_tpu:
self.init_tpu()
self.current_tpu_idx = None
# init flags for SLURM+ddp to work
self.proc_rank = 0
self.world_size = 1
self.node_rank = 0
self.configure_slurm_ddp(num_nodes)
# nvidia setup
self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)
# can't init progress bar here because starting a new process
# means the progress_bar won't survive pickling
self.show_progress_bar = show_progress_bar
# logging
self.log_save_interval = log_save_interval
self.val_check_interval = val_check_interval
# backward compatibility
if add_row_log_interval is not None:
warnings.warn("`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not row_log_interval: # in case you did not set the proper value
row_log_interval = add_row_log_interval
self.row_log_interval = row_log_interval
# how much of the data to use
self.overfit_pct = overfit_pct
self.determine_data_use_amount(train_percent_check, val_percent_check,
test_percent_check, overfit_pct)
# 16 bit mixed precision training using apex
self.amp_level = amp_level
self.precision = precision
assert self.precision in (16, 32), 'only 32 or 16 bit precision supported'
if self.precision == 16 and num_tpu_cores is None:
use_amp = True
self.init_amp(use_amp)
# Callback system
self.on_init_end()
@property
def slurm_job_id(self) -> int:
try:
job_id = os.environ['SLURM_JOB_ID']
job_id = int(job_id)
except Exception:
job_id = None
return job_id
@classmethod
def default_attributes(cls):
return vars(cls())
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:
"""Extend existing argparse by default `Trainer` attributes."""
parser = ArgumentParser(parents=[parent_parser])
trainer_default_params = Trainer.default_attributes()
for arg in trainer_default_params:
parser.add_argument('--{0}'.format(arg), default=trainer_default_params[arg], dest=arg)
return parser
@classmethod
def from_argparse_args(cls, args):
params = vars(args)
return cls(**params)
def __parse_gpu_ids(self, gpus):
"""Parse GPUs id.
:param list|str|int gpus: input GPU ids
:return list(int):
"""
# if gpus = -1 then use all available devices
# otherwise, split the string using commas
if gpus is not None:
if isinstance(gpus, list):
gpus = gpus
elif isinstance(gpus, str):
if gpus == '-1':
gpus = list(range(0, torch.cuda.device_count()))
else:
gpus = [int(x.strip()) for x in gpus.split(',')]
elif isinstance(gpus, int):
gpus = gpus
else:
raise ValueError('`gpus` has to be a string, int or list of ints')
return gpus
def __set_root_gpu(self, gpus):
if gpus is None:
return None
# set root gpu
root_gpu = 0
if isinstance(gpus, list):
root_gpu = gpus[0]
return root_gpu
@property
def num_gpus(self) -> int:
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
return len(gpus)
@property
def data_parallel(self) -> bool:
return self.use_dp or self.use_ddp or self.use_ddp2
@property
def training_tqdm_dict(self) -> dict:
"""Read-only for tqdm metrics.
:return:
"""
ref_model = self.model if not self.data_parallel else self.model.module
return dict(**ref_model.get_tqdm_dict(), **self.tqdm_metrics)
@property
def tng_tqdm_dic(self):
"""Read-only for tqdm metrics.
:return: dictionary
.. warning:: .. deprecated:: 0.5.0
Use `training_tqdm_dict` instead. Will remove 0.8.0.
"""
warnings.warn("`tng_tqdm_dic` has renamed to `training_tqdm_dict` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
return self.training_tqdm_dict
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(
self,
model: LightningModule,
train_dataloader: Optional[DataLoader] = None,
val_dataloaders: Optional[DataLoader] = None,
test_dataloaders: Optional[DataLoader] = None
):
r"""
Runs the full optimization routine.
Args:
model: Model to fit.
train_dataloader: A Pytorch
DataLoader with training samples. If the model has
a predefined train_dataloader method this will be skipped.
val_dataloaders: Either a single
Pytorch Dataloader or a list of them, specifying validation samples.
If the model has a predefined val_dataloaders method this will be skipped
test_dataloaders: Either a single
                Pytorch Dataloader or a list of them, specifying test samples.
If the model has a predefined test_dataloaders method this will be skipped
Example::
# Option 1,
# Define the train_dataloader(), test_dataloader() and val_dataloader() fxs
# in the lightningModule
# RECOMMENDED FOR MOST RESEARCH AND APPLICATIONS TO MAINTAIN READABILITY
trainer = Trainer()
model = LightningModule()
trainer.fit(model)
# Option 2
# in production cases we might want to pass different datasets to the same model
# Recommended for PRODUCTION SYSTEMS
train, val, test = DataLoader(...), DataLoader(...), DataLoader(...)
trainer = Trainer()
model = LightningModule()
                trainer.fit(model, train_dataloader=train,
                            val_dataloaders=val, test_dataloaders=test)
# Option 1 & 2 can be mixed, for example the training set can be
# defined as part of the model, and validation/test can then be
# feed to .fit()
"""
# bind logger
model.logger = self.logger
# set up the passed in dataloaders (if needed)
self.__set_fit_dataloaders(model, train_dataloader, val_dataloaders, test_dataloaders)
# route to appropriate start method
# when using multi-node or DDP within a node start each module in a separate process
if self.use_ddp2:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
elif self.use_ddp:
if self.is_slurm_managing_tasks:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
else:
self.__set_random_port()
# track for predict
self.model = model
# train
mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
# load weights if not interrupted
self.load_spawn_weights(model)
self.model = model
# 1 gpu or dp option triggers training using DP module
# easier to avoid NCCL issues
elif self.use_dp:
self.dp_train(model)
elif self.single_gpu:
self.single_gpu_train(model)
elif self.use_tpu:
log.info(f'training on {self.num_tpu_cores} TPU cores')
# COLAB_GPU is an env var available by default in Colab environments.
start_method = 'fork' if os.getenv('COLAB_GPU') else 'spawn'
# track for predict
self.model = model
# train
xmp.spawn(self.tpu_train, args=(model,), nprocs=self.num_tpu_cores, start_method=start_method)
# load weights if not interrupted
self.load_spawn_weights(model)
self.model = model
# ON CPU
else:
# run through amp wrapper
if self.use_amp:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
self.run_pretrain_routine(model)
# return 1 when finished
# used for testing or when we need to know that training succeeded
return 1
def __set_random_port(self):
"""
When running DDP NOT managed by SLURM, the ports might collide
:return:
"""
try:
default_port = os.environ['MASTER_PORT']
except Exception:
import random
default_port = random.randint(10000, 19000)
os.environ['MASTER_PORT'] = str(default_port)
def __set_fit_dataloaders(self, model, train_dataloader, val_dataloaders, test_dataloaders):
# when dataloader is passed via fit, patch the train_dataloader
# functions to overwrite with these implementations
if train_dataloader is not None:
if not self.is_overriden('training_step', model):
m = 'You called .fit() with a train_dataloader but did not define training_step()'
raise MisconfigurationException(m)
model.train_dataloader = _PatchDataLoader(train_dataloader)
if val_dataloaders is not None:
if not self.is_overriden('validation_step', model):
m = 'You called .fit() with a val_dataloaders but did not define validation_step()'
raise MisconfigurationException(m)
model.val_dataloader = _PatchDataLoader(val_dataloaders)
if test_dataloaders is not None:
if not self.is_overriden('test_step', model):
m = 'You called .fit() with a test_dataloaders but did not define test_step()'
raise MisconfigurationException(m)
model.test_dataloader = _PatchDataLoader(test_dataloaders)
def init_optimizers(
self,
optimizers: Union[Optimizer, Tuple[List, List], List[Optimizer], Tuple[Optimizer]]
) -> Tuple[List, List]:
# single optimizer
if isinstance(optimizers, Optimizer):
return [optimizers], []
# two lists
if len(optimizers) == 2 and isinstance(optimizers[0], list):
optimizers, lr_schedulers = optimizers
lr_schedulers, self.reduce_lr_on_plateau_scheduler = self.configure_schedulers(lr_schedulers)
return optimizers, lr_schedulers
# single list or tuple
if isinstance(optimizers, (list, tuple)):
return optimizers, []
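        # A quick reference for the return formats handled above (a sketch with
        # typical values; `Adam`, `SGD` and `StepLR` are just illustrative torch objects):
        #     return Adam(params)                      -> single optimizer, no schedulers
        #     return [Adam(params), SGD(params)]       -> list/tuple of optimizers, no schedulers
        #     return [Adam(params)], [StepLR(opt)]     -> two lists: optimizers and lr schedulers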
def configure_schedulers(self, schedulers: list):
for i, scheduler in enumerate(schedulers):
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
reduce_lr_on_plateau_scheduler = schedulers.pop(i)
return schedulers, reduce_lr_on_plateau_scheduler
return schedulers, None
def run_pretrain_routine(self, model: LightningModule):
"""Sanity check a few things before starting actual training.
Args:
model: The model to run sanity test on.
"""
ref_model = model
if self.data_parallel:
ref_model = model.module
# give model convenience properties
ref_model.trainer = self
# set local properties on the model
self.copy_trainer_model_properties(ref_model)
# log hyper-parameters
if self.logger is not None:
# save exp to get started
if hasattr(ref_model, "hparams"):
self.logger.log_hyperparams(ref_model.hparams)
self.logger.save()
if self.use_ddp or self.use_ddp2:
dist.barrier()
# wait for all models to restore weights
if self.on_tpu and XLA_AVAILABLE:
# wait for all processes to catch up
torch_xla.core.xla_model.rendezvous("pl.Trainer.run_pretrain_routine")
# set up checkpoint callback
self.configure_checkpoint_callback()
# register auto-resubmit when on SLURM
self.register_slurm_signal_handlers()
# print model summary
# TODO: remove self.testing condition because model.summarize() is wiping out the weights
if self.proc_rank == 0 and self.weights_summary is not None and not self.testing:
if self.weights_summary in ['full', 'top']:
ref_model.summarize(mode=self.weights_summary)
else:
m = "weights_summary can be None, 'full' or 'top'"
raise MisconfigurationException(m)
# track model now.
# if cluster resets state, the model will update with the saved weights
self.model = model
# restore training and model before hpc call
self.restore_weights(model)
# download the data and do whatever transforms we need
self.call_prepare_data(ref_model)
# when testing requested only run test and return
if self.testing:
# only load test dataloader for testing
# self.reset_test_dataloader(ref_model)
self.run_evaluation(test_mode=True)
return
# check if we should run validation during training
self.disable_validation = not self.is_overriden('validation_step') and not self.fast_dev_run
# run tiny validation (if validation defined)
# to make sure program won't crash during val
ref_model.on_sanity_check_start()
if not self.disable_validation and self.num_sanity_val_steps > 0:
self.reset_val_dataloader(ref_model)
# init progress bars for validation sanity check
pbar = tqdm(desc='Validation sanity check',
total=self.num_sanity_val_steps * len(self.val_dataloaders),
leave=False, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True)
self.main_progress_bar = pbar
# dummy validation progress bar
self.val_progress_bar = tqdm(disable=True)
eval_results = self.evaluate(model,
self.val_dataloaders,
self.num_sanity_val_steps,
False)
_, _, _, callback_metrics, _ = self.process_output(eval_results)
# close progress bars
self.main_progress_bar.close()
self.val_progress_bar.close()
if self.enable_early_stop:
self.early_stop_callback.check_metrics(callback_metrics)
# init progress bar
pbar = tqdm(leave=True, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True,
file=sys.stdout)
self.main_progress_bar = pbar
# clear cache before training
if self.on_gpu:
torch.cuda.empty_cache()
# CORE TRAINING LOOP
self.train()
def test(self, model: Optional[LightningModule] = None):
r"""
Separates from fit to make sure you never run on your test set until you want to.
Args:
model (:class:`.LightningModule`): The model to test.
Example::
# Option 1
# run test after fitting
trainer = Trainer()
model = LightningModule()
            trainer.fit(model)
trainer.test()
# Option 2
# run test from a loaded model
model = LightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')
trainer = Trainer()
trainer.test(model)
"""
self.testing = True
if model is not None:
self.model = model
self.fit(model)
elif self.use_ddp or self.use_tpu:
# attempt to load weights from a spawn
path = os.path.join(self.default_save_path, '__temp_weight_ddp_end.ckpt')
test_model = self.model
if os.path.exists(path):
test_model = self.load_spawn_weights(self.model)
self.fit(test_model)
else:
self.run_evaluation(test_mode=True)
class _PatchDataLoader(object):
r'''
Callable object for patching dataloaders passed into trainer.fit().
Use this class to override model.*_dataloader() and be pickle-compatible.
Args:
dataloader: Dataloader object to return when called.
'''
def __init__(self, dataloader: Union[List[DataLoader], DataLoader]):
self.dataloader = dataloader
def __call__(self) -> Union[List[DataLoader], DataLoader]:
return self.dataloader
def _set_dataloader(model, dataloader, attribute):
r'''
    Check whether the dataloaders passed to the .fit() method are pytorch
    DataLoader objects and whether or not we should overwrite the
    corresponding dataloader in the model
    Args:
        model (LightningModule): The model to check
        dataloader: If a pytorch dataloader (or a list of pytorch dataloaders)
            is passed, it will be incorporated into the model as model.attribute.
            If the attribute is already defined by the user, a warning is issued
            and the passed dataloader is skipped. If the argument is not a
            dataloader, an error is raised.
        attribute (str): The attribute to save the dataloader under
'''
# Check if attribute comes directly from base class or
# derived in user subclass
if LightningModule.__qualname__ in getattr(model, attribute).__qualname__:
# Val and test should be list of dataloaders
dataloader = dataloader if attribute == 'train_dataloader' or \
(attribute != 'train_dataloader' and isinstance(dataloader, list)) else [dataloader]
# Check we are given valid dataloaders
is_dataloader = isinstance(dataloader, torch.utils.data.DataLoader)
is_dataloader_list = isinstance(dataloader, list)
valid_loaders = None
if is_dataloader_list:
valid_loaders = all(isinstance(d, torch.utils.data.DataLoader) for d in dataloader)
if is_dataloader or is_dataloader_list and valid_loaders:
# Overwrite abstract methods
def dl():
return dataloader
dl.__name__ = attribute
setattr(model, attribute, dl)
elif dataloader and dataloader != [None]:
raise ValueError(f'`{attribute}` needs to be an instance of '
'`torch.utils.data.DataLoader` or a list of '
                             'DataLoaders, instead got %r' % dataloader)
elif dataloader: # if default (None) is passed, do not warn the user
warnings.warn(f'Model has predefined `{attribute}`,'
f' will skip `{attribute}={dataloader}` passed to fit method.')
|
the-stack_106_29995 | # pylint: disable=too-many-arguments, too-many-locals
""" Variational inference """
import math
import functools
from collections import namedtuple
from collections import OrderedDict
from scipy.special import gammaln
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
VariationalParameter = namedtuple('VariationalParameter',
['mean', 'rho', 'eps'])
def evaluate(variational_parameter):
""" Evaluates the current value of a variational parameter.
Returns mean + log(1 + e^rho) * eps
:args variational_parameter: the variational parameter
:returns: the value of the variational parameter
"""
assert isinstance(variational_parameter, VariationalParameter), \
"Incorrect type."
return variational_parameter.mean + \
(1 + variational_parameter.rho.exp()).log() * variational_parameter.eps
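# Note: log(1 + e^rho) is the softplus of rho, so the effective standard deviation
# sigma = softplus(rho) stays positive for any rho; e.g. rho = 0 gives
# sigma = log(2) ~ 0.693, and the sampled value is mean + sigma * eps
# (the usual reparameterization trick).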
def rebuild_parameters(dico, module, epsilon_setting):
""" Rebuild parameters.
Build the computational graph corresponding to
the computations of the parameters of the given module,
using the corresponding variational parameters in dico,
and the rule used to sample epsilons. If the module has
submodules, corresponding subcomputational graphs are also
built.
Typically, if a module has a parameter weight, weight should
appear in dico, and the parameter will be rebuilt as
module.weight = dico['weight'].mean + (1+dico['weight'].rho.exp()).log() *
dico['weight'].eps
    :args dico: a 'tree' dictionary that contains variational
parameters for the current module, and subtrees for submodules
:args module: the module whose parameters are to be rebuilt
    :args epsilon_setting: how epsilons ought to be drawn
"""
for name, p in dico.items():
if isinstance(p, VariationalParameter):
if p.eps is None:
dico[name] = p._replace(eps=Variable(p.mean.data.clone()))
epsilon_setting(name, dico[name])
setattr(module, name, evaluate(dico[name]))
elif p is None:
setattr(module, name, None)
else:
rebuild_parameters(p, getattr(module, name), epsilon_setting)
def prior_std(p):
""" Compute a reasonable prior standard deviation for parameter p.
:args p: the parameter
:return: the resulting std
"""
stdv = 1
if p.dim() > 1:
for i in range(p.dim() - 1):
stdv = stdv * p.size()[i + 1]
stdv = 1 / math.sqrt(stdv)
else:
stdv = 1e-2
return stdv
def sub_prior_loss_NN(dico):
# NN = Normal-Normal
""" Compute the KL divergence between prior and parameters for
all Variational Parameters in the tree dictionary dico.
:args dico: tree dictionary
:return: KL divergence between prior and current
"""
loss = 0
for p in dico.values():
if isinstance(p, VariationalParameter):
mean = p.mean
std = (1 + p.rho.exp()).log()
std_prior = prior_std(mean)
loss += (-(std / std_prior).log() +
(std.pow(2) + mean.pow(2)) /
(2 * std_prior ** 2) - 1 / 2).sum()
else:
loss += sub_prior_loss_NN(p)
return loss
def sub_entropy(dico):
""" Compute the entropy of the parameters for all Variational
Parameters in the tree dictionary dico.
:args dico: tree dictionary
:returns: Entropy of the current distribution
"""
entropy = 0.
for _, p in dico.items():
if isinstance(p, VariationalParameter):
std = (1 + p.rho.exp()).log()
n = np.prod(std.size())
entropy += std.log().sum() + .5 * n * (1 + np.log(2 * np.pi))
else:
entropy += sub_entropy(p)
return entropy
def sub_conjprior(dico, alpha_0, beta_0, mu_0, kappa_0):
""" Compute an estimation of the KL divergence between the conjugate
prior and parameters for all Variational Parameters in the tree
dictionary dico.
:args dico: tree dictionary
:args alpha_0: hyperparameter of the conjugate prior
:args beta_0: hyperparameter of the conjugate prior
:args mu_0: hyperparameter of the conjugate prior
:args kappa_0: hyperparameter of the conjugate prior
:return: estimation of the KL divergence between prior and current
"""
logprior = 0.
for _, p in dico.items():
if isinstance(p, VariationalParameter):
theta = evaluate(p)
S = (theta.mean() - mu_0).norm() ** 2
V = (theta - theta.mean()).norm() ** 2
n = np.prod(theta.size())
alpha_n = alpha_0 + n / 2
kappa_n = kappa_0 + n
beta_n = beta_0 + V / 2 + S * (kappa_0 * n) / (2 * kappa_n)
logprior += - beta_n.log() * alpha_n + alpha_0 * np.log(beta_0) + \
gammaln(alpha_n) - gammaln(alpha_0) + \
.5 * np.log(kappa_0 / kappa_n) - .5 * n * np.log(2 * np.pi)
else:
logprior += sub_conjprior(
p, alpha_0, beta_0, mu_0, kappa_0)
return logprior
def sub_conjpriorknownmean(dico, mean, alpha_0, beta_0):
""" Compute an estimation of the KL divergence between the conjugate
prior when the mean is known and parameters for all Variational
Parameters in the tree dictionary dico.
:args dico: tree dictionary
:args mean: known mean for the conjugate prior
:args alpha_0: hyperparameter of the conjugate prior
:args beta_0: hyperparameter of the conjugate prior
:return: estimation of the KL divergence between prior and current
"""
logprior = 0.
for _, p in dico.items():
if isinstance(p, VariationalParameter):
theta = evaluate(p)
S = (theta - mean).norm() ** 2
n = np.prod(theta.size())
alpha_n = alpha_0 + n / 2
beta_n = beta_0 + S / 2
logprior += - beta_n.log() * alpha_n + \
gammaln(alpha_n) - gammaln(alpha_0) + \
alpha_0 * np.log(beta_0) - .5 * n * np.log(2 * np.pi)
else:
logprior += sub_conjpriorknownmean(
p, mean, alpha_0, beta_0)
return logprior
def sub_mixtgaussprior(dico, sigma_1, sigma_2, pi):
""" Compute an estimation of the KL divergence between the prior
defined by the mixture of two gaussian distributions
for all Variational Parameters in the tree dictionary dico.
More details on this prior and the notations can be found in :
"Weight Uncertainty in Neural Networks" Blundell et al, 2015
https://arxiv.org/pdf/1505.05424.pdf
:args dico: tree dictionary
:args sigma_1: std of the first gaussian in the mixture
:args sigma_2: std of the second gaussian in the mixture
:args pi: probability of the first gaussian in the mixture
:return: estimation of the KL divergence between prior and current
"""
logprior = 0.
for _, p in dico.items():
if isinstance(p, VariationalParameter):
theta = evaluate(p)
n = np.prod(theta.size())
theta2 = theta ** 2
pgauss1 = (- theta2 / (2. * sigma_1 ** 2)).exp() / sigma_1
pgauss2 = (- theta2 / (2. * sigma_2 ** 2)).exp() / sigma_2
logprior += (pi * pgauss1 + (1 - pi) * pgauss2 + 1e-8).log().sum()
logprior -= n / 2 * np.log(2 * np.pi)
else:
logprior += sub_mixtgaussprior(
p, sigma_1, sigma_2, pi)
return logprior
class Variationalize(nn.Module):
""" Build a Variational model over the model given as input.
Variationalize changes all parameters of the given model
to allow learning of a gaussian distribution over the
parameters using Variational inference. For more information,
see e.g. https://papers.nips.cc/paper/4329-practical-variational
-inference-for-neural-networks.pdf.
:args model: the model on which VI is to be performed
:args epsilon_setting: function drawing randomly its argument according to
the centered and normalized distribution in the family of posteriors
:args zero_mean: if True, sets initial mean to 0, else
keep model initial mean
:args learn_mean: if True, learn the posterior mean
:args learn_rho: if True, learn the posterior rho
"""
def __init__(self, model, epsilon_setting, sub_prior_loss = None, \
zero_mean=True, learn_mean=True, learn_rho=True):
super().__init__()
self.model = model
self.epsilon_setting = epsilon_setting
self.dico = OrderedDict()
self._variationalize_module(self.dico, self.model, '', zero_mean,
learn_mean, learn_rho)
"""
self._prior_loss_function = functools.partial(
sub_prior_loss,
dico=self.dico)
"""
def _variationalize_module(self, dico, module, prefix, zero_mean,
learn_mean, learn_rho):
to_erase = []
paras = module._parameters.items() # pylint: disable=protected-access
for name, p in paras:
if p is None:
dico[name] = None
else:
stdv = prior_std(p)
init_rho = math.log(math.exp(stdv) - 1)
init_mean = p.data.clone()
if zero_mean:
init_mean.fill_(0)
dico[name] = VariationalParameter(
Parameter(init_mean),
Parameter(p.data.clone().fill_(init_rho)),
None)
if learn_mean:
self.register_parameter(prefix + '_' + name + '_mean',
dico[name].mean)
if learn_rho:
self.register_parameter(prefix + '_' + name + '_rho',
dico[name].rho)
to_erase.append(name)
for name in to_erase:
delattr(module, name)
for mname, sub_module in module.named_children():
sub_dico = OrderedDict()
self._variationalize_module(sub_dico, sub_module,
prefix + ('_' if prefix else '') +
mname, zero_mean,
learn_mean, learn_rho)
dico[mname] = sub_dico
def set_prior(self, prior_type, **prior_parameters):
""" Change the prior to be used.
Available priors are 'gaussian', 'conjugate', 'mixtgauss' and
'conjugate_known_mean'. For each prior, you must
specify the corresponding parameter:
- For the gaussian prior, no parameter is required.
- For the conjugate prior, you must specify
- n_mc_samples, the number of samples used in the Monte Carlo
estimation of the prior loss and its gradient.
- mu_0, the prior sample mean
- kappa_0, the number of samples used to estimate the
prior sample mean
- alpha_0 and beta_0, such that variance was estimated from 2
alpha_0 observations with sample mean mu_0 and sum of squared
deviations 2 beta_0
- For the conjugate prior with known mean,
- n_mc_samples, the number of samples used in the Monte Carlo
estimation of the prior loss and its gradient.
- mean, the known mean
- alpha_0 and beta_0 defined as above
- For the mixture of two gaussians,
- n_mc_samples, the number of samples used in the Monte Carlo
estimation of the prior loss and its gradient.
- sigma_1 and sigma_2 the std of the two gaussians
- pi the probability of the first gaussian
For further information, see:
https://en.wikipedia.org/wiki/Conjugate_prior.
        Acts in place by modifying the value of _prior_loss_function.
:args prior_type: one of 'gaussian', 'conjugate',
'conjugate_known_mean', 'mixtgauss'
:args prior_parameters: the parameters for the associated prior
"""
if prior_type == 'gaussian':
self._prior_loss_function = functools.partial(
sub_prior_loss_NN,
dico=self.dico)
else:
n_mc_samples = prior_parameters.pop("n_mc_samples")
if prior_type == 'conjugate':
mc_logprior_function = functools.partial(
sub_conjprior,
**prior_parameters
)
if prior_type == 'conjugate_known_mean':
mc_logprior_function = functools.partial(
sub_conjpriorknownmean,
**prior_parameters
)
if prior_type == 'mixtgauss':
mc_logprior_function = functools.partial(
sub_mixtgaussprior,
**prior_parameters
)
def prior_loss_function():
"""Compute the prior loss"""
logprior = 0.
for _ in range(n_mc_samples):
rebuild_parameters(
self.dico, self.model,
lambda name, p: p.eps.data.normal_()
)
logprior += mc_logprior_function(self.dico)
logprior = logprior / n_mc_samples
H = sub_entropy(self.dico)
prior_loss = - logprior - H
return prior_loss
self._prior_loss_function = prior_loss_function
def forward(self, *inputs):
def _epsilon_setting(name, p): # pylint: disable=unused-argument
if self.training:
return self.epsilon_setting(p)
#return p.eps.data.normal_()
return p.eps.data.zero_()
rebuild_parameters(self.dico, self.model, _epsilon_setting)
return self.model(*inputs)
def prior_loss(self):
""" Returns the prior loss """
return self._prior_loss_function()
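# A minimal usage sketch for Variationalize (hedged: `net`, `inputs`, `targets`,
# `criterion` and `dataset_size` below are illustrative placeholders, not part of
# this module):
#     net = nn.Linear(10, 2)
#     var_net = Variationalize(net, epsilon_setting=lambda p: p.eps.data.normal_())
#     var_net.set_prior('gaussian')
#     output = var_net(inputs)
#     loss = criterion(output, targets) + var_net.prior_loss() / dataset_size
#     loss.backward()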
class Sample(nn.Module):
""" Utility to sample a single model from a Variational Model.
Sample is a decorator that wraps a variational model, sample
a model from the current parameter distribution and make the
model usable as any other pytorch model. The sample can be
redrawn using the draw() method. Draw needs to be called
once before the model can be used.
:args var_model: Variational model from which the sample models
are to be drawn
"""
def __init__(self, var_model):
super().__init__()
self.var_model = var_model
self.association = []
def draw(self, association=None, var_dico=None):
""" Draw a single model from the posterior variationally learned """
if association is None:
self.association = []
association = self.association
var_dico = self.var_model.dico
for name, p in var_dico.items():
if isinstance(p, VariationalParameter):
if p.eps is None:
var_dico[name] = p._replace(eps=Variable(
p.mean.data.clone()))
association.append((var_dico[name].eps,
var_dico[name].eps.data.clone().normal_()))
else:
self.draw(association, p)
def forward(self, *inputs):
for p, drawn_value_p in self.association:
p.data.copy_(drawn_value_p)
def _epsilon_setting(name, p): # pylint: disable=unused-argument
return 1
rebuild_parameters(self.var_model.dico, self.var_model.model,
_epsilon_setting)
return self.var_model.model(*inputs)
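# A minimal usage sketch for Sample (hedged: `var_net` and `inputs` are the
# illustrative placeholders from the Variationalize sketch above):
#     sampled = Sample(var_net)
#     sampled.draw()              # must be called once before the first forward pass
#     prediction = sampled(inputs)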
|
the-stack_106_29996 | from __future__ import print_function
from __future__ import absolute_import
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import scipy.sparse
import subprocess
import math
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
import pickle
from .imdb import imdb
from .imdb import ROOT_DIR
from . import ds_utils
from .voc_eval import voc_eval
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on something outside of this project
from utils.config import cfg
from utils.xml import convert_xml
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
# <<<< obsolete
class pascal_voc_withweak(imdb):
def __init__(self, image_set, strong_image_set, year, devkit_path=None):
imdb.__init__(self, 'voc_withweak_withimagelabel_' + year + '_' + image_set + '_' + strong_image_set)
self._year = year
self._image_set = image_set
self._strong_image_set = strong_image_set
self._devkit_path = self._get_default_path() if devkit_path is None \
else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index(self._image_set)
self._strong_image_index = self._load_image_set_index(self._strong_image_set)
self._roidb_handler = self._load_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
self._proposal_prefix = 'trainval' if 'trainval' in self._image_set else 'test'
        print('Initializing VOC imdb')
print('Proposal prefix is {}'.format(self._proposal_prefix))
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None,
'min_size': 2}
# self._proposal_method = 'edge_boxes' # proposal_method
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_index_at(self, i):
return self._image_index[i]
def image_id_at(self, i):
"""
        Return the id (here simply the index i) of image i in the image sequence.
"""
return i
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self, image_set):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit')
def _load_roidb(self):
cache_file = os.path.join(self.cache_path, self.name + '_fast_eb_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self._load_gt_roidb()
proposal_roidb = self._load_edge_boxes_proposal_roidb()
for gt_db, pro_db in zip(gt_roidb, proposal_roidb):
assert gt_db['index'] == pro_db['index']
gt_db['rois'] = pro_db['rois'].copy()
# append image id and image path to roidb
for i in range(len(self._image_index)):
gt_roidb[i]['img_id'] = self.image_id_at(i)
gt_roidb[i]['image'] = self.image_path_at(i)
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_edge_boxes_proposal_roidb(self):
print('loading edge boxes proposals')
full_image_index_file = os.path.join(self._devkit_path, 'VOC' + self._year, 'ImageSets', 'Main', self._proposal_prefix + '.txt')
proposal_matpath = os.path.join(cfg.DATA_DIR, 'edge_boxes_data', 'voc_' + self._year + '_' + self._proposal_prefix + '.mat')
raw_data = sio.loadmat(proposal_matpath)['boxes'][0].ravel()
box_list = []
for i in range(raw_data.shape[0]):
if i % 500 == 499:
print('processing edge boxes %d' % (i))
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
# keep = ds_utils.unique_boxes(boxes)
# boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
# if boxes.shape[0] > 2000:
# boxes = boxes[:2000]
box_list.append(boxes)
with open(full_image_index_file) as f:
full_image_set_index = [x.strip() for x in f.readlines()]
full_boxes = {}
for i, index in enumerate(full_image_set_index):
full_boxes[index] = box_list[i]
eb_roidb = []
image_index = self._load_image_set_index(self._image_set)
for i, index in enumerate(image_index):
eb_boxes = np.array(full_boxes[index], dtype=np.uint16)
roi_rec = {'index': index,
'rois': eb_boxes}
eb_roidb.append(roi_rec)
return eb_roidb
def _load_gt_roidb(self):
"""
"""
gt_roidb = [self._load_pascal_annotation(index)
for index in self._image_index]
return gt_roidb
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
# strong gt boxes
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
height = int(tree.find('size').find('height').text)
width = int(tree.find('size').find('width').text)
# load image-level label
if True:
            # image-level class labels are collected for every image
objs = tree.findall('object')
image_classes = np.zeros((len(self._classes)), dtype=np.int32)
for ix, obj in enumerate(objs):
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
image_classes[cls] = 1
if index in self._strong_image_index:
# only access bbox annotation for images in strong_image_index
objs = tree.findall('object')
# if not self.config['use_diff']:
# Exclude the samples labeled as difficult
# non_diff_objs = [
# obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
# objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
ishards = np.zeros((num_objs), dtype=np.int32)
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
diffc = obj.find('difficult')
difficult = 0 if diffc is None else int(diffc.text)
ishards[ix] = difficult
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
else:
num_objs = 0
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
ishards = np.zeros((num_objs), dtype=np.int32)
# weak gt boxes
filename = os.path.join(self._data_path, 'WEAKAnnotations', index + '.xml')
if os.path.exists(filename):
tree = ET.parse(filename)
weak_objs = tree.findall('object')
weak_num_objs = len(weak_objs)
weak_boxes = np.zeros((weak_num_objs, 4), dtype=np.uint16)
weak_gt_classes = np.zeros((weak_num_objs), dtype=np.int32)
for ix, obj in enumerate(weak_objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
weak_boxes[ix, :] = [x1, y1, x2, y2]
weak_gt_classes[ix] = cls
else:
weak_num_objs = 0
weak_boxes = np.zeros((weak_num_objs, 4), dtype=np.uint16)
weak_gt_classes = np.zeros((weak_num_objs), dtype=np.int32)
return {'index': index,
'boxes': boxes,
'gt_classes': gt_classes,
'image_classes': image_classes,
'weak_boxes': weak_boxes,
'weak_gt_classes': weak_gt_classes,
'gt_ishard': ishards,
'flipped': False,
'height': height,
'width': width,
}
def append_flipped_images(self):
"""Only flip boxes coordinates, images will be flipped when loading into network"""
print('%s append flipped images to roidb' % self._name)
roidb_flipped = []
for roi_rec in self.roidb:
# flip gt boxes
boxes = roi_rec['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = roi_rec['width'] - oldx2 - 1
boxes[:, 2] = roi_rec['width'] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
# flip rois
rois = roi_rec['rois'].copy()
rois_oldx1 = rois[:, 0].copy()
rois_oldx2 = rois[:, 2].copy()
rois[:, 0] = roi_rec['width'] - rois_oldx2 - 1
rois[:, 2] = roi_rec['width'] - rois_oldx1 - 1
assert (rois[:, 2] >= rois[:, 0]).all()
# flip weak boxes
wboxes = roi_rec['weak_boxes'].copy()
woldx1 = wboxes[:, 0].copy()
woldx2 = wboxes[:, 2].copy()
wboxes[:, 0] = roi_rec['width'] - woldx2 - 1
wboxes[:, 2] = roi_rec['width'] - woldx1 - 1
assert (wboxes[:, 2] >= wboxes[:, 0]).all()
roi_rec_flipped = roi_rec.copy()
roi_rec_flipped['boxes'] = boxes
roi_rec_flipped['weak_boxes'] = wboxes
roi_rec_flipped['rois'] = rois
roi_rec_flipped['flipped'] = True
roidb_flipped.append(roi_rec_flipped)
self._roidb.extend(roidb_flipped)
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print('Running:\n{}'.format(cmd))
# status = subprocess.call(cmd, shell=True)
def _get_refine_annotations(self, all_boxes):
import cv2
n_images = len(all_boxes[0])
refine_bboxs = [[] for im_ind in range(n_images)]
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
for im_ind, dets in enumerate(all_boxes[cls_ind]):
dets = all_boxes[cls_ind][im_ind]
for k in range(dets.shape[0]):
bbox = [dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1]
confidence = dets[k, -1]
if confidence > 0.9:
refine_bboxs[im_ind].append([bbox[0], bbox[1], bbox[2], bbox[3], cls_ind])
for im_ind in range(n_images):
image_id = self._image_index[im_ind]
image_fn = self.roidb[im_ind]['image']
org_xml = os.path.join(self._data_path, 'Annotations', image_id + '.xml')
new_xml = org_xml.replace('Annotations', 'tempAnnotations')
image = np.array(cv2.imread(image_fn))
im_info = image.shape[:2]
if len(refine_bboxs[im_ind]) > 0:
convert_xml(org_xml, new_xml, refine_bboxs[im_ind], im_info)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
# self._get_refine_annotations(all_boxes)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
pass
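    # A minimal usage sketch (hedged: the split names and VOCdevkit layout are
    # assumptions about how this class is typically driven):
    #     d = pascal_voc_withweak('trainval', 'trainval_strong', '2007')
    #     roidb = d.roidb
    #     d.append_flipped_images()
    #     print(len(roidb))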
|
the-stack_106_29997 | from causal_world.task_generators.base_task import BaseTask
import numpy as np
from causal_world.configs.world_constants import WorldConstants
class ReachingTaskGenerator(BaseTask):
def __init__(self, variables_space='space_a_b',
fractional_reward_weight=1,
dense_reward_weights=np.array([100000,0, 0, 0]),
default_goal_60=np.array([0, 0, 0.10]),
default_goal_120=np.array([0, 0, 0.13]),
default_goal_300=np.array([0, 0, 0.16]),
joint_positions=None,
activate_sparse_reward=False):
"""
This task generator will generate a task for reaching.
:param variables_space: (str) space to be used either 'space_a' or
'space_b' or 'space_a_b'
:param fractional_reward_weight: (float) weight multiplied by the
fractional volumetric
overlap in the reward.
:param dense_reward_weights: (list float) specifies the reward weights
for all the other reward
terms calculated in the
calculate_dense_rewards
function.
:param default_goal_60: (nd.array) the position of the goal for first
finger, x, y, z.
:param default_goal_120: (nd.array) the position of the goal for second
finger, x, y, z.
:param default_goal_300: (nd.array) the position of the goal for third
finger, x, y, z.
        :param joint_positions: (nd.array) specifies the joint positions to start
                                the episode with. None if the default is
                                to be used.
:param activate_sparse_reward: (bool) specified if you want to
sparsify the reward by having
+1 or 0 if the mean distance
from goal is < 0.01.
"""
super().__init__(task_name="reaching",
variables_space=variables_space,
fractional_reward_weight=fractional_reward_weight,
dense_reward_weights=dense_reward_weights,
activate_sparse_reward=activate_sparse_reward)
self._task_robot_observation_keys = ["time_left_for_task",
"joint_positions",
"joint_velocities",
"end_effector_positions"]
self._task_params['default_goal_60'] = default_goal_60
self._task_params['default_goal_120'] = default_goal_120
self._task_params['default_goal_300'] = default_goal_300
self._task_params["joint_positions"] = joint_positions
self.previous_end_effector_positions = None
self.previous_joint_velocities = None
self.current_number_of_obstacles = 0
def _set_up_stage_arena(self):
"""
:return:
"""
creation_dict = {
'name': "goal_60",
'shape': "sphere",
'color': np.array([1, 0, 0]),
'position': self._task_params['default_goal_60']
}
self._stage.add_silhoutte_general_object(**creation_dict)
creation_dict = {
'name': "goal_120",
'shape': "sphere",
'color': np.array([0, 1, 0]),
'position': self._task_params['default_goal_120']
}
self._stage.add_silhoutte_general_object(**creation_dict)
creation_dict = {
'name': "goal_300",
'shape': "sphere",
'color': np.array([0, 0, 1]),
'position': self._task_params['default_goal_300']
}
self._stage.add_silhoutte_general_object(**creation_dict)
self._task_stage_observation_keys = [
"goal_60_cartesian_position",
"goal_120_cartesian_position",
"goal_300_cartesian_position"
]
return
def get_description(self):
"""
:return: (str) returns the description of the task itself.
"""
return \
"Task where the goal is to reach a " \
"goal point for each finger"
def _calculate_dense_rewards(self, desired_goal, achieved_goal):
"""
:param desired_goal:
:param achieved_goal:
:return:
"""
end_effector_positions_goal = desired_goal
current_end_effector_positions = achieved_goal
previous_dist_to_goal = np.linalg.norm(
end_effector_positions_goal - self.previous_end_effector_positions)
current_dist_to_goal = np.linalg.norm(end_effector_positions_goal -
current_end_effector_positions)
rewards = list()
rewards.append(previous_dist_to_goal - current_dist_to_goal)
rewards.append(-current_dist_to_goal)
rewards.append(
-np.linalg.norm(self._robot.get_latest_full_state()['torques']))
rewards.append(-np.linalg.norm(np.abs(self._robot.get_latest_full_state(
)['velocities'] - self.previous_joint_velocities),
ord=2))
update_task_info = {
'current_end_effector_positions':
current_end_effector_positions,
'current_velocity':
self._robot.get_latest_full_state()['velocities']
}
return rewards, update_task_info
def _update_task_state(self, update_task_info):
"""
:param update_task_info:
:return:
"""
self.previous_end_effector_positions = \
update_task_info['current_end_effector_positions']
self.previous_joint_velocities = \
update_task_info['current_velocity']
return
def _set_task_state(self):
"""
:return:
"""
self.previous_end_effector_positions = \
self._robot.get_latest_full_state()['end_effector_positions']
self.previous_joint_velocities = \
self._robot.get_latest_full_state()['velocities']
return
def get_desired_goal(self):
"""
:return: (nd.array) specifies the desired goal as array of all three
positions of the finger goals.
"""
desired_goal = np.array([])
desired_goal = np.append(
desired_goal,
self._stage.get_object_state('goal_60', 'cartesian_position'))
desired_goal = np.append(
desired_goal,
self._stage.get_object_state('goal_120', 'cartesian_position'))
desired_goal = np.append(
desired_goal,
self._stage.get_object_state('goal_300', 'cartesian_position'))
return desired_goal
def get_achieved_goal(self):
"""
:return: (nd.array) specifies the achieved goal as concatenated
end-effector positions.
"""
achieved_goal = self._robot.get_latest_full_state(
)['end_effector_positions']
return np.array(achieved_goal)
def _goal_reward(self, achieved_goal, desired_goal):
"""
:param achieved_goal:
:param desired_goal:
:return:
"""
current_end_effector_positions = achieved_goal
current_dist_to_goal = np.abs(desired_goal -
current_end_effector_positions)
current_dist_to_goal_mean = np.mean(current_dist_to_goal)
return np.array(current_dist_to_goal_mean)
def _check_preliminary_success(self, goal_reward):
"""
:param goal_reward:
:return:
"""
if goal_reward < 0.01:
return True
else:
return False
def _calculate_fractional_success(self, goal_reward):
"""
:param goal_reward:
:return:
"""
clipped_distance = np.clip(goal_reward, 0.01, 0.03)
distance_from_success = clipped_distance - 0.01
fractional_success = 1 - (distance_from_success / 0.02)
return fractional_success
def get_info(self):
"""
:return: (dict) returns the info dictionary after every step of the
environment.
"""
info = dict()
info['desired_goal'] = self._current_desired_goal
info['achieved_goal'] = self._current_achieved_goal
info['success'] = self._task_solved
if self._is_ground_truth_state_exposed:
info['ground_truth_current_state_varibales'] = \
self.get_current_variable_values()
if self._is_partial_solution_exposed:
info['possible_solution_intervention'] = dict()
info['possible_solution_intervention']['joint_positions'] = \
self._robot.get_joint_positions_from_tip_positions(
self._current_desired_goal,
self._robot.get_latest_full_state()['positions'])
info['fractional_success'] =\
self._calculate_fractional_success(self._current_goal_reward)
return info
def _set_intervention_space_a(self):
"""
:return:
"""
super(ReachingTaskGenerator, self)._set_intervention_space_a()
self._intervention_space_a['number_of_obstacles'] = \
np.array([1, 5])
return
def _set_intervention_space_b(self):
"""
:return:
"""
super(ReachingTaskGenerator, self)._set_intervention_space_b()
self._intervention_space_b['number_of_obstacles'] = \
np.array([1, 5])
return
def get_task_generator_variables_values(self):
"""
:return: (dict) specifying the variables belonging to the task itself.
"""
task_generator_variables = dict()
task_generator_variables['number_of_obstacles'] = \
self.current_number_of_obstacles
return task_generator_variables
def apply_task_generator_interventions(self, interventions_dict):
"""
:param interventions_dict: (dict) variables and their corresponding
intervention value.
:return: (tuple) first position if the intervention was successful or
not, and second position indicates if
observation_space needs to be reset.
"""
if len(interventions_dict) == 0:
return True, False
reset_observation_space = False
if "number_of_obstacles" in interventions_dict:
if int(interventions_dict["number_of_obstacles"]
) > self.current_number_of_obstacles:
reset_observation_space = True
for i in range(self.current_number_of_obstacles,
int(interventions_dict["number_of_obstacles"])):
self._stage.add_rigid_general_object(
name="obstacle_" + str(i),
shape="static_cube",
size=np.array([0.01, 0.01, 0.01]),
color=np.array([0, 0, 0]),
position=np.random.uniform(WorldConstants.ARENA_BB[0],
WorldConstants.ARENA_BB[1]))
self.current_number_of_obstacles += 1
self._task_stage_observation_keys.append("obstacle_" +
str(i) + "_type")
self._task_stage_observation_keys.append("obstacle_" +
str(i) + "_size")
self._task_stage_observation_keys.append(
"obstacle_" + str(i) + "_cartesian_position")
self._task_stage_observation_keys.append("obstacle_" +
str(i) +
"_orientation")
else:
return True, reset_observation_space
else:
raise Exception("this task generator variable "
"is not yet defined")
self._set_intervention_space_b()
self._set_intervention_space_a()
self._set_intervention_space_a_b()
self._stage.finalize_stage()
return True, reset_observation_space
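# A minimal usage sketch (hedged: the CausalWorld import path and constructor
# arguments are assumptions based on the causal_world package layout):
#     from causal_world.envs import CausalWorld
#     task = ReachingTaskGenerator(variables_space='space_a_b',
#                                  activate_sparse_reward=False)
#     env = CausalWorld(task=task)
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())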
|
the-stack_106_30001 | #Assignment No: 4
#Problem Statement: Understanding the connectivity of Raspberry-Pi/Beagle board with a
#temperature sensor. Write an application to read the environment temperature. If the temperature
#crosses a threshold value, the application indicates this to the user using LEDs.
#Name :Sameer Rathod
#TE B 58
import RPi.GPIO as GPIO
import time,sys
import datetime
import dht11
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setup(37,GPIO.OUT)
instance=dht11.DHT11(pin=5)
try:
    while True:
        result = instance.read()
        if result.is_valid():
            print("Last valid input:" + str(datetime.datetime.now()))
            print("Temperature:%d C" % result.temperature)
            print("Humidity:%d %%" % result.humidity)
            # light the alert LED on pin 37 once the 23 C threshold is crossed
            if result.temperature >= 23:
                GPIO.output(37, True)
                print('-------------temperature above threshold (23 C) alert------------')
            else:
                GPIO.output(37, False)
        time.sleep(1)
except KeyboardInterrupt:
    # allow Ctrl+C to break the loop so the cleanup below actually runs
    pass
GPIO.cleanup()
sys.exit(0)
|
the-stack_106_30004 | from __future__ import absolute_import, print_function, division
import unittest
from pony import orm
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
class TestIntConverter1(unittest.TestCase):
def setUp(self):
self.db = db = orm.Database()
class Foo(db.Entity):
id = orm.PrimaryKey(int)
x = orm.Required(int, size=8, unsigned=True)
setup_database(db)
with orm.db_session:
foo = Foo(id=123, x=1)
def tearDown(self):
teardown_database(self.db)
def test_1(self):
with orm.db_session:
foo = self.db.Foo[123]
foo.x -= 1
with orm.db_session:
foo = self.db.Foo[123]
self.assertEqual(foo.x, 0)
@raises_exception(ValueError, "Value -1 of attr Foo.x is less than the minimum allowed value 0")
@orm.db_session
def test_2(self):
foo = self.db.Foo[123]
foo.x -= 2
@orm.db_session
def test_3(self):
with orm.db_session:
foo = self.db.Foo[123]
foo.x += 254
with orm.db_session:
foo = self.db.Foo[123]
self.assertEqual(foo.x, 255)
@raises_exception(ValueError, "Value 256 of attr Foo.x is greater than the maximum allowed value 255")
@orm.db_session
def test_4(self):
foo = self.db.Foo[123]
foo.x += 255
|
the-stack_106_30005 | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import logging
import pathlib
from .command import Command
class AppImageToolCommand(Command):
def __init__(self, app_dir, target_file):
super().__init__("appimagetool")
self.app_dir = pathlib.Path(app_dir).absolute()
self.runtime_file = None
self.update_information = None
self.guess_update_information = False
self.sign_key = None
self.target_file = pathlib.Path(target_file).absolute()
self.target_arch = None
def run(self):
logging.info("Generating AppImage from %s" % self.app_dir)
command = self._generate_command()
if self.target_arch:
self.env["ARCH"] = self.target_arch
self._run(command)
if self.return_code != 0:
logging.error("AppImage generation failed")
else:
logging.info("AppImage created successfully")
def _generate_command(self):
command = ["appimagetool"]
if self.runtime_file:
command.extend(["--runtime-file", self.runtime_file])
if self.sign_key:
command.extend(["--sign", "--sign-key", self.sign_key])
if self.update_information:
command.extend(["--updateinformation", self.update_information])
if self.guess_update_information:
command.extend(["--guess"])
        # skip AppStream metadata validation (appstreamcli) calls from appimagetool
command.extend(["--no-appstream"])
command.extend([str(self.app_dir), str(self.target_file)])
return command
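# A minimal usage sketch (hedged: paths are illustrative, and it is assumed the
# Command base class provides the self._run, self.env and self.return_code helpers
# used in run() above):
#     cmd = AppImageToolCommand(app_dir="AppDir", target_file="MyApp-x86_64.AppImage")
#     cmd.target_arch = "x86_64"
#     cmd.guess_update_information = True
#     cmd.run()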
|
the-stack_106_30009 | from .nn_layer import NNLayer
import tensorflow as tf
__all__ = ['LSTMLayer']
class LSTMLayer(NNLayer):
"""
This layer implements the LSTM cell.
"""
def __init__(self, input_dim, hidden_layer_size, input_layer):
"""Initialize LSTMLayer class.
Parameters
----------
input_dim : integer
Input dimensions
hidden_layer_size : integer
Size of the memory in LSTM cell
input_layer : layers object
Preceding layers object
"""
self.input_dim = input_dim
self.hidden_layer_size = hidden_layer_size
self.inputs = input_layer.get_outputs()
# Initializing the weights and biases
self.Wi = tf.Variable(tf.zeros([self.input_dim, self.hidden_layer_size]))
self.Ui = tf.Variable(tf.zeros([self.hidden_layer_size, self.hidden_layer_size]))
self.bi = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wf = tf.Variable(tf.zeros([self.input_dim, self.hidden_layer_size]))
self.Uf = tf.Variable(tf.zeros([self.hidden_layer_size, self.hidden_layer_size]))
self.bf = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wog = tf.Variable(tf.zeros([self.input_dim, self.hidden_layer_size]))
self.Uog = tf.Variable(tf.zeros([self.hidden_layer_size, self.hidden_layer_size]))
self.bog = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.Wc = tf.Variable(tf.zeros([self.input_dim, self.hidden_layer_size]))
self.Uc = tf.Variable(tf.zeros([self.hidden_layer_size, self.hidden_layer_size]))
self.bc = tf.Variable(tf.zeros([self.hidden_layer_size]))
self.initial_hidden = tf.zeros([1, self.hidden_layer_size])
self.initial_hidden= tf.stack([self.initial_hidden, self.initial_hidden])
def forward_step(self, previous_memory, input_):
"""
Generates the next forward LSTM operation.
Parameters
----------
previous_memory : list
List of the previous memory and hidden output tensors
input_ : tf.tensor
Input tensor
Returns
----------
list
New updated memory and hidden output tensors
"""
previous_hidden_state, c_prev = tf.unstack(previous_memory)
# Input gate
i= tf.sigmoid(
tf.matmul(input_,self.Wi)+tf.matmul(previous_hidden_state,self.Ui) + self.bi
)
# Forget Gate
f= tf.sigmoid(
tf.matmul(input_,self.Wf)+tf.matmul(previous_hidden_state,self.Uf) + self.bf
)
# Output Gate
o= tf.sigmoid(
tf.matmul(input_,self.Wog)+tf.matmul(previous_hidden_state,self.Uog) + self.bog
)
# New Memory Cell
c_= tf.nn.tanh(
tf.matmul(input_,self.Wc)+tf.matmul(previous_hidden_state,self.Uc) + self.bc
)
# Final Memory cell
c= f*c_prev + i*c_
# Current Hidden state
current_hidden_state = o*tf.nn.tanh(c)
return tf.stack([current_hidden_state,c])
# Function for getting all hidden state.
def get_outputs(self):
"""
Iterates through time/ sequence to get all hidden states.
Returns
----------
tf.Tensor
Output tensor
"""
        # Getting all hidden states through time
inputs_shape = self.inputs.get_shape()
if inputs_shape[0] == 1:
self.inputs = tf.expand_dims(self.inputs[0, :, :], 1)
all_hidden_states = tf.scan(self.forward_step,
self.inputs,
initializer=self.initial_hidden,
name='states')
all_hidden_states = all_hidden_states[:, 0, :, :]
else:
all_hidden_states = tf.map_fn(self.get_batch_outputs,
self.inputs)
return all_hidden_states
def get_batch_outputs(self, single_input):
"""
Iterates through time/ sequence to get all hidden states for all
batches.
Returns
----------
tf.Tensor
Output tensor
"""
single_input = tf.expand_dims(single_input, 1)
all_hidden_states = tf.scan(self.forward_step,
single_input,
initializer=self.initial_hidden,
name='states')
all_hidden_states = all_hidden_states[:, 0, :, :]
return all_hidden_states
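# A minimal usage sketch (hedged: `prev_layer` stands for any preceding layer
# object whose get_outputs() returns a [batch, time, input_dim] tensor):
#     lstm = LSTMLayer(input_dim=16, hidden_layer_size=32, input_layer=prev_layer)
#     hidden_states = lstm.get_outputs()   # hidden states for every time step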
|
the-stack_106_30011 | import turtle
instructions = []
with open('day_12.txt') as f:
for line in f:
line = line.rstrip().lower()
instructions.append(line.rstrip())
# PART 1
def move_ship(instructions):
'''
Input is array of instructions - letter and number.
Letters stand for: n - north, s - south, e - east, w - west, l - left, r - right, f - forward.
Make appropriate moves with turtle - named ship.
Return ship's Manhattan distance (sum of the absolute values of its east/west position and its north/south position).
'''
ship = turtle.Turtle()
ship.speed(0)
for move in instructions:
number = int(move[1:])
letter = move[0]
if letter in 'nsew':
x = ship.xcor()
y = ship.ycor()
if letter == 'n':
y += number
elif letter == 's':
y -= number
elif letter == 'e':
x += number
else:
x -= number
ship.goto(x,y)
elif letter == 'r':
ship.rt(number)
elif letter == 'l':
ship.lt(number)
else: # f
ship.fd(number)
return abs(ship.xcor()) + abs(ship.ycor())
# PART 2
def rotate(instr, number, x_r, y_r):
'''Do rotations of relative distance between ship and waypoint'''
if number in (90, 270):
if number == 90 and instr == 'r' or number == 270 and instr == 'l':
x_r, y_r = y_r, -x_r
else:
x_r, y_r = -y_r, x_r
else: # 180
x_r, y_r = -x_r, -y_r
return x_r, y_r
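# For example, with the relative offset x_r = 10, y_r = 4 (waypoint 10 east and
# 4 north of the ship), rotate('r', 90, 10, 4) returns (4, -10): the waypoint
# ends up 4 east and 10 south of the ship, matching the puzzle description.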
def move_ship_and_waypoint(instructions):
'''
There are two turtles - named ship and waypoint.
    The waypoint moves together with the ship; it starts 10 units east and 1 unit north relative to the ship.
Input is array of instructions - letter and number.
Letters stand for:
f - move ship in direction of waypoint (in number given times),
n - north / s - south / e - east / w - west - moves waypoint in number given,
r - right / l - left - rotates waypoint around ship
Return ship's Manhattan distance (sum of the absolute values of its east/west position and its north/south position).
'''
ship = turtle.Turtle()
waypoint = turtle.Turtle()
ship.speed(0)
    waypoint.speed(0)
    waypoint.goto(10, 1)  # waypoint starts at x = 10, y = 1 relative to origin
for move in instructions:
number = int(move[1:])
instr = move[0]
x_w = int(waypoint.xcor()) # waypoint coordinates
y_w = int(waypoint.ycor())
x_s = int(ship.xcor()) # ship coordinates
y_s = int(ship.ycor())
x_r = x_w - x_s # distance between ship and waypoint
y_r = y_w - y_s
if instr == 'f':
x_s += x_r * number
y_s += y_r * number
ship.goto(x_s, y_s)
x_w = x_s + x_r
y_w = y_s + y_r
waypoint.goto(x_w, y_w)
elif instr in 'nsew':
if instr == 'n':
y_w += number
elif instr == 's':
y_w -= number
elif instr == 'e':
x_w += number
else:
x_w -= number
waypoint.goto(x_w, y_w)
else:
x_r, y_r = rotate(instr, number, x_r, y_r)
x_w = x_s + x_r
y_w = y_s + y_r
waypoint.goto(x_w, y_w)
return abs(ship.xcor()) + abs(ship.ycor())
print(move_ship(instructions)) # part 1
print(move_ship_and_waypoint(instructions)) # part 2
|
the-stack_106_30013 | # -*- coding: utf-8 -*-
"""
S3 Charting Toolkit
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{NumPy}} <http://www.numpy.org>}
@requires: U{B{I{MatPlotLib}} <http://matplotlib.sourceforge.net>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Chart"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import current
from gluon.storage import Storage
from gluon.html import IMG
# =============================================================================
class S3Chart(object):
"""
Module for graphing
Currently a simple wrapper to matplotlib
"""
# This folder needs to be writable by the web2py process
CACHE_PATH = "/%s/static/cache/chart" % current.request.application
# -------------------------------------------------------------------------
def __init__(self, path, width=9, height=6):
"""
Create the base Figure object
@param: height x100px
@param: width x100px
"""
try:
# Causes deadlocking issues
# http://sjohannes.wordpress.com/2010/06/11/using-matplotlib-in-a-web-application/
#import matplotlib
#matplotlib.use("Agg")
#import matplotlib.pyplot as plt
#from pylab import savefig
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
self.FigureCanvas = FigureCanvas
from matplotlib.figure import Figure
self.Figure = Figure
MATPLOTLIB = True
except ImportError:
import sys
print >> sys.stderr, "WARNING: S3Chart unresolved dependency: matplotlib required for charting"
MATPLOTLIB = False
self.filename = path
self.width = width
self.height = height
self.asInt = False
if MATPLOTLIB:
self.fig = Figure(figsize=(width, height))
else:
self.fig = None
# -------------------------------------------------------------------------
@staticmethod
def getCachedPath(filename):
import os
path = "applications"
chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
fullPath = "%s%s" % (path, chartFile)
if os.path.exists(fullPath):
return chartFile
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def getCachedFile(filename):
"""
        Return the contents of the cached file; if the file cannot be
        found or read, return None
"""
chartFile = S3Chart.getCachedPath(filename)
if chartFile:
try:
f = open(chartFile)
return f.read()
except:
# for some reason been unable to get the cached version
pass
return None
# -------------------------------------------------------------------------
@staticmethod
def storeCachedFile(filename, image):
"""
Save the file in the cache area, and return the path to this file
"""
path = "applications"
chartFile = "%s/%s.png" % (S3Chart.CACHE_PATH, filename)
fullPath = "%s%s" % (path, chartFile)
try:
f = open(fullPath, "w+")
print >> f, image
except:
return None
return chartFile
# -------------------------------------------------------------------------
@staticmethod
def purgeCache(prefix=None):
"""
        Delete the files in the cache that match the file name prefix;
        if the prefix is None then all files will be deleted
"""
import os
folder = "applications%s/" % S3Chart.CACHE_PATH
if os.path.exists(folder):
filelist = os.listdir(folder)
for file in filelist:
if prefix == None or file.startswith(prefix):
os.remove("%s%s" % (folder, file))
# -------------------------------------------------------------------------
def draw(self, output="xml"):
"""
Output the chart as a PNG embedded in an IMG tag
- used by the Delphi module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
# For interactive shell tests
#plt.show()
# For web response
#savefig(response.body)
chart = Storage()
chart.body = StringIO()
chart.headers = Storage()
chart.headers["Content-Type"] = "image/png"
canvas = self.FigureCanvas(fig)
canvas.print_figure(chart.body)
#return response.body.getvalue()
image = chart.body.getvalue()
        # IE 8 and earlier have a 32K limit on URIs, which can quickly be
        # exceeded if the image is too large. So the image will be stored
        # on the server and a URI used in the src attribute.
cachePath = self.storeCachedFile(self.filename, image)
if output == "xml":
if cachePath != None:
image = IMG(_src = cachePath)
else:
import base64
base64Img = base64.b64encode(image)
image = IMG(_src="data:image/png;base64,%s" % base64Img)
else:
current.response.headers["Content-Type"] = "image/png"
return image
# -------------------------------------------------------------------------
def survey_hist(self, title,
data, bins, min, max, xlabel=None, ylabel=None):
"""
Draw a Histogram
- used by the Survey module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
from numpy import arange
# Draw a histogram
ax = fig.add_subplot(111)
ax.hist(data, bins=bins, range=(min, max))
left = arange(0, bins + 1)
if self.asInt:
label = left * int(max / bins)
else:
label = left * max / bins
ax.set_xticks(label)
ax.set_xticklabels(label, rotation=30)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# -------------------------------------------------------------------------
def survey_pie(self, title, data, label):
"""
Draw a Pie Chart
- used by the Survey module
"""
fig = self.fig
if not fig:
return "Matplotlib not installed"
# Draw a pie chart
ax = fig.add_subplot(111)
ax.pie(data, labels=label)
ax.legend()
ax.set_title(title)
# -------------------------------------------------------------------------
def survey_bar(self, title, data, labels, legendLabels):
"""
Draw a Bar Chart
- used by the Survey module
"""
barColourList = ["#F2D7A0", "#7B77A8", "#69889A", "#9D7B34"]
barColourListExt = [(242, 215, 160),
(123, 118, 168),
(105, 136, 154),
(157, 123, 52)
]
fig = self.fig
if not fig:
return "Matplotlib not installed"
from numpy import arange
# Draw a bar chart
if not isinstance(data[0],list):
dataList = [data]
else:
dataList = data
legendColCnt = 3
cnt = len(labels)
dcnt = len(dataList)
lcnt = 0
if legendLabels != None:
lcnt = (len(legendLabels) + legendColCnt - 1) / legendColCnt
width = 0.9 / dcnt
offset = 0
gap = 0.1 / dcnt
bcnt = 0
bars = []
height = max(0.2, 0.85 - (0.04 * lcnt))
rect = [0.08, 0.08, 0.9, height]
ax = fig.add_axes(rect)
for data in dataList:
left = arange(offset, cnt + offset) # the x locations for the bars
if bcnt < 3:
colour = barColourList[bcnt]
else:
colour = []
colourpart = barColourListExt[bcnt%4]
divisor = 256.0 - (32 * bcnt/4)
if divisor < 0.0:
divisor = divisor * -1
for part in colourpart:
calc = part/divisor
while calc > 1.0:
calc -= 1
colour.append(calc)
plot = ax.bar(left, data, width=width, color=colour)
bars.append(plot[0])
bcnt += 1
offset += width + gap
left = arange(cnt)
lblAdjust = (1.0 - gap) * 0.5
if cnt <= 3:
angle = 0
elif cnt <= 10:
angle = -10
elif cnt <= 20:
angle = -30
else:
angle = -45
ax.set_xticks(left + lblAdjust)
try: # This function is only available with version 1.1 of matplotlib
ax.set_xticklabels(labels, rotation=angle)
ax.tick_params(labelsize=self.width)
except AttributeError:
newlabels = []
for label in labels:
if len(label) > 12:
label = label[0:10] + "..."
newlabels.append(label)
ax.set_xticklabels(newlabels)
ax.set_title(title)
if legendLabels != None:
fig.legend(bars,
legendLabels,
"upper left",
mode="expand",
ncol = legendColCnt,
prop={"size":10},
)
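# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the chart name
# and data below are hypothetical, and the class expects to run inside a
# web2py/Sahana Eden request context):
#
#   chart = S3Chart(path="example_chart", width=8, height=4)
#   chart.survey_bar("Households assessed",
#                    data=[[3, 5, 2], [4, 1, 6]],
#                    labels=["Site A", "Site B", "Site C"],
#                    legendLabels=["Round 1", "Round 2"])
#   image = chart.draw(output="xml")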
# END =========================================================================
|
the-stack_106_30014 | import logging
import operator
import os
from datetime import datetime, timedelta
from galaxy import util
from galaxy.util import unique_id
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import new_secure_hash
from galaxy.util.dictifiable import Dictifiable
import tool_shed.repository_types.util as rt_util
from tool_shed.dependencies.repository import relation_builder
from tool_shed.util import shed_util_common as suc
from mercurial import hg
from mercurial import ui
log = logging.getLogger( __name__ )
class APIKeys( object ):
pass
class User( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'username' )
dict_element_visible_keys = ( 'id', 'username' )
def __init__( self, email=None, password=None ):
self.email = email
self.password = password
self.external = False
self.deleted = False
self.purged = False
self.username = None
self.new_repo_alert = False
def all_roles( self ):
roles = [ ura.role for ura in self.roles ]
for group in [ uga.group for uga in self.groups ]:
for role in [ gra.role for gra in group.roles ]:
if role not in roles:
roles.append( role )
return roles
def check_password( self, cleartext ):
"""Check if 'cleartext' matches 'self.password' when hashed."""
return self.password == new_secure_hash( text_type=cleartext )
def get_disk_usage( self, nice_size=False ):
return 0
@property
def nice_total_disk_usage( self ):
return 0
def set_disk_usage( self, bytes ):
pass
total_disk_usage = property( get_disk_usage, set_disk_usage )
def set_password_cleartext( self, cleartext ):
"""Set 'self.password' to the digest of 'cleartext'."""
self.password = new_secure_hash( text_type=cleartext )
class PasswordResetToken( object ):
def __init__( self, user, token=None):
if token:
self.token = token
else:
self.token = unique_id()
self.user = user
self.expiration_time = datetime.now() + timedelta(hours=24)
class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name' )
def __init__( self, name=None ):
self.name = name
self.deleted = False
class Role( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'type' )
private_id = None
types = Bunch( PRIVATE='private',
SYSTEM='system',
USER='user',
ADMIN='admin',
SHARING='sharing' )
def __init__( self, name="", description="", type="system", deleted=False ):
self.name = name
self.description = description
self.type = type
self.deleted = deleted
@property
def is_repository_admin_role( self ):
# A repository admin role must always be associated with a repository. The mapper returns an
# empty list for those roles that have no repositories. This method will require changes if
        # new features are introduced that result in more than one role per repository.
if self.repositories:
return True
return False
class UserGroupAssociation( object ):
def __init__( self, user, group ):
self.user = user
self.group = group
class UserRoleAssociation( object ):
def __init__( self, user, role ):
self.user = user
self.role = role
class GroupRoleAssociation( object ):
def __init__( self, group, role ):
self.group = group
self.role = role
class RepositoryRoleAssociation( object ):
def __init__( self, repository, role ):
self.repository = repository
self.role = role
class GalaxySession( object ):
def __init__( self,
id=None,
user=None,
remote_host=None,
remote_addr=None,
referer=None,
current_history=None,
session_key=None,
is_valid=False,
prev_session_id=None,
last_action=None ):
self.id = id
self.user = user
self.remote_host = remote_host
self.remote_addr = remote_addr
self.referer = referer
self.current_history = current_history
self.session_key = session_key
self.is_valid = is_valid
self.prev_session_id = prev_session_id
self.last_action = last_action or datetime.now()
class Repository( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name', 'type', 'remote_repository_url', 'homepage_url', 'description', 'user_id', 'private', 'deleted',
'times_downloaded', 'deprecated' )
dict_element_visible_keys = ( 'id', 'name', 'type', 'remote_repository_url', 'homepage_url', 'description', 'long_description', 'user_id', 'private',
'deleted', 'times_downloaded', 'deprecated' )
file_states = Bunch( NORMAL='n',
NEEDS_MERGING='m',
MARKED_FOR_REMOVAL='r',
MARKED_FOR_ADDITION='a',
NOT_TRACKED='?' )
def __init__( self, id=None, name=None, type=None, remote_repository_url=None, homepage_url=None,
description=None, long_description=None, user_id=None, private=False,
deleted=None, email_alerts=None, times_downloaded=0, deprecated=False ):
self.id = id
self.name = name or "Unnamed repository"
self.type = type
self.remote_repository_url = remote_repository_url
self.homepage_url = homepage_url
self.description = description
self.long_description = long_description
self.user_id = user_id
self.private = private
self.deleted = deleted
self.email_alerts = email_alerts
self.times_downloaded = times_downloaded
self.deprecated = deprecated
@property
def admin_role( self ):
admin_role_name = '%s_%s_admin' % ( str( self.name ), str( self.user.username ) )
for rra in self.roles:
role = rra.role
if str( role.name ) == admin_role_name:
return role
raise Exception( 'Repository %s owned by %s is not associated with a required administrative role.' %
( str( self.name ), str( self.user.username ) ) )
def allow_push( self, app ):
repo = hg.repository( ui.ui(), self.repo_path( app ) )
return repo.ui.config( 'web', 'allow_push' )
def can_change_type( self, app ):
# Allow changing the type only if the repository has no contents, has never been installed, or has
# never been changed from the default type.
if self.is_new( app ):
return True
if self.times_downloaded == 0:
return True
if self.type == rt_util.UNRESTRICTED:
return True
return False
def can_change_type_to( self, app, new_type_label ):
if self.type == new_type_label:
return False
if self.can_change_type( app ):
new_type = app.repository_types_registry.get_class_by_label( new_type_label )
if new_type.is_valid_for_type( app, self ):
return True
return False
def get_changesets_for_setting_metadata( self, app ):
type_class = self.get_type_class( app )
return type_class.get_changesets_for_setting_metadata( app, self )
def get_repository_dependencies( self, app, changeset, toolshed_url ):
# We aren't concerned with repositories of type tool_dependency_definition here if a
# repository_metadata record is not returned because repositories of this type will never
# have repository dependencies. However, if a readme file is uploaded, or some other change
# is made that does not create a new downloadable changeset revision but updates the existing
# one, we still want to be able to get repository dependencies.
repository_metadata = suc.get_current_repository_metadata_for_changeset_revision( app,
self,
changeset )
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
rb = relation_builder.RelationBuilder( app, self, repository_metadata, toolshed_url )
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
if repository_dependencies:
return repository_dependencies
return None
def get_type_class( self, app ):
return app.repository_types_registry.get_class_by_label( self.type )
def get_tool_dependencies( self, changeset_revision ):
for downloadable_revision in self.downloadable_revisions:
if downloadable_revision.changeset_revision == changeset_revision:
return downloadable_revision.metadata.get( 'tool_dependencies', [] )
return []
def installable_revisions( self, app, sort_revisions=True ):
return suc.get_metadata_revisions( self,
hg.repository( ui.ui(), self.repo_path( app ) ),
sort_revisions=sort_revisions )
def is_new( self, app ):
repo = hg.repository( ui.ui(), self.repo_path( app ) )
tip_ctx = repo.changectx( repo.changelog.tip() )
return tip_ctx.rev() < 0
def repo_path( self, app ):
return app.hgweb_config_manager.get_entry( os.path.join( "repos", self.user.username, self.name ) )
def revision( self, app ):
repo = hg.repository( ui.ui(), self.repo_path( app ) )
tip_ctx = repo.changectx( repo.changelog.tip() )
return "%s:%s" % ( str( tip_ctx.rev() ), str( repo.changectx( repo.changelog.tip() ) ) )
def set_allow_push( self, app, usernames, remove_auth='' ):
allow_push = util.listify( self.allow_push( app ) )
if remove_auth:
allow_push.remove( remove_auth )
else:
for username in util.listify( usernames ):
if username not in allow_push:
allow_push.append( username )
allow_push = '%s\n' % ','.join( allow_push )
repo = hg.repository( ui.ui(), path=self.repo_path( app ) )
# Why doesn't the following work?
# repo.ui.setconfig( 'web', 'allow_push', allow_push )
lines = repo.opener( 'hgrc', 'rb' ).readlines()
fp = repo.opener( 'hgrc', 'wb' )
for line in lines:
if line.startswith( 'allow_push' ):
fp.write( 'allow_push = %s' % allow_push )
else:
fp.write( line )
fp.close()
def tip( self, app ):
repo = hg.repository( ui.ui(), self.repo_path( app ) )
return str( repo.changectx( repo.changelog.tip() ) )
def to_dict( self, view='collection', value_mapper=None ):
rval = super( Repository, self ).to_dict( view=view, value_mapper=value_mapper )
if 'user_id' in rval:
rval[ 'owner' ] = self.user.username
return rval
class RepositoryMetadata( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'missing_test_components',
'tools_functionally_correct', 'do_not_test', 'test_install_error', 'has_repository_dependencies',
'includes_datatypes', 'includes_tools', 'includes_tool_dependencies', 'includes_tools_for_display_in_tool_panel',
'includes_workflows', 'time_last_tested' )
dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'missing_test_components',
'tools_functionally_correct', 'do_not_test', 'test_install_error', 'time_last_tested', 'tool_test_results',
'has_repository_dependencies', 'includes_datatypes', 'includes_tools', 'includes_tool_dependencies',
'includes_tools_for_display_in_tool_panel', 'includes_workflows', 'repository_dependencies' )
def __init__( self, id=None, repository_id=None, changeset_revision=None, metadata=None, tool_versions=None, malicious=False,
downloadable=False, missing_test_components=None, tools_functionally_correct=False, do_not_test=False,
test_install_error=False, time_last_tested=None, tool_test_results=None, has_repository_dependencies=False,
includes_datatypes=False, includes_tools=False, includes_tool_dependencies=False, includes_workflows=False ):
self.id = id
self.repository_id = repository_id
self.changeset_revision = changeset_revision
self.metadata = metadata
self.tool_versions = tool_versions
self.malicious = malicious
self.downloadable = downloadable
self.missing_test_components = missing_test_components
self.tools_functionally_correct = tools_functionally_correct
self.do_not_test = do_not_test
self.test_install_error = test_install_error
self.time_last_tested = time_last_tested
self.tool_test_results = tool_test_results
self.has_repository_dependencies = has_repository_dependencies
# We don't consider the special case has_repository_dependencies_only_if_compiling_contained_td here.
self.includes_datatypes = includes_datatypes
self.includes_tools = includes_tools
self.includes_tool_dependencies = includes_tool_dependencies
self.includes_workflows = includes_workflows
@property
def includes_tools_for_display_in_tool_panel( self ):
if self.metadata:
tool_dicts = self.metadata.get( 'tools', [] )
for tool_dict in tool_dicts:
if tool_dict.get( 'add_to_tool_panel', True ):
return True
return False
@property
def repository_dependencies( self ):
if self.has_repository_dependencies:
return [ repository_dependency for repository_dependency in self.metadata[ 'repository_dependencies' ][ 'repository_dependencies' ] ]
return []
class SkipToolTest( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_metadata_id', 'initial_changeset_revision' )
dict_element_visible_keys = ( 'id', 'repository_metadata_id', 'initial_changeset_revision', 'comment' )
def __init__( self, id=None, repository_metadata_id=None, initial_changeset_revision=None, comment=None ):
self.id = id
self.repository_metadata_id = repository_metadata_id
self.initial_changeset_revision = initial_changeset_revision
self.comment = comment
def as_dict( self, value_mapper=None ):
return self.to_dict( view='element', value_mapper=value_mapper )
class RepositoryReview( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted' )
dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted' )
approved_states = Bunch( NO='no', YES='yes' )
def __init__( self, repository_id=None, changeset_revision=None, user_id=None, rating=None, deleted=False ):
self.repository_id = repository_id
self.changeset_revision = changeset_revision
self.user_id = user_id
self.rating = rating
self.deleted = deleted
class ComponentReview( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
dict_element_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
approved_states = Bunch( NO='no', YES='yes', NA='not_applicable' )
def __init__( self, repository_review_id=None, component_id=None, comment=None, private=False, approved=False, rating=None, deleted=False ):
self.repository_review_id = repository_review_id
self.component_id = component_id
self.comment = comment
self.private = private
self.approved = approved
self.rating = rating
self.deleted = deleted
class Component( object ):
def __init__( self, name=None, description=None ):
self.name = name
self.description = description
class ItemRatingAssociation( object ):
def __init__( self, id=None, user=None, item=None, rating=0, comment='' ):
self.id = id
self.user = user
self.item = item
self.rating = rating
self.comment = comment
def set_item( self, item ):
""" Set association's item. """
pass
class RepositoryRatingAssociation( ItemRatingAssociation ):
def set_item( self, repository ):
self.repository = repository
class Category( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name', 'description', 'deleted' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'deleted' )
def __init__( self, name=None, description=None, deleted=False ):
self.name = name
self.description = description
self.deleted = deleted
class RepositoryCategoryAssociation( object ):
def __init__( self, repository=None, category=None ):
self.repository = repository
self.category = category
class Tag( object ):
def __init__( self, id=None, type=None, parent_id=None, name=None ):
self.id = id
self.type = type
self.parent_id = parent_id
self.name = name
def __str__( self ):
return "Tag(id=%s, type=%i, parent_id=%s, name=%s)" % ( self.id, self.type, self.parent_id, self.name )
class ItemTagAssociation( object ):
def __init__( self, id=None, user=None, item_id=None, tag_id=None, user_tname=None, value=None ):
self.id = id
self.user = user
self.item_id = item_id
self.tag_id = tag_id
self.user_tname = user_tname
self.value = None
self.user_value = None
class PostJobAction( object ):
def __init__( self, action_type, workflow_step, output_name=None, action_arguments=None):
self.action_type = action_type
self.output_name = output_name
self.action_arguments = action_arguments
self.workflow_step = workflow_step
class StoredWorkflowAnnotationAssociation( object ):
pass
class WorkflowStepAnnotationAssociation( object ):
pass
class Workflow( object ):
def __init__( self ):
self.user = None
self.name = None
self.has_cycles = None
self.has_errors = None
self.steps = []
class WorkflowStep( object ):
def __init__( self ):
self.id = None
self.type = None
self.name = None
self.tool_id = None
self.tool_inputs = None
self.tool_errors = None
self.position = None
self.input_connections = []
self.config = None
self.label = None
class WorkflowStepConnection( object ):
def __init__( self ):
self.output_step = None
self.output_name = None
self.input_step = None
self.input_name = None
# Utility methods
def sort_by_attr( seq, attr ):
"""
Sort the sequence of objects by object's attribute
Arguments:
seq - the list or any sequence (including immutable one) of objects to sort.
attr - the name of attribute to sort by
"""
# Use the "Schwartzian transform"
# Create the auxiliary list of tuples where every i-th tuple has form
# (seq[i].attr, i, seq[i]) and sort it. The second item of tuple is needed not
# only to provide stable sorting, but mainly to eliminate comparison of objects
# (which can be expensive or prohibited) in case of equal attribute values.
intermed = map( None, map( getattr, seq, ( attr, ) * len( seq ) ), xrange( len( seq ) ), seq )
intermed.sort()
return map( operator.getitem, intermed, ( -1, ) * len( intermed ) )
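# Illustrative example (not part of the original module): sort Repository
# objects by their "name" attribute.
#
#   repos = [ Repository( name="zlib" ), Repository( name="abc" ) ]
#   ordered = sort_by_attr( repos, "name" )   # "abc" first, then "zlib"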
def directory_hash_id( id ):
s = str( id )
l = len( s )
# Shortcut -- ids 0-999 go under ../000/
if l < 4:
return [ "000" ]
# Pad with zeros until a multiple of three
padded = ( ( ( 3 - len( s ) ) % 3 ) * "0" ) + s
# Drop the last three digits -- 1000 files per directory
padded = padded[:-3]
# Break into chunks of three
return [ padded[i * 3:(i + 1) * 3] for i in range( len( padded ) // 3 ) ]
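# Illustrative examples (not part of the original module):
#   directory_hash_id( 123 ) -> [ "000" ] (ids 0-999 share one directory)
#   directory_hash_id( 12345 ) -> [ "012" ] ("12345" pads to "012345", then drops "345")
#   directory_hash_id( 1234567 ) -> [ "001", "234" ]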
|
the-stack_106_30015 | import mechanicalsoup as ms
import os
import re
from win10toast import ToastNotifier
from time import sleep
from sys import exit
browser = ms.StatefulBrowser()
captive_portal_url = 'http://172.16.40.5:8090/httpclient.html'
try:
browser.open(captive_portal_url)
except:
exit(0)
# open the accounts file
try:
accounts = open(os.getenv('APPDATA') + "/AutoCaptive/accounts.txt")
except:
toastn = ToastNotifier()
toastn.show_toast("AutoCaptive" , "Cannot find account",icon_path = "AutoCaptive.ico")
exit(1)
lines = accounts.readlines()
user_acc = [tuple(i.strip().split()) for i in lines]
success_message = 'You have successfully logged in'
for i in user_acc:
browser.open(captive_portal_url)
browser.select_form()
browser['username'] = i[0]
browser['password'] = i[1]
res = browser.submit_selected()
if re.findall(success_message, str(res.text)):
toastn = ToastNotifier()
toastn.show_toast("AutoCaptive" , "Successfully logged in with " + i[0] , icon_path = "AutoCaptive.ico")
while 1:
browser.refresh()
sleep(240)
toastn = ToastNotifier()
toastn.show_toast("AutoCaptive" , "All accounts are being used", icon_path = "AutoCaptive.ico")
exit(2)
|
the-stack_106_30016 | import setuptools
with open('README.rst', 'r') as readme_file:
long_description = readme_file.read()
setuptools.setup(
name='django-cpf-cnpj',
version='1.0.0',
long_description=long_description,
long_description_content_type = 'text/x-rst',
description='A django model and form field for normalised cpf and cnpj.',
url='https://github.com/flavianogjc/django-cpf-cnpj',
author='flavianogjc',
author_email='[email protected]',
platforms='OS Independent',
license='MIT',
classifiers=[
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
python_requires='>=3.6',
install_requires=['Django >= 2.2',],
packages=['django_cpf_cnpj',]
)
|
the-stack_106_30018 | import argparse
import os
import numpy as np
import tensorflow as tf
from waymo_toolkit.extractor import ImageExtractor, LabelExtractor, LaserExtractor
from waymo_toolkit.utils.logger import setup_logger
logger = setup_logger("extractor")
def extract(source_dir: str, save_dir: str, args):
files = os.listdir(source_dir)
files = [os.path.join(source_dir, _) for _ in files if _.endswith("tfrecord")]
assert len(files) > 0
dataset = tf.data.TFRecordDataset(files)
if args.image:
imgext = ImageExtractor(dataset, save_dir)
imgext.extract()
if args.label:
labext = LabelExtractor(dataset, save_dir)
labext.extract()
if args.laser:
lasext = LaserExtractor(dataset, save_dir)
lasext.extract()
if args.subset:
seed = args.seed
percentage = args.percentage
files = sorted(
[int(_.replace(".pb", "")) for _ in os.listdir(os.path.join(save_dir, "label"))]
)
files = np.array(files)
total = len(files)
logger.info(
"{} frames in {} totally".format(total, save_dir)
) # training: 158081 # validation: 39987
np.random.seed(seed)
index = np.random.choice(total, int(percentage * total), replace=False)
selected = files[index]
np.savetxt(os.path.join(save_dir, "split.txt"), selected)
def main():
parser = argparse.ArgumentParser(description="Extracting the elements from Waymo")
parser.add_argument("--source", required=True, help="provide source path to waymo", type=str)
parser.add_argument("--dest", required=True, help="provide destination path", type=str)
parser.add_argument(
"--type",
default="train",
help="type of the extrated data, in ['train', 'val', 'test', 'all']",
type=str,
)
parser.add_argument("--image", action="store_true", help="whether to extract images")
parser.add_argument("--label", action="store_true", help="whether to extract labels")
parser.add_argument("--laser", action="store_true", help="whether to extract lasers")
parser.add_argument("--subset", action="store_true", help="whether to extract the subset")
parser.add_argument(
"--seed", default=20200319, help="random seed for select the subset", type=int
)
parser.add_argument("--percentage", default=0.1, help="the percentage of subset", type=float)
args = parser.parse_args()
source_dir = args.source
save_dir = args.dest
assert args.type in ["train", "val", "test", "all"]
if args.type == "all":
for fold in ["training_seg", "validation_seg", "testing_seg"]:
source_folder = os.path.join(source_dir, fold)
save_folder = os.path.join(save_dir, fold.replace("_seg", ""))
if fold == "testing_seg":
args.subset = False
extract(source_folder, save_folder, args)
elif args.type == "train":
source_dir = os.path.join(source_dir, "training_seg")
save_dir = os.path.join(save_dir, "training")
elif args.type == "val":
source_dir = os.path.join(source_dir, "validation_seg")
save_dir = os.path.join(save_dir, "validation")
elif args.type == "test":
source_dir = os.path.join(source_dir, "testing_seg")
save_dir = os.path.join(save_dir, "testing")
extract(source_dir, save_dir, args)
if __name__ == "__main__":
main()
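# Example invocation (illustrative only; the script name and paths are hypothetical):
#   python extract_waymo.py --source /data/waymo --dest /data/waymo_extracted \
#       --type train --image --label --subset --percentage 0.1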
|
the-stack_106_30021 | '''
Rice dataset.
Default image folder: ./refine_RiceDataset
- allData: returns all data
    - return value: (x, y)
      x: file paths
      y: class labels
- splitted_data: returns the data and labels split into training and test sets
    - return value: (x_train, x_test, y_train, y_test)
- read_image_tensor: reads image data into a tensor
- image_tensor_norm: normalizes an image data tensor
'''
import numpy as np
import os
import glob
import tensorflow as tf
from sklearn.model_selection import train_test_split
SOUND = 1  # sound (intact) kernels
UNSOUND = 0  # unsound (defective) kernels
MAX_LEFT_PADDING=2
MAX_TOP_PADDING=2
IMAGE_HEIGHT = 224 - MAX_LEFT_PADDING
IMAGE_WIDTH = 224 - MAX_TOP_PADDING
# Load the dataset
data_dir=os.path.join('.','refine_RiceDataset')
sound_dir = os.path.join(data_dir,'**',str(SOUND),'**','*.jpg')
unsound_dir = os.path.join(data_dir,'**',str(UNSOUND),'**','*.jpg')
def soundData(data_dir=sound_dir):
path_sound = glob.glob(sound_dir, recursive=True)
print('sound examples: ', len(path_sound))
sound_labels = np.ones(len(path_sound)).astype(int)
return path_sound, sound_labels
def unsoundData(data_dir=unsound_dir):
path_unsound = glob.glob(unsound_dir, recursive=True)
print('unsound examples: ', len(path_unsound))
unsound_labels = np.zeros(len(path_unsound)).astype(int)
return path_unsound, unsound_labels
def allData():
x_sound, y_sound= soundData()
x_unsound, y_unsound = unsoundData()
x = np.append(x_sound, x_unsound)
y = np.append(y_sound, y_unsound)
return x, y
# Split into training and test sets
def splitted_data(**kw):
x, y = allData()
x_train, x_test, y_train, y_test = train_test_split(x, y, **kw)
assert len(x_train) == len(y_train)
assert len(x_test) == len(y_test)
return x_train, x_test, y_train, y_test
'''
Read image data from the given file path
'''
def read_image_tensor(x, left_padding, top_padding):
image_string = tf.read_file(x)
image = tf.image.decode_image(image_string)
image = tf.image.crop_to_bounding_box(image, top_padding, left_padding, IMAGE_HEIGHT, IMAGE_WIDTH)
return image
'''
Normalize the image data tensor (scale pixel values to roughly [-1, 1])
'''
def image_tensor_norm(image_tensor):
return tf.divide(tf.subtract(tf.cast(image_tensor, tf.float32), 128.), 128.)
def _read_image(x, y, lp, tp):
image = read_image_tensor(x, lp, tp)
image = image_tensor_norm(image)
return image, y
def _read_image_validate(x, y):
image = read_image_tensor(x, 0, 0)
image = image_tensor_norm(image)
return image, y
'''
Create a Dataset iterator over the training set
'''
def train_dataset_iterator(x_train, y_train, left_padding, top_padding,
batch=1, epoch=1,
shuffle=True, shuffle_buffer_size=100, train_set_length=0, max_left_padding=0, max_top_padding=0):
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train, left_padding, top_padding)).repeat(epoch)
if shuffle:
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.map(_read_image).batch(batch)
iterator = train_ds.make_initializable_iterator()
return iterator
def validate_dataset_iterator(x_test, y_test, batch=20):
validate_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).shuffle(10000).map(_read_image_validate).batch(batch)
iterator = validate_ds.make_initializable_iterator()
return iterator
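# Illustrative usage sketch (not part of the original module; assumes the
# refine_RiceDataset folder described above and TensorFlow 1.x graph mode):
#
#   x_train, x_test, y_train, y_test = splitted_data(test_size=0.2)
#   left_pad = np.random.randint(0, MAX_LEFT_PADDING + 1, len(x_train))
#   top_pad = np.random.randint(0, MAX_TOP_PADDING + 1, len(x_train))
#   train_iter = train_dataset_iterator(x_train, y_train, left_pad, top_pad,
#                                       batch=32, epoch=10)
#   val_iter = validate_dataset_iterator(x_test, y_test, batch=20)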
|
the-stack_106_30026 | #!/usr/bin/env python
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the export database utility which allows users to export
metadata for objects in a database and data for tables.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
from mysql.utilities.command.dbexport import export_databases
from mysql.utilities.command.dbexport import multiprocess_db_export_task
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.options import (
add_all, add_character_set_option, add_format_option, add_locking,
add_no_headers_option, add_regexp, add_rpl_mode, add_rpl_user,
add_skip_options, add_verbosity, check_all, check_rpl_options,
check_skip_options, check_verbosity, setup_common_options,
check_password_security, get_ssl_dict,
)
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting)
from mysql.utilities.common.tools import (check_connector_python,
print_elapsed_time)
from mysql.utilities.exception import FormatError
from mysql.utilities.exception import UtilError
# Constants
NAME = "MySQL Utilities - mysqldbexport "
DESCRIPTION = "mysqldbexport - export metadata and data from databases"
USAGE = "%prog --server=user:pass@host:port:socket db1, db2, db3"
_PERMITTED_DISPLAY = ["names", "brief", "full"]
_PERMITTED_EXPORTS = ["data", "definitions", "both"]
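# Example invocation (illustrative only; the server credentials and database
# name are hypothetical):
#   mysqldbexport --server=root:secret@localhost:3306 --export=both \
#                 --format=sql --output-file=util_test.sql util_test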
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Needed for freeze support to avoid RuntimeError when running as a Windows
# executable, otherwise ignored.
multiprocessing.freeze_support()
# Setup the command parser and setup server, help
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE)
# Setup utility-specific options:
# Add character set option
add_character_set_option(parser)
# Output format
add_format_option(parser, "display the output in either sql (default), "
"grid, tab, csv, or vertical format", "sql", True)
# Display format
parser.add_option("-d", "--display", action="store", dest="display",
default="brief", help="control the number of columns "
"shown: 'brief' = minimal columns for object creation "
"(default), 'full' = all columns, 'names' = only object "
"names (not valid for --format=sql)", type="choice",
choices=_PERMITTED_DISPLAY)
# Export mode
parser.add_option("-e", "--export", action="store", dest="export",
default="definitions", help="control the export of "
"either 'data' = only the table data for the tables in "
"the database list, 'definitions' = export only the "
"definitions for the objects in the database list, or "
"'both' = export the metadata followed by the data "
"(default: export definitions)", type="choice",
choices=_PERMITTED_EXPORTS)
# Single insert mode
parser.add_option("-b", "--bulk-insert", action="store_true",
dest="bulk_import", default=False,
help="use bulk insert statements for data "
"(default:False)")
# No header option
add_no_headers_option(parser, restricted_formats=['tab', 'csv'])
# Skip blobs for export
parser.add_option("--skip-blobs", action="store_true", dest="skip_blobs",
default=False, help="do not export blob data.")
# File-per-table mode
parser.add_option("--file-per-table", action="store_true",
dest="file_per_tbl", default=False, help="write table "
"data to separate files. Valid only for --export=data "
"or --export=both.")
# Add the exclude database option
parser.add_option("-x", "--exclude", action="append", dest="exclude",
type="string", default=None, help="exclude one or more "
"objects from the operation using either a specific "
"name (e.g. db1.t1), a LIKE pattern (e.g. db1.t% or "
"db%.%) or a REGEXP search pattern. To use a REGEXP "
"search pattern for all exclusions, you must also "
"specify the --regexp option. Repeat the --exclude "
"option for multiple exclusions.")
# Add the all database options
add_all(parser, "databases")
# Add the skip common options
add_skip_options(parser)
# Add verbosity and quiet (silent) mode
add_verbosity(parser, True)
# Add regexp
add_regexp(parser)
# Add locking
add_locking(parser)
# Replication user and password
add_rpl_user(parser)
# Add replication options
add_rpl_mode(parser)
parser.add_option("--skip-gtid", action="store_true", default=False,
dest="skip_gtid", help="skip creation of GTID_PURGED "
"statements.")
# Add comment replication output
parser.add_option("--comment-rpl", action="store_true", default=False,
dest="comment_rpl", help="place the replication "
"statements in comment statements. Valid only with "
"--rpl option.")
parser.add_option("--skip-fkey-checks", action="store_true", default=False,
dest="skip_fkeys", help="skip creation of foreign key "
"disable/enable statements.")
# Add multiprocessing option.
parser.add_option("--multiprocess", action="store", dest="multiprocess",
type="int", default="1", help="use multiprocessing, "
"number of processes to use for concurrent execution. "
"Special values: 0 (number of processes equal to the "
"CPUs detected) and 1 (default - no concurrency).")
# Add output file option.
parser.add_option("--output-file", action="store", dest="output_file",
help="path and file name to store the generated output, "
"by default the standard output (no file).")
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args, "# ")
# Warn if quiet and verbosity are both specified
check_verbosity(opt)
try:
skips = check_skip_options(opt.skip_objects)
except UtilError:
_, err, _ = sys.exc_info()
print("ERROR: {0}".format(err.errmsg))
sys.exit(1)
# Fail if no db arguments or all
if len(args) == 0 and not opt.all:
parser.error("You must specify at least one database to export or "
"use the --all option to export all databases.")
# Check replication options
check_rpl_options(parser, opt)
# Fail if we have arguments and all databases option listed.
check_all(parser, opt, args, "databases")
if opt.skip_blobs and not opt.export == "data":
print("# WARNING: --skip-blobs option ignored for metadata export.")
if opt.file_per_tbl and opt.export in ("definitions", "both"):
print("# WARNING: --file-per-table option ignored for metadata "
"export.")
if "data" in skips and opt.export == "data":
print("ERROR: You cannot use --export=data and --skip-data when "
"exporting table data.")
sys.exit(1)
# Process --exclude values to remove unnecessary quotes (when used) in
# order to avoid further matching issues.
if opt.exclude:
# Remove unnecessary outer quotes.
exclude_list = [pattern.strip("'\"") for pattern in opt.exclude]
else:
exclude_list = opt.exclude
# Check multiprocessing options.
if opt.multiprocess < 0:
parser.error("Number of processes '{0}' must be greater or equal than "
"zero.".format(opt.multiprocess))
num_cpu = multiprocessing.cpu_count()
if opt.multiprocess > num_cpu and not opt.quiet:
print("# WARNING: Number of processes '{0}' is greater than the "
"number of CPUs '{1}'.".format(opt.multiprocess, num_cpu))
# Warning for non-posix (windows) systems if too many process are used.
num_db = len(args)
if (os.name != 'posix' and num_db and opt.multiprocess > num_db
and not opt.quiet):
print("# WARNING: Number of processes '{0}' is greater than the "
"number of databases to export '{1}'.".format(opt.multiprocess,
num_db))
# Check output_file option.
if opt.output_file:
# Check if file already exists.
if os.path.exists(opt.output_file) and not opt.quiet:
print("# WARNING: Specified output file already exists. The file "
"will be overwritten.")
output_filename = opt.output_file
try:
output_file = open(output_filename, 'w')
except IOError:
parser.error("Unable to create file (check path and access "
"privileges): {0}".format(opt.output_file))
else:
# Always send output to a file for performance reasons (contents sent
# at the end to the stdout).
output_file = tempfile.NamedTemporaryFile(delete=False)
output_filename = None
# Set options for database operations.
options = {
"skip_tables": "tables" in skips,
"skip_views": "views" in skips,
"skip_triggers": "triggers" in skips,
"skip_procs": "procedures" in skips,
"skip_funcs": "functions" in skips,
"skip_events": "events" in skips,
"skip_grants": "grants" in skips,
"skip_create": "create_db" in skips,
"skip_data": "data" in skips,
"skip_blobs": opt.skip_blobs,
"skip_fkeys": opt.skip_fkeys,
"format": opt.format,
"no_headers": opt.no_headers,
"display": opt.display,
"single": not opt.bulk_import,
"quiet": opt.quiet,
"verbosity": opt.verbosity,
"debug": opt.verbosity >= 3,
"file_per_tbl": opt.file_per_tbl,
"exclude_patterns": exclude_list,
"all": opt.all,
"use_regexp": opt.use_regexp,
"locking": opt.locking,
"rpl_user": opt.rpl_user,
"rpl_mode": opt.rpl_mode,
"rpl_file": opt.rpl_file,
"comment_rpl": opt.comment_rpl,
"export": opt.export,
"skip_gtid": opt.skip_gtid,
"charset": opt.charset,
"multiprocess": num_cpu if opt.multiprocess == 0 else opt.multiprocess,
"output_filename": output_filename,
}
# Parse server connection values
try:
options.update(get_ssl_dict(opt))
server_values = parse_connection(opt.server, None, options)
except FormatError:
_, err, _ = sys.exc_info()
parser.error("Server connection values invalid: {0}.".format(err))
except UtilError:
_, err, _ = sys.exc_info()
parser.error("Server connection values invalid: "
"{0}.".format(err.errmsg))
# Build list of databases to copy
db_list = []
for db in args:
# Remove backtick quotes (handled later)
db = remove_backtick_quoting(db) \
if is_quoted_with_backticks(db) else db
db_list.append(db)
try:
# record start time
if opt.verbosity >= 3:
start_export_time = time.time()
# Export databases concurrently for non posix systems (windows).
if options['multiprocess'] > 1 and os.name != 'posix':
# Create export databases tasks.
export_db_tasks = []
for db in db_list:
export_task = {
'srv_con': server_values,
'db_list': [db],
'options': options,
}
export_db_tasks.append(export_task)
# Create process pool.
workers_pool = multiprocessing.Pool(
processes=options['multiprocess']
)
# Concurrently export databases.
res = workers_pool.map_async(multiprocess_db_export_task,
export_db_tasks)
workers_pool.close()
# Get list of temporary files with the exported data.
tmp_files_list = res.get()
workers_pool.join()
# Merge resulting temp files (if generated).
for tmp_filename in tmp_files_list:
if tmp_filename:
tmp_file = open(tmp_filename, 'r')
shutil.copyfileobj(tmp_file, output_file)
tmp_file.close()
os.remove(tmp_filename)
else:
# Export all specified databases (no database level concurrency).
# Note: on POSIX systems multiprocessing is applied at the table
# level (not database).
export_databases(server_values, db_list, output_file, options)
if output_filename is None:
# Dump the export output to the stdout.
output_file.seek(0)
shutil.copyfileobj(output_file, sys.stdout)
output_file.close()
os.remove(output_file.name)
# record elapsed time
if opt.verbosity >= 3:
sys.stdout.flush()
print_elapsed_time(start_export_time)
except UtilError:
_, err, _ = sys.exc_info()
print("ERROR: {0}".format(err.errmsg))
sys.exit(1)
sys.exit()
|
the-stack_106_30027 | answer = 'Y'
my_list = []
while answer == 'Y':
choice = int(input('Enter a number: '))
if choice not in my_list:
my_list.append(choice)
print('Number added to list')
else:
print('Repeated number, not added')
answer = input('Do you want to continue? [Y/N] ').upper()
print(f'The numbers entered were: {sorted(my_list)}')
|
the-stack_106_30029 | from setuptools import find_packages, setup
__version__ = "3.0.0"
setup(
# package name in pypi
name="django-oscar-api",
# extract version from module.
version=__version__,
description="REST API module for django-oscar",
long_description=open("README.rst").read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Framework :: Django :: 3.1",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
python_requires=">=3.6",
keywords="",
author="Lars van de Kerkhof, Martijn Jacobs",
author_email="[email protected], [email protected]",
url="https://github.com/django-oscar/django-oscar-api",
license="BSD",
packages=find_packages(
exclude=[
"*tests.unit",
"*tests.serializers*",
"*tests.doctests*",
"*fixtures",
"*fixtures*",
"*sandbox*",
]
),
# include non python files
include_package_data=True,
zip_safe=False,
# specify dependencies
install_requires=[
"setuptools",
"django-oscar>=3.0",
"Django>=2.2.13", # CVE-2020-9402
"djangorestframework>=3.9", # first version to support Django 2.2
],
# mark test target to require extras.
extras_require={
"dev": ["coverage", "mock", "twine", "wheel", "easy_thumbnails"],
"lint": ["flake8", "flake8-black", "flake8-bugbear", "black>=19.10b0"],
},
)
|
the-stack_106_30030 | # Copyright 2019 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from uuid import uuid4
from google.cloud import firestore
from flask import Flask, make_response, request
app = Flask(__name__)
db = firestore.Client()
sessions = db.collection('sessions')
greetings = [
'Hello World',
'Hallo Welt',
'Ciao Mondo',
'Salut le Monde',
'Hola Mundo',
]
@firestore.transactional
def get_session_data(transaction, session_id):
""" Looks up (or creates) the session with the given session_id.
Creates a random session_id if none is provided. Increments
the number of views in this session. Updates are done in a
transaction to make sure no saved increments are overwritten.
"""
if session_id is None:
session_id = str(uuid4()) # Random, unique identifier
doc_ref = sessions.document(document_id=session_id)
doc = doc_ref.get(transaction=transaction)
if doc.exists:
session = doc.to_dict()
else:
session = {
'greeting': random.choice(greetings),
'views': 0
}
session['views'] += 1 # This counts as a view
transaction.set(doc_ref, session)
session['session_id'] = session_id
return session
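# Illustrative shape of the value returned above (the values are hypothetical):
#   {'greeting': 'Hola Mundo', 'views': 3,
#    'session_id': '1c9d2a52-6f5e-4c8b-9f0e-0b7a8a1d2e34'}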
@app.route('/', methods=['GET'])
def home():
template = '<body>{} views for {}</body>'
transaction = db.transaction()
session = get_session_data(transaction, request.cookies.get('session_id'))
resp = make_response(template.format(
session['views'],
session['greeting']
)
)
resp.set_cookie('session_id', session['session_id'], httponly=True)
return resp
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080)
|
the-stack_106_30031 | import pytest
import vtk
import pyvista
from pyvista import colors
@pytest.fixture
def default_theme():
return pyvista.themes.DefaultTheme()
def test_backwards_compatibility():
try:
color = (0.1, 0.4, 0.7)
pyvista.rcParams['color'] = color
assert pyvista.rcParams['color'] == color
# test nested values
init_value = pyvista.rcParams['axes']['show']
pyvista.rcParams['axes']['show'] = not init_value
assert pyvista.rcParams['axes']['show'] is not init_value
finally:
# always return to testing theme
pyvista.set_plot_theme('testing')
@pytest.mark.parametrize('parm', [('enabled', True),
('occlusion_ratio', 0.5),
('number_of_peels', 2)]
)
def test_depth_peeling_config(default_theme, parm):
attr, value = parm
assert hasattr(default_theme.depth_peeling, attr)
setattr(default_theme.depth_peeling, attr, value)
assert getattr(default_theme.depth_peeling, attr) == value
def test_depth_peeling_eq(default_theme):
my_theme = pyvista.themes.DefaultTheme()
my_theme.depth_peeling.enabled = not my_theme.depth_peeling.enabled
assert my_theme.depth_peeling != default_theme.depth_peeling
assert my_theme.depth_peeling != 1
@pytest.mark.parametrize('parm', [('color', (0.1, 0.1, 0.1)),
('line_width', 1),
('opacity', 1.0),
('feature_angle', 20),
('decimate', 0.5),
])
def test_silhouette_config(default_theme, parm):
attr, value = parm
assert hasattr(default_theme.silhouette, attr)
setattr(default_theme.silhouette, attr, value)
assert getattr(default_theme.silhouette, attr) == value
def test_depth_silhouette_eq(default_theme):
my_theme = pyvista.themes.DefaultTheme()
my_theme.silhouette.opacity = 0.11111
assert my_theme.silhouette != default_theme.silhouette
assert my_theme.silhouette != 1
def test_depth_silhouette_opacity_outside_clamp(default_theme):
my_theme = pyvista.themes.DefaultTheme()
with pytest.raises(ValueError):
my_theme.silhouette.opacity = 10
with pytest.raises(ValueError):
my_theme.silhouette.opacity = -1
@pytest.mark.parametrize('parm', [('slider_length', 0.03),
('slider_width', 0.02),
('slider_color', (0.5, 0.5, 0.3)),
('tube_width', 0.02),
('tube_color', (0.5, 0.5, 0.5)),
('cap_opacity', 0.5),
('cap_length', 0.02),
('cap_width', 0.04)
])
@pytest.mark.parametrize('style', ('modern', 'classic'))
def test_slider_style_config(default_theme, parm, style):
attr, value = parm
slider_style = getattr(default_theme.slider_styles, style)
assert hasattr(slider_style, attr)
setattr(slider_style, attr, value)
assert getattr(slider_style, attr) == value
def test_slider_style_config_eq(default_theme):
assert default_theme.slider_styles.modern != default_theme.slider_styles.classic
assert default_theme.slider_styles.modern != 1
def test_slider_style_eq(default_theme):
my_theme = pyvista.themes.DefaultTheme()
my_theme.slider_styles.modern.slider_length *= 2
assert default_theme.slider_styles != my_theme.slider_styles
def test_invalid_color_str_single_char():
with pytest.raises(ValueError):
colors.string_to_rgb('x')
def test_color_str():
clr = colors.string_to_rgb("k")
assert (0.0, 0.0, 0.0) == clr
clr = colors.string_to_rgb("black")
assert (0.0, 0.0, 0.0) == clr
clr = colors.string_to_rgb("white")
assert (1.0, 1.0, 1.0) == clr
with pytest.raises(ValueError):
colors.string_to_rgb('not a color')
def test_font():
font = pyvista.parse_font_family('times')
assert font == vtk.VTK_TIMES
with pytest.raises(ValueError):
pyvista.parse_font_family('not a font')
def test_font_eq(default_theme):
defa_theme = pyvista.themes.DefaultTheme()
assert defa_theme.font == default_theme.font
paraview_theme = pyvista.themes.ParaViewTheme()
assert paraview_theme.font != default_theme.font
assert paraview_theme.font != 1
def test_font_family(default_theme):
font = 'courier'
default_theme.font.family = font
assert default_theme.font.family == font
with pytest.raises(ValueError):
default_theme.font.family = 'bla'
def test_font_title_size(default_theme):
default_theme.font.title_size = None
assert default_theme.font.title_size is None
def test_font_label_size(default_theme):
default_theme.font.label_size = None
assert default_theme.font.label_size is None
def test_font_fmt(default_theme):
fmt = '%.6e'
default_theme.font.fmt = fmt
assert default_theme.font.fmt == fmt
def test_axes_eq(default_theme):
assert default_theme.axes == pyvista.themes.DefaultTheme().axes
theme = pyvista.themes.DefaultTheme()
theme.axes.box = True
assert default_theme.axes != theme.axes
assert default_theme.axes != 1
def test_theme_wrong_type(default_theme):
with pytest.raises(TypeError):
default_theme.font = None
with pytest.raises(TypeError):
default_theme.colorbar_horizontal = None
with pytest.raises(TypeError):
default_theme.colorbar_vertical = None
with pytest.raises(TypeError):
default_theme.depth_peeling = None
with pytest.raises(TypeError):
default_theme.silhouette = None
with pytest.raises(TypeError):
default_theme.slider_styles = None
with pytest.raises(TypeError):
default_theme.slider_styles.classic = None
with pytest.raises(TypeError):
default_theme.slider_styles.modern = None
with pytest.raises(TypeError):
default_theme.axes = None
def test_axes_box(default_theme):
new_value = not default_theme.axes.box
default_theme.axes.box = new_value
assert default_theme.axes.box == new_value
def test_axes_show(default_theme):
new_value = not default_theme.axes.show
default_theme.axes.show = new_value
assert default_theme.axes.show == new_value
def test_colorbar_eq(default_theme):
theme = pyvista.themes.DefaultTheme()
assert default_theme.colorbar_horizontal == theme.colorbar_horizontal
assert default_theme.colorbar_horizontal != 1
assert default_theme.colorbar_horizontal != theme.colorbar_vertical
def test_colorbar_height(default_theme):
height = 0.3
default_theme.colorbar_horizontal.height = height
assert default_theme.colorbar_horizontal.height == height
def test_colorbar_width(default_theme):
width = 0.3
default_theme.colorbar_horizontal.width = width
assert default_theme.colorbar_horizontal.width == width
def test_colorbar_position_x(default_theme):
position_x = 0.3
default_theme.colorbar_horizontal.position_x = position_x
assert default_theme.colorbar_horizontal.position_x == position_x
def test_colorbar_position_y(default_theme):
position_y = 0.3
default_theme.colorbar_horizontal.position_y = position_y
assert default_theme.colorbar_horizontal.position_y == position_y
@pytest.mark.parametrize('theme', pyvista.themes._ALLOWED_THEMES)
def test_themes(theme):
try:
pyvista.set_plot_theme(theme.name)
assert pyvista.global_theme == theme.value()
finally:
# always return to testing theme
pyvista.set_plot_theme('testing')
def test_invalid_theme():
with pytest.raises(KeyError):
pyvista.set_plot_theme('this is not a valid theme')
def test_invalid_theme_type_error():
with pytest.raises(TypeError):
pyvista.set_plot_theme(1)
def test_set_theme():
theme = pyvista.themes.DarkTheme()
try:
pyvista.set_plot_theme(theme)
assert pyvista.global_theme == theme
finally:
# always return to testing theme
pyvista.set_plot_theme('testing')
def test_invalid_load_theme(default_theme):
with pytest.raises(TypeError):
default_theme.load_theme(123)
def test_window_size(default_theme):
with pytest.raises(ValueError):
default_theme.window_size = [1, 2, 3]
with pytest.raises(ValueError, match='Window size must be a positive value'):
default_theme.window_size = [-1, -2]
window_size = [1, 1]
default_theme.window_size = window_size
assert default_theme.window_size == window_size
def test_camera(default_theme):
with pytest.raises(TypeError):
default_theme.camera = [1, 0, 0]
with pytest.raises(KeyError, match='Expected the "viewup"'):
default_theme.camera = {'position': [1, 0, 0]}
with pytest.raises(KeyError, match='Expected the "position"'):
default_theme.camera = {'viewup': [1, 0, 0]}
camera = {'position': [1, 0, 0], 'viewup': [1, 0, 0]}
default_theme.camera = camera
assert default_theme.camera == camera
def test_cmap(default_theme):
cmap = 'jet'
default_theme.cmap = cmap
assert default_theme.cmap == cmap
with pytest.raises(ValueError, match='not a valid value'):
default_theme.cmap = 'not a color map'
def test_volume_mapper(default_theme):
assert hasattr(default_theme, 'volume_mapper')
volume_mapper = 'gpu'
default_theme.volume_mapper = volume_mapper
assert default_theme.volume_mapper == volume_mapper
with pytest.raises(ValueError, match='unknown'):
default_theme.volume_mapper = 'invalid'
def test_set_hidden_line_removal(default_theme):
default_theme.hidden_line_removal = True
assert default_theme.hidden_line_removal is True
default_theme.hidden_line_removal = False
assert default_theme.hidden_line_removal is False
@pytest.mark.parametrize('parm', [('background', (0.1, 0.2, 0.3)),
('auto_close', False),
('notebook', False),
('full_screen', True),
('nan_color', (0.5, 0.5, 0.5)),
('edge_color', (1.0, 0.0, 0.0)),
('outline_color', (1.0, 0.0, 0.0)),
('floor_color', (1.0, 0.0, 0.0)),
('show_scalar_bar', False),
('lighting', False),
('interactive', False),
('render_points_as_spheres', True),
('transparent_background', True),
('title', 'test_title'),
('multi_samples', 10),
('multi_rendering_splitting_position', 0.1),
('smooth_shading', True),
('name', 'test_theme'),
])
def test_theme_parm(default_theme, parm):
attr, value = parm
assert hasattr(default_theme, attr)
setattr(default_theme, attr, value)
assert getattr(default_theme, attr) == value
def test_theme_colorbar_orientation(default_theme):
orient = 'vertical'
default_theme.colorbar_orientation = orient
assert default_theme.colorbar_orientation == orient
with pytest.raises(ValueError):
default_theme.colorbar_orientation = 'invalid'
def test_restore_defaults(default_theme):
orig_value = default_theme.show_edges
default_theme.show_edges = not orig_value
default_theme.restore_defaults()
assert default_theme.show_edges == orig_value
def test_repr(default_theme):
rep = str(default_theme)
assert 'Background' in rep
assert default_theme.cmap in rep
assert str(default_theme.colorbar_orientation) in rep
assert default_theme._name.capitalize() in rep
def test_theme_slots(default_theme):
# verify we can't create an arbitrary attribute
with pytest.raises(AttributeError, match='has no attribute'):
default_theme.new_attr = 1
def test_theme_eq():
defa_theme0 = pyvista.themes.DefaultTheme()
defa_theme1 = pyvista.themes.DefaultTheme()
assert defa_theme0 == defa_theme1
dark_theme = pyvista.themes.DarkTheme()
assert defa_theme0 != dark_theme
# for coverage
assert defa_theme0 != 'apple'
def test_plotter_set_theme():
# test that the plotter theme is set to the new theme
my_theme = pyvista.themes.DefaultTheme()
my_theme.color = [1, 0, 0]
pl = pyvista.Plotter(theme=my_theme)
assert pl.theme.color == my_theme.color
assert pyvista.global_theme.color != pl.theme.color
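# Round-trip a theme through a JSON file: save a DarkTheme, then load it both
# with pyvista.load_theme and with an existing theme's load_theme method.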
def test_load_theme(tmpdir, default_theme):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.json'))
pyvista.themes.DarkTheme().save(filename)
loaded_theme = pyvista.load_theme(filename)
assert loaded_theme == pyvista.themes.DarkTheme()
default_theme.load_theme(filename)
assert default_theme == pyvista.themes.DarkTheme()
def test_antialiasing(default_theme):
for value in [True, False]:
default_theme.antialiasing = value
assert default_theme.antialiasing is value
pl = pyvista.Plotter(theme=default_theme)
assert pl.renderer.GetUseFXAA() is value
|
the-stack_106_30032 | # Time: O(nlogn)
# Space: O(n)
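#
# LeetCode 1671 - Minimum Number of Removals to Make Mountain Array.
# For every index, compute the length of the longest strictly increasing
# subsequence ending there (left-to-right pass) and of the longest strictly
# decreasing subsequence starting there (right-to-left pass).  Any index
# where both lengths are non-zero can serve as the mountain peak, keeping
# left + right + 1 elements; the answer is everything that cannot be kept.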
import bisect
class Solution(object):
def minimumMountainRemovals(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left_lis_len = [0]*len(nums)
lis = []
for i in range(len(nums)-1):
j = bisect.bisect_left(lis, nums[i])
if j == len(lis):
lis.append(nums[i])
else:
lis[j] = nums[i]
left_lis_len[i] = j
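        # second pass: scan right-to-left so the same patience/bisect trick
        # gives, for each index, the length of the strictly decreasing run
        # that can follow it (the descent if that index is the peak)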
max_len = 0
lis = []
for i in reversed(range(1, len(nums))):
j = bisect.bisect_left(lis, nums[i])
if j == len(lis):
lis.append(nums[i])
else:
lis[j] = nums[i]
            # a valid mountain needs a non-empty ascent and a non-empty
            # descent around the peak, so only count indices where both
            # sides are non-zero (this also skips the last index, whose
            # left_lis_len was never computed)
            if left_lis_len[i] and j:
                max_len = max(max_len, left_lis_len[i]+j)
return len(nums) - (1+max_len)
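# Example (LeetCode 1671 sample input):
#   Solution().minimumMountainRemovals([2, 1, 1, 5, 6, 2, 3, 1])  # -> 3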
|
the-stack_106_30034 | #!/usr/bin/python
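# Minimal AWS IoT Core subscriber: connect to the IoT endpoint over
# mutual-TLS MQTT (port 8883) with paho-mqtt, subscribe to every topic ('#')
# once the connection is acknowledged, and print each incoming message.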
import paho.mqtt.client as paho
import ssl
def on_connect(client, userdata, flags, rc):
print("Connection returned result: " + str(rc) )
client.subscribe("#" , 1 )
def on_message(client, userdata, msg):
print("topic: "+msg.topic)
print("payload: "+str(msg.payload))
mqttc = paho.Client()
mqttc.on_connect = on_connect
mqttc.on_message = on_message
awshost = "a1bgnbjjx7840d.iot.us-west-2.amazonaws.com"
awsport = 8883
clientId = "sameers_pi"
thingName = "sameers_pi"
caPath = "/home/pi/deviceSDK/VeriSign-Class 3-Public-Primary-Certification-Authority-G5.pem"
certPath = "/home/pi/deviceSDK/728abdac8d-certificate.pem.crt"
keyPath = "/home/pi/deviceSDK/728abdac8d-private.pem.key"
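# Mutual TLS: the CA certificate authenticates the AWS IoT endpoint, while the
# device certificate and private key authenticate this client to AWS IoT.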
mqttc.tls_set(caPath, certfile=certPath, keyfile=keyPath, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
mqttc.connect(awshost, awsport, keepalive=60)
mqttc.loop_forever()
|
the-stack_106_30037 | import asyncio
import logging
import multiprocessing as mp
import pytest
from msgpackio.client import Client
from msgpackio.rpc import RPCClient
from msgpackio.server import RPCServer
from msgpackio.exceptions import RemoteException
log = logging.getLogger(__name__)
def add(a, b):
return a + b
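# Helper executed in a child process: serve the given RPC bindings on
# 127.0.0.1:8888 until the test terminates the process.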
def server(**bindings):
async def main():
loop = asyncio.get_running_loop()
server = await loop.create_server(
lambda: RPCServer(**bindings), "127.0.0.1", 8888
)
async with server:
await server.serve_forever()
asyncio.run(main())
clients = [Client]
@pytest.mark.parametrize("cls", clients)
def test_rpc_client_async(cls):
s = mp.Process(target=server, kwargs=dict(add=add))
s.start()
try:
with RPCClient(cls("127.0.0.1", 8888)) as client:
future = client.call_async("add", 1, 2)
            assert not future.ready()
            future.wait(1)
            assert future.ready()
assert future.get() == 3
finally:
s.terminate()
@pytest.mark.parametrize("cls", clients)
def test_rpc_client_async_missing_key(cls):
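    # the server is started without any bindings, so calling "add" should
    # surface a RemoteException when the result is retrieved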
s = mp.Process(target=server)
s.start()
try:
with RPCClient(cls("127.0.0.1", 8888)) as client:
future = client.call_async("add", 1, 2)
            assert not future.ready()
            future.wait(1)
            assert future.ready()
with pytest.raises(RemoteException):
print(future.get())
finally:
s.terminate()
|