filename | text
---|---|
the-stack_106_23574 | import os
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
app_name = 'templatesadmin'
version = __import__(app_name).__version__
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk(app_name):
# Ignore dirnames that start with '.'
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
prefix = dirpath[len(app_name)+1:] # Strip "app_name/" or "app_name\"
for f in filenames:
data_files.append(os.path.join(prefix, f))
setup(name='django-'+app_name,
version=version,
description='A Django app to make minor changes to your templates on the fly.',
long_description=open('README.md').read(),
author='Pahaz Blinov',
author_email='[email protected]',
url='https://github.com/pahaz/django-templatesadmin/',
package_dir={app_name: app_name},
packages=packages,
package_data={app_name: data_files},
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
|
the-stack_106_23575 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageQueueMessage(Model):
"""StorageQueueMessage.
:param storage_account: Gets or sets the storage account name.
:type storage_account: str
:param queue_name: Gets or sets the queue name.
:type queue_name: str
:param sas_token: Gets or sets the SAS key.
:type sas_token: str
:param message: Gets or sets the message.
:type message: str
"""
_attribute_map = {
'storage_account': {'key': 'storageAccount', 'type': 'str'},
'queue_name': {'key': 'queueName', 'type': 'str'},
'sas_token': {'key': 'sasToken', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, storage_account=None, queue_name=None, sas_token=None, message=None):
self.storage_account = storage_account
self.queue_name = queue_name
self.sas_token = sas_token
self.message = message
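# Illustrative construction of the model above (all values below are
# placeholder assumptions, not real credentials):
#
#   msg = StorageQueueMessage(storage_account='mystorageaccount',
#                             queue_name='myqueue',
#                             sas_token='<sas-token>',
#                             message='hello world')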
|
the-stack_106_23576 | # -*- coding: utf-8 -*-
"""
flask envs, based on flask_environments
~~~~~~~~~~~~~~~~~~
Environment tools and configuration for Flask applications
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
import os
import yaml
from flask import current_app
class Environments(object):
def __init__(self, app=None, var_name=None, default_env=None):
self.app = app
self.var_name = var_name or 'FLASK_ENV'
self.default_env = default_env or 'DEVELOPMENT'
self.env = os.environ.get(self.var_name, self.default_env)
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config['ENVIRONMENT'] = self.env
if app.extensions is None:
app.extensions = {}
app.extensions['environments'] = self
def get_app(self, reference_app=None):
if reference_app is not None:
return reference_app
if self.app is not None:
return self.app
return current_app
def from_object(self, config_obj):
app = self.get_app()
for name in self._possible_names():
try:
obj = '%s.%s' % (config_obj, name)
app.config.from_object(obj)
return
except:
pass
app.config.from_object(config_obj)
def from_yaml(self, path):
with open(path) as f:
c = yaml.load(f, Loader=yaml.FullLoader)
for name in self._possible_names():
try:
c = c[name]
except:
pass
app = self.get_app()
for key in c:
if key.isupper():
app.config[key] = c[key]
def _possible_names(self):
return [self.env, self.env.capitalize(), self.env.lower()]
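# A minimal usage sketch (illustrative; the Flask app, the 'myapp.config'
# module path and the YAML file are assumptions, not part of this module):
#
#   app = Flask(__name__)
#   env = Environments(app)            # reads FLASK_ENV, defaults to DEVELOPMENT
#   env.from_object('myapp.config')    # tries .DEVELOPMENT / .Development / .development
#   env.from_yaml(os.path.join(app.root_path, 'config.yaml'))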
|
the-stack_106_23578 | """Transformer for Weisfeiler–Lehman Feature Vector calculations."""
# This code was originally used in the paper 'A Persistent Weisfeiler–Lehman
# Procedure for Graph Classification'. The original author is Christian Bock.
import collections
import copy
import numpy as np
class WeisfeilerLehman:
"""Implement Weisfeiler–Lehman feature vector generation."""
def __init__(self):
"""Create new instance of class."""
self._relabel_steps = collections.defaultdict(dict)
self._label_dict = {}
self._last_new_label = -1
self._preprocess_relabel_dict = {}
self._results = collections.defaultdict(dict)
self._label_dicts = {}
def _reset_label_generation(self):
self._last_new_label = -1
def _get_next_label(self):
self._last_new_label += 1
return self._last_new_label
def _relabel_graphs(self, X):
"""Auxiliary function for relabelling a graph."""
preprocessed_graphs = []
for i, g in enumerate(X):
x = g.copy()
labels = x.vs['label']
new_labels = []
for label in labels:
if label in self._preprocess_relabel_dict.keys():
new_labels.append(self._preprocess_relabel_dict[label])
else:
self._preprocess_relabel_dict[label] = \
self._get_next_label()
new_labels.append(self._preprocess_relabel_dict[label])
x.vs['label'] = new_labels
self._results[0][i] = (labels, new_labels)
preprocessed_graphs.append(x)
self._reset_label_generation()
return preprocessed_graphs
def fit_transform(self, X, num_iterations=3):
"""Perform transformation of input list of graphs."""
X = self._relabel_graphs(X)
for it in np.arange(1, num_iterations+1, 1):
self._reset_label_generation()
self._label_dict = {}
for i, g in enumerate(X):
# Get labels of the current iteration
current_labels = g.vs['label']
# Get for each vertex the labels of its neighbors
neighbor_labels = self._get_neighbor_labels(g, sort=True)
# Prepend the vertex label to the list of labels of its
# neighbors.
merged_labels = [
[b]+a for a, b in zip(neighbor_labels, current_labels)
]
# Generate a label dictionary based on the merged labels
self._append_label_dict(merged_labels)
# Relabel the graph
new_labels = self._relabel_graph(g, merged_labels)
self._relabel_steps[i][it] = {
idx: {
old_label: new_labels[idx]
} for idx, old_label in enumerate(current_labels)
}
g.vs['label'] = new_labels
self._results[it][i] = (merged_labels, new_labels)
self._label_dicts[it] = copy.deepcopy(self._label_dict)
return self._results
def _relabel_graph(self, X, merged_labels):
"""Extend graph with new merged labels."""
new_labels = []
for merged in merged_labels:
new_labels.append(self._label_dict['-'.join(map(str, merged))])
return new_labels
def _append_label_dict(self, merged_labels):
for merged_label in merged_labels:
dict_key = '-'.join(map(str, merged_label))
if dict_key not in self._label_dict.keys():
self._label_dict[dict_key] = self._get_next_label()
def _get_neighbor_labels(self, X, sort=True):
neighbor_indices = [
[n_v.index for n_v in X.vs[X.neighbors(v.index)]] for v in X.vs
]
neighbor_labels = []
for n_indices in neighbor_indices:
if sort:
neighbor_labels.append(sorted(X.vs[n_indices]['label']))
else:
neighbor_labels.append(X.vs[n_indices]['label'])
return neighbor_labels
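# A minimal usage sketch (assumption: graphs are python-igraph Graphs whose
# vertices carry a 'label' attribute, which _relabel_graphs expects):
#
#   import igraph as ig
#   g = ig.Graph.Ring(4)
#   g.vs['label'] = [0, 1, 0, 1]
#   wl = WeisfeilerLehman()
#   results = wl.fit_transform([g], num_iterations=2)
#   # results[h][i] holds (old_labels, new_labels) for graph i at iteration h.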
|
the-stack_106_23579 | import logging
from easilyb.json_min_db import JsonMinConnexion
from threading import Lock
logger = logging.getLogger(__name__)
NOTHING = object()
def get_config_path(filename='config.yaml', appname=None, appauthor=None, appversion=None,
dirpath=None, app_root_config=None, user_config=None, system_config=None,
approot_config_dirname='config'):
from os.path import join, dirname
# There are 4 options for dirpath:
# 0- given in dirpath argument
# 1- application root dir
# 2- user config dir
# 3- system config dir
if dirpath is None:
if app_root_config is False and user_config is False and system_config is False:
raise ValueError("Required one of: app_root_config, user_config, and system_config be True or None")
if app_root_config is True or not (user_config or system_config):
import sys
dirpath = join(dirname(sys.modules['__main__'].__file__), approot_config_dirname)
elif user_config is True or not system_config:
import appdirs
dirpath = appdirs.user_config_dir(appname, appauthor, appversion)
else:
import appdirs
dirpath = appdirs.site_config_dir(appname, appauthor, appversion)
return join(dirpath, filename)
class EasyConfig:
def __init__(self, appname=None, appauthor=None, appversion=None,
filename=None, dirpath=None, app_root_config=None, user_config=None,
system_config=None, approot_config_dirname='config',
filepath=None, create=True, template=None, template_file=None, indent=3, driver="yaml"):
if filepath is None:
if filename is None:
raise ValueError('Argument required: filepath or filename')
filepath = get_config_path(filename, appname, appauthor, appversion, dirpath,
app_root_config, user_config, system_config,
approot_config_dirname)
self.db = JsonMinConnexion(filepath, create=create, template=template, template_file=template_file,
indent=indent, driver=driver)
self._lock = Lock()
self._save = True
def get(self, key, default=NOTHING, set_default=True, save=True):
try:
return self.db[key]
except:
if default is NOTHING:
raise KeyError(key)
else:
if set_default:
self.set(key, default, save=save)
return default
def set(self, key, value, save=True):
if self._save:
with self._lock:
self.db[key] = value
if save:
self.db.save()
else:
self.db[key] = value
def save(self):
self.db.save()
def __enter__(self):
self._lock.acquire()
self._save = False
self.db.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.db.__exit__(exc_type, exc_val, exc_tb)
finally:
self._save = True
self._lock.release()
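# A minimal usage sketch (illustrative names; 'myapp' and the keys are assumptions):
#
#   cfg = EasyConfig(appname='myapp', filename='config.yaml', user_config=True)
#   host = cfg.get('host', default='127.0.0.1')  # stores and saves the default if missing
#   cfg.set('port', 8080)                        # writes and saves immediately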
|
the-stack_106_23580 | import heterocl as hcl
import numpy as np
from hlib.op.extern import create_extern_module, register_extern_ip
import os
dtype = hcl.Int()
@register_extern_ip(vendor="intel")
def byte_swap_rtl(input_vec, ret=None, name=None):
if name is None: name = "my_byteswap"
Len = input_vec.shape[0]
return_tensors = False
if ret is None:
return_tensors = True
ret = hcl.compute(input_vec.shape, lambda *args: 0, "vec")
# functional behavior
with hcl.Stage("ExternModule") as Module:
hcl.update(ret, lambda *args:
input_vec[args] << 16 | input_vec[args] >> 16, "swap")
dicts = {}
dicts["name"] = name
tensors = [input_vec]
dicts["args"] = [(_.name, _.dtype) for _ in tensors]
# declare headers and typedef
dicts["header"] = "unsigned int my_byteswap(unsigned int x);"
dicts["func"] = """
for (int k = 0; k < {}; k++) {{
vec[k] = my_byteswap({}[k]);
}}
""".format(Len, input_vec.name)
# add dependency files or folders
# the dependencies are copied to project folder
deps = os.path.dirname(os.path.abspath(__file__))
dicts["deps"] = deps + "/lib1"
# custom compilation command (root path: project)
# commands executed before impl or emulation
dicts["cmds"] = "cd lib1; " + \
"aocl library hdl-comp-pkg opencl_lib.xml -o opencl_lib.aoco;" + \
"aocl library create -name opencl_lib opencl_lib.aoco;"
# custom compiler flags (load custom libs)
dicts["flags"] = "-I lib1 -L lib1 -l opencl_lib.aoclib"
create_extern_module(Module, dicts, ip_type="rtl")
if return_tensors: return ret
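# A minimal usage sketch (hedged: the placeholder shape, dtype and the "aocl"
# target string are assumptions about the surrounding HeteroCL flow, not part
# of this module):
#
#   hcl.init(hcl.UInt(32))
#   A = hcl.placeholder((1024,), "A")
#   s = hcl.create_schedule([A], lambda A: byte_swap_rtl(A))
#   f = hcl.build(s, target="aocl")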
|
the-stack_106_23581 | """The Flick Electric integration."""
from datetime import datetime as dt
from pyflick import FlickAPI
from pyflick.authentication import AbstractFlickAuth
from pyflick.const import DEFAULT_CLIENT_ID, DEFAULT_CLIENT_SECRET
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_PASSWORD,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import CONF_TOKEN_EXPIRES_IN, CONF_TOKEN_EXPIRY, DOMAIN
CONF_ID_TOKEN = "id_token"
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Flick Electric from a config entry."""
auth = HassFlickAuth(hass, entry)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = FlickAPI(auth)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
if await hass.config_entries.async_forward_entry_unload(entry, "sensor"):
hass.data[DOMAIN].pop(entry.entry_id)
return True
return False
class HassFlickAuth(AbstractFlickAuth):
"""Implementation of AbstractFlickAuth based on a Home Assistant entity config."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Flick authention based on a Home Assistant entity config."""
super().__init__(aiohttp_client.async_get_clientsession(hass))
self._entry = entry
self._hass = hass
async def _get_entry_token(self):
# No token saved, generate one
if (
CONF_TOKEN_EXPIRY not in self._entry.data
or CONF_ACCESS_TOKEN not in self._entry.data
):
await self._update_token()
# Token is expired, generate a new one
if self._entry.data[CONF_TOKEN_EXPIRY] <= dt.now().timestamp():
await self._update_token()
return self._entry.data[CONF_ACCESS_TOKEN]
async def _update_token(self):
token = await self.get_new_token(
username=self._entry.data[CONF_USERNAME],
password=self._entry.data[CONF_PASSWORD],
client_id=self._entry.data.get(CONF_CLIENT_ID, DEFAULT_CLIENT_ID),
client_secret=self._entry.data.get(
CONF_CLIENT_SECRET, DEFAULT_CLIENT_SECRET
),
)
# Reduce expiry by an hour to avoid API being called after expiry
expiry = dt.now().timestamp() + int(token[CONF_TOKEN_EXPIRES_IN] - 3600)
self._hass.config_entries.async_update_entry(
self._entry,
data={
**self._entry.data,
CONF_ACCESS_TOKEN: token,
CONF_TOKEN_EXPIRY: expiry,
},
)
async def async_get_access_token(self):
"""Get Access Token from HASS Storage."""
token = await self._get_entry_token()
return token[CONF_ID_TOKEN]
|
the-stack_106_23582 | # -*- coding: utf-8 -*-
#
# Jedi documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 26 00:11:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../jedi'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jedi'
copyright = u'2012 - {today.year}, Jedi contributors'.format(today=datetime.date.today())
import jedi
_path = os.path.dirname(os.path.abspath(__file__))
_version_strs = [str(x) for x in jedi.__version__]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(_version_strs[:2])
# The full version, including alpha/beta/rc tags.
release = '.'.join(_version_strs)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'sidebarlogo.html',
'localtoc.html',
# 'relations.html',
'ghbuttons.html',
# 'sourcelink.html',
# 'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jedidoc'
#html_style = 'default.css' # Force usage of default template on RTD
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jedi.tex', u'Jedi Documentation',
u'Jedi contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jedi', u'Jedi Documentation',
[u'Jedi contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Jedi', u'Jedi Documentation',
u'Jedi contributors', 'Jedi', 'Awesome Python autocompletion library.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for todo module ---------------------------------------------------
todo_include_todos = False
# -- Options for autodoc module ------------------------------------------------
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = []
#autodoc_default_flags = ['members', 'undoc-members']
# -- Options for intersphinx module --------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/': None,
}
|
the-stack_106_23584 | from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
bot.run(token)
|
the-stack_106_23585 | import os
################ MAKE CHANGES HERE #################
inputDir = "input" # path to the input sequence PNGs
inputFileFormat = "%03d" # name of input files, e.g., %03d if files are named 001.png, 002.png
inputFileExt = "png" # extension of input files (without .), e.g., png, jpg
flowFwdDir = "flow_fwd" # path to the output forward flow files
flowBwdDir = "flow_bwd" # path to the output backward flow files
FIRST = 1 # number of the first PNG file in the input folder
LAST = 109 # number of the last PNG file in the input folder
####################################################
if not os.path.exists(flowFwdDir):
os.mkdir(flowFwdDir)
if not os.path.exists(flowBwdDir):
os.mkdir(flowBwdDir)
inputFiles = inputDir + "/" + inputFileFormat + "." + inputFileExt
flwFwdFile = flowFwdDir + "/" + inputFileFormat + ".A2V2f"
flwBwdFile = flowBwdDir + "/" + inputFileFormat + ".A2V2f"
firstFrame = FIRST+1
lastFrame = LAST
frameStep = +1
for frame in range(firstFrame,lastFrame+frameStep,frameStep):
os.system("disflow %s %s %s"%(inputFiles%(frame),inputFiles%(frame-frameStep),flwFwdFile%(frame)))
firstFrame = LAST-1
lastFrame = FIRST
frameStep = -1
for frame in range(firstFrame,lastFrame+frameStep,frameStep):
os.system("disflow %s %s %s"%(inputFiles%(frame),inputFiles%(frame-frameStep),flwBwdFile%(frame)))
|
the-stack_106_23588 | """options table constraint
Revision ID: b531c6dadfcf
Revises: 8af57a53dc4e
Create Date: 2019-11-15 17:22:40.911136
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b531c6dadfcf'
down_revision = '8af57a53dc4e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('chat')
op.drop_index('ix_option_chat_key', table_name='chat_option')
op.drop_table('chat_option')
op.drop_index('ix_chat_hash', table_name='photo')
op.drop_index('ix_user_hash_date', table_name='photo')
op.drop_table('photo')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('photo',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('file_hash', sa.VARCHAR(length=240), nullable=False),
sa.Column('chat_id', sa.INTEGER(), nullable=False),
sa.Column('user_id', sa.INTEGER(), nullable=False),
sa.Column('msg_date', sa.DATETIME(), nullable=False),
sa.Column('msg_id', sa.INTEGER(), nullable=False),
sa.Column('yd_path', sa.VARCHAR(length=240), nullable=False),
sa.ForeignKeyConstraint(['chat_id'], ['chat.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_user_hash_date', 'photo', ['user_id', 'file_hash', 'msg_date'], unique=False)
op.create_index('ix_chat_hash', 'photo', ['chat_id', 'file_hash'], unique=False)
op.create_table('chat_option',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('chat_id', sa.INTEGER(), nullable=True),
sa.Column('key', sa.VARCHAR(length=50), nullable=False),
sa.Column('value', sa.VARCHAR(length=240), nullable=True),
sa.ForeignKeyConstraint(['chat_id'], ['chat.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_option_chat_key', 'chat_option', ['chat_id', 'key'], unique=False)
op.create_table('chat',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=240), nullable=False),
sa.Column('local_folder', sa.VARCHAR(length=240), nullable=False),
sa.Column('yd_folder', sa.VARCHAR(length=240), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
the-stack_106_23589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
from src import InstaBot
from src.check_status import check_status
from src.feed_scanner import feed_scanner
from src.follow_protocol import follow_protocol
from src.unfollow_protocol import unfollow_protocol
bot = InstaBot(
login="timeless_hiphop",
password="dwPW1506",
like_per_day=750,
comments_per_day=0,
tag_list=['boombap', 'boombaphiphop', 'oldschoolrap', 'oldschoolhiphop','classichiphop','realrap', 'mosdef','nas','wutangclan','wutangforever','joeybadass','mfdoom'],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=500,
follow_time=1 * 60,
unfollow_per_day=500,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='mschun:[email protected]:29842',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT","magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious","so glorious",
"very glorious", "adorable", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
# Use unwanted_username_list to block usernames containing a string
## Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
### 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
'second', 'stuff', 'art', 'project', 'love', 'life', 'food', 'blog',
'free', 'keren', 'photo', 'graphy', 'indo', 'travel', 'art', 'shop',
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
while True:
#print("# MODE 0 = ORIGINAL MODE BY LEVPASHA")
#print("## MODE 1 = MODIFIED MODE BY KEMONG")
#print("### MODE 2 = ORIGINAL MODE + UNFOLLOW WHO DON'T FOLLOW BACK")
#print("#### MODE 3 = MODIFIED MODE : UNFOLLOW USERS WHO DON'T FOLLOW YOU BASED ON RECENT FEED")
#print("##### MODE 4 = MODIFIED MODE : FOLLOW USERS BASED ON RECENT FEED ONLY")
#print("###### MODE 5 = MODIFIED MODE : JUST UNFOLLOW EVERYBODY, EITHER YOUR FOLLOWER OR NOT")
################################
## WARNING ###
################################
# DON'T USE MODE 5 FOR A LONG PERIOD. YOU RISK GETTING YOUR ACCOUNT BANNED
## USE MODE 5 IN BURST MODE, USE IT TO UNFOLLOW PEOPLE AS MANY AS YOU WANT IN SHORT TIME PERIOD
mode = 2
#print("You choose mode : %i" %(mode))
#print("CTRL + C to cancel this operation or wait 30 seconds to start")
#time.sleep(30)
if mode == 0:
bot.new_auto_mod()
elif mode == 1:
check_status(bot)
while bot.self_following - bot.self_follower > 200:
unfollow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
while bot.self_following - bot.self_follower < 400:
while len(bot.user_info_list) < 50:
feed_scanner(bot)
time.sleep(5 * 60)
follow_protocol(bot)
time.sleep(10 * 60)
check_status(bot)
elif mode == 2:
bot.bot_mode = 1
bot.new_auto_mod()
elif mode == 3:
bot.bot_mode = 3
while(bot.login_status == 1):
bot.unfollow_recent_feed()
time.sleep(5)
elif mode == 4:
feed_scanner(bot)
time.sleep(60)
follow_protocol(bot)
time.sleep(10 * 60)
elif mode == 5:
bot.bot_mode = 2
unfollow_protocol(bot)
else:
print("Wrong mode!")
|
the-stack_106_23590 | import gym
class ContinuingTimeLimit(gym.Wrapper):
"""TimeLimit wrapper for continuing environments.
This is similar to gym.wrappers.TimeLimit, which sets a time limit for
each episode, except that done=False is returned and that
info['needs_reset'] is set to True when past the limit.
Code that calls env.step is responsible for checking the info dict, the
fourth returned value, and resetting the env if it has the 'needs_reset'
key and its value is True.
Args:
env (gym.Env): Env to wrap.
max_episode_steps (int): Maximum number of timesteps during an episode,
after which the env needs a reset.
"""
def __init__(self, env, max_episode_steps):
super(ContinuingTimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(self, action):
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
if self._max_episode_steps <= self._elapsed_steps:
info["needs_reset"] = True
return observation, reward, done, info
def reset(self):
self._elapsed_steps = 0
return self.env.reset()
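# A minimal usage sketch (the environment id is an illustrative assumption);
# note that the caller, not the wrapper, performs the reset:
#
#   env = ContinuingTimeLimit(gym.make('MyContinuingEnv-v0'), max_episode_steps=1000)
#   obs = env.reset()
#   while True:
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done or info.get('needs_reset', False):
#           obs = env.reset()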
|
the-stack_106_23591 | # %% [markdown]
# # Inequality constraints
# %%
import numpy as np
import tensorflow as tf
np.random.seed(1799)
tf.random.set_seed(1799)
# %% [markdown]
# ## The problem
#
# In this tutorial, we replicate one of the results of <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, specifically their synthetic experiment "simulation 1", which consists of an objective function with a single constraint, defined over a two-dimensional input domain. We'll start by defining the problem parameters. The constraint is satisfied when `constraint(input_data) <= threshold`.
# %%
from trieste.space import Box
class Sim:
threshold = 0.5
@staticmethod
def objective(input_data):
x, y = input_data[..., -2], input_data[..., -1]
z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
return z[:, None]
@staticmethod
def constraint(input_data):
x, y = input_data[:, -2], input_data[:, -1]
z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
return z[:, None]
search_space = Box([0, 0], [6, 6])
# %% [markdown]
# The objective and constraint functions are accessible as methods on the `Sim` class. Let's visualise these functions, as well as the constrained objective. We get the constrained objective by masking out regions where the constraint function is above the threshold.
# %%
import trieste
import matplotlib.pyplot as plt
from util.inequality_constraints_utils import plot_objective_and_constraints
plot_objective_and_constraints(search_space, Sim)
plt.show()
# %% [markdown]
# We'll make an observer that outputs both the objective and constraint data. Since the observer is outputting multiple datasets, we have to label them so that the optimization process knows which is which.
# %%
from trieste.data import Dataset
OBJECTIVE = "OBJECTIVE"
CONSTRAINT = "CONSTRAINT"
def observer(query_points):
return {
OBJECTIVE: Dataset(query_points, Sim.objective(query_points)),
CONSTRAINT: Dataset(query_points, Sim.constraint(query_points)),
}
# %% [markdown]
# Let's randomly sample some initial data from the observer ...
# %%
num_initial_points = 5
initial_data = observer(search_space.sample(num_initial_points))
# %% [markdown]
# ... and visualise those points on the constrained objective. Note how the generated data is labelled, like the observer.
# %%
from util.inequality_constraints_utils import plot_init_query_points
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
)
plt.show()
# %% [markdown]
# ## Modelling the two functions
#
# We'll model the objective and constraint data with their own Gaussian process regression models, as implemented in GPflow. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model build function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import build_gpr, GaussianProcessRegression
def create_bo_model(data):
gpr = build_gpr(data, search_space, likelihood_variance=1e-7)
return GaussianProcessRegression(gpr)
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
# %% [markdown]
# ## Define the acquisition process
#
# We can construct the _expected constrained improvement_ acquisition function defined in <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, where they use the probability of feasibility with respect to the constraint model.
# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
eci = trieste.acquisition.ExpectedConstrainedImprovement(
OBJECTIVE, pof.using(CONSTRAINT)
)
rule = EfficientGlobalOptimization(eci) # type: ignore
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop. We obtain the final objective and constraint data using `.try_get_final_datasets()`.
# %%
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
data = bo.optimize(
num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()
# %% [markdown]
# To conclude this section, we visualise the resulting data. Orange dots show the new points queried during optimization. Notice the concentration of these points in regions near the local minima.
# %%
constraint_data = data[CONSTRAINT]
new_query_points = constraint_data.query_points[-num_steps:]
new_observations = constraint_data.observations[-num_steps:]
new_data = (new_query_points, new_observations)
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
new_data,
)
plt.show()
# %% [markdown]
# ## Batch-sequential strategy
#
# We'll now look at a batch-sequential approach to the same problem. Sometimes it's beneficial to query several points at a time instead of one. The acquisition function we used earlier, built by `ExpectedConstrainedImprovement`, only supports a batch size of 1, so we'll need a new acquisition function builder for larger batch sizes. We can implement this using the reparametrization trick with the Monte-Carlo sampler `BatchReparametrizationSampler`. Note that when we do this, we must initialise the sampler *outside* the acquisition function (here `batch_efi`). This is crucial: a given instance of a sampler produces repeatable, continuous samples, and we can use this to create a repeatable continuous acquisition function. Using a new sampler on each call would not result in a repeatable continuous acquisition function.
# %%
class BatchExpectedConstrainedImprovement(
trieste.acquisition.AcquisitionFunctionBuilder
):
def __init__(self, sample_size, threshold):
self._sample_size = sample_size
self._threshold = threshold
def prepare_acquisition_function(self, models, datasets):
objective_model = models[OBJECTIVE]
objective_dataset = datasets[OBJECTIVE]
samplers = {
tag: trieste.models.gpflow.BatchReparametrizationSampler(
self._sample_size, model
)
for tag, model in models.items()
}
pf = trieste.acquisition.probability_of_feasibility(
models[CONSTRAINT], self._threshold
)(tf.expand_dims(objective_dataset.query_points, 1))
is_feasible = pf >= 0.5
mean, _ = objective_model.predict(objective_dataset.query_points)
eta = tf.reduce_min(tf.boolean_mask(mean, is_feasible), axis=0)
def batch_efi(at):
samples = {
tag: tf.squeeze(sampler.sample(at), -1)
for tag, sampler in samplers.items()
}
feasible_mask = samples[CONSTRAINT] < self._threshold # [N, S, B]
improvement = tf.where(
feasible_mask, tf.maximum(eta - samples[OBJECTIVE], 0.0), 0.0
) # [N, S, B]
batch_improvement = tf.reduce_max(improvement, axis=-1) # [N, S]
return tf.reduce_mean(
batch_improvement, axis=-1, keepdims=True
) # [N, 1]
return batch_efi
num_query_points = 4
sample_size = 50
batch_eci = BatchExpectedConstrainedImprovement(sample_size, Sim.threshold)
batch_rule = EfficientGlobalOptimization( # type: ignore
batch_eci, num_query_points=num_query_points
)
# %% [markdown]
# We can now run the BO loop as before; note that here we also query twenty points, but in five batches of four points.
# %%
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
num_steps = 5
batch_data = bo.optimize(
num_steps, initial_data, initial_models, batch_rule, track_state=False
).try_get_final_datasets()
# %% [markdown]
# We visualise the resulting data as before.
# %%
batch_constraint_data = batch_data[CONSTRAINT]
new_batch_data = (
batch_constraint_data.query_points[-num_query_points * num_steps :],
batch_constraint_data.observations[-num_query_points * num_steps :],
)
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
new_batch_data,
)
plt.show()
# %% [markdown]
# Finally, we compare the regret from the non-batch strategy (left panel) to the regret from the batch strategy (right panel).
# In the following plots each marker represents a query point. The x-axis is the index of the query point (where the first queried point has index 0), and the y-axis is the observed value. The vertical blue line denotes the end of initialisation/start of optimisation. Green points satisfy the constraint, red points do not.
# %%
from util.plotting import plot_regret
mask_fail = constraint_data.observations.numpy() > Sim.threshold
batch_mask_fail = batch_constraint_data.observations.numpy() > Sim.threshold
fig, ax = plt.subplots(1, 2, sharey="all")
plot_regret(
data[OBJECTIVE].observations.numpy(),
ax[0],
num_init=num_initial_points,
mask_fail=mask_fail.flatten(),
)
plot_regret(
batch_data[OBJECTIVE].observations.numpy(),
ax[1],
num_init=num_initial_points,
mask_fail=batch_mask_fail.flatten(),
)
# %% [markdown]
# ## Constrained optimization with more than one constraint
#
# We'll now show how to use a reducer to combine multiple constraints. The new problem `Sim2` inherits from the previous one its objective and first constraint, but also adds a second constraint. We start by adding an output to our observer, and creating a set of three models.
# %%
class Sim2(Sim):
threshold2 = 0.5
@staticmethod
def constraint2(input_data):
x, y = input_data[:, -2], input_data[:, -1]
z = tf.sin(x) * tf.cos(y) - tf.cos(x) * tf.sin(y)
return z[:, None]
CONSTRAINT2 = "CONSTRAINT2"
def observer_two_constraints(query_points):
return {
OBJECTIVE: Dataset(query_points, Sim2.objective(query_points)),
CONSTRAINT: Dataset(query_points, Sim2.constraint(query_points)),
CONSTRAINT2: Dataset(query_points, Sim2.constraint2(query_points)),
}
num_initial_points = 10
initial_data = observer_two_constraints(search_space.sample(num_initial_points))
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
# %% [markdown]
# Now, the probability that the two constraints are feasible is the product of the two feasibilities. Hence, we combine the two `ProbabilityOfFeasibility` functions into one quantity by using a `Product` `Reducer`:
# %%
from trieste.acquisition.combination import Product
pof1 = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim2.threshold)
pof2 = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim2.threshold2)
pof = Product(pof1.using(CONSTRAINT), pof2.using(CONSTRAINT2)) # type: ignore
# %% [markdown]
# We can now run the BO loop as before, and visualize the results:
# %%
eci = trieste.acquisition.ExpectedConstrainedImprovement(OBJECTIVE, pof) # type: ignore
rule = EfficientGlobalOptimization(eci)
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(
observer_two_constraints, search_space
)
data = bo.optimize(
num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()
constraint_data = data[CONSTRAINT]
new_query_points = constraint_data.query_points[-num_steps:]
new_observations = constraint_data.observations[-num_steps:]
new_data = (new_query_points, new_observations)
def masked_objective(x):
mask_nan = np.logical_or(
Sim2.constraint(x) > Sim2.threshold,
Sim2.constraint2(x) > Sim2.threshold2,
)
y = np.array(Sim2.objective(x))
y[mask_nan] = np.nan
return tf.convert_to_tensor(y.reshape(-1, 1), x.dtype)
mask_fail1 = (
data[CONSTRAINT].observations.numpy().flatten().astype(int) > Sim2.threshold
)
mask_fail2 = (
data[CONSTRAINT].observations.numpy().flatten().astype(int)
> Sim2.threshold2
)
mask_fail = np.logical_or(mask_fail1, mask_fail2)
import matplotlib.pyplot as plt
from util.plotting import plot_function_2d, plot_bo_points
fig, ax = plot_function_2d(
masked_objective,
search_space.lower,
search_space.upper,
grid_density=50,
contour=True,
)
plot_bo_points(
data[OBJECTIVE].query_points.numpy(),
ax=ax[0, 0],
num_init=num_initial_points,
mask_fail=mask_fail,
)
plt.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
|
the-stack_106_23592 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for comparing semantic versions.
Basic rules of semver:
Format: major.minor.patch-prerelease+build
major, minor, patch, must all be present and integers with no leading zeros.
They are compared numerically by segment.
prerelease is an optional '.' separated series of identifiers where each is
either an integer with no leading zeros, or an alphanumeric string
(including '-'). Prereleases are compared by comparing each identifier in
order. Integers are compared numerically, alphanumeric strings are compared
lexicographically. A prerelease version has lower precedence than its associated
normal version.
The build number is optional and not included in the comparison. It is a
'.'-separated series of alphanumeric identifiers.
Two SemVer objects are considered equal if they represent the exact same string
(including the build number and including case differences). For comparison
operators, we follow the SemVer spec of precedence and ignore the build number
and case of alphanumeric strings.
"""
import re
# Only digits, with no leading zeros.
_DIGITS = r'(?:0|[1-9][0-9]*)'
# Digits, letters and dashes
_ALPHA_NUM = r'[-0-9A-Za-z]+'
# This is an alphanumeric string that must have at least one letter (or else it
# would be considered digits).
_STRICT_ALPHA_NUM = r'[-0-9A-Za-z]*[-A-Za-z]+[-0-9A-Za-z]*'
_PRE_RELEASE_IDENTIFIER = r'(?:{0}|{1})'.format(_DIGITS, _STRICT_ALPHA_NUM)
_PRE_RELEASE = r'(?:{0}(?:\.{0})*)'.format(_PRE_RELEASE_IDENTIFIER)
_BUILD = r'(?:{0}(?:\.{0})*)'.format(_ALPHA_NUM)
_SEMVER = (
r'^(?P<major>{digits})\.(?P<minor>{digits})\.(?P<patch>{digits})'
r'(?:\-(?P<prerelease>{release}))?(?:\+(?P<build>{build}))?$'
).format(digits=_DIGITS, release=_PRE_RELEASE, build=_BUILD)
class ParseError(Exception):
"""An exception for when a string failed to parse as a valid semver."""
pass
class SemVer(object):
"""Object to hold a parsed semantic version string."""
def __init__(self, version):
"""Creates a SemVer object from the given version string.
Args:
version: str, The version string to parse.
Raises:
ParseError: If the version could not be correctly parsed.
Returns:
SemVer, The parsed version.
"""
(self.major, self.minor, self.patch, self.prerelease, self.build) = (
SemVer._FromString(version))
@classmethod
def _FromString(cls, version):
"""Parse the given version string into its parts."""
if version is None:
raise ParseError('The value is not a valid SemVer string: [None]')
try:
match = re.match(_SEMVER, version)
except (TypeError, re.error) as e:
raise ParseError('Error parsing version string: [{0}]. {1}'
.format(version, e.message))
if not match:
raise ParseError(
'The value is not a valid SemVer string: [{0}]'.format(version))
parts = match.groupdict()
return (
int(parts['major']), int(parts['minor']), int(parts['patch']),
parts['prerelease'], parts['build'])
@classmethod
def _ComparePrereleaseStrings(cls, s1, s2):
"""Compares the two given prerelease strings.
Args:
s1: str, The first prerelease string.
s2: str, The second prerelease string.
Returns:
1 if s1 is greater than s2, -1 if s2 is greater than s1, and 0 if equal.
"""
# No prerelease is greater than any version with a prerelease.
if s1 is None and s2 is not None:
return 1
if s2 is None and s1 is not None:
return -1
# If both are the same (including None), they are equal
if s1 == s2:
return 0
# Convert numeric segments into ints for numerical comparison.
to_comparable = lambda part: int(part) if part.isdigit() else part.lower()
# Split the version by dots so each part can be compared.
get_parts = lambda s: [to_comparable(part) for part in s.split('.')]
return cmp(get_parts(s1), get_parts(s2))
def _Compare(self, other):
"""Compare this SemVer to other.
Args:
other: SemVer, the other version to compare this one to.
Returns:
1 if self > other, -1 if other > self, 0 if equal.
"""
# Compare the required parts.
result = cmp(
(self.major, self.minor, self.patch),
(other.major, other.minor, other.patch))
# Only if required parts are equal, compare the prerelease strings.
# Never include build number in comparison.
result = result or SemVer._ComparePrereleaseStrings(
self.prerelease, other.prerelease)
return result
def Distance(self, other):
"""Compare this SemVer to other and returns the distances.
Args:
other: SemVer, the other version to compare this one to.
Returns:
Distances between the major, minor and patch versions.
"""
major_diff = self.major - other.major
minor_diff = self.minor - other.minor
patch_diff = self.patch - other.patch
return major_diff, minor_diff, patch_diff
def __eq__(self, other):
return (
(self.major, self.minor, self.patch, self.prerelease, self.build) ==
(other.major, other.minor, other.patch, other.prerelease, other.build))
def __ne__(self, other):
return not self == other
def __gt__(self, other):
return self._Compare(other) > 0
def __lt__(self, other):
return self._Compare(other) < 0
def __ge__(self, other):
return not self < other
def __le__(self, other):
return not self > other
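# Illustrative comparisons implied by the rules documented above (shown as
# comments, not executed here):
#
#   SemVer('1.0.0-alpha') < SemVer('1.0.0')              # True: prerelease has lower precedence
#   SemVer('1.0.0-alpha.2') > SemVer('1.0.0-alpha.1')    # True: numeric identifiers compare numerically
#   SemVer('1.0.0+build.1') == SemVer('1.0.0+build.2')   # False: equality includes build metadata
#   SemVer('2.0.0').Distance(SemVer('1.3.1'))            # (1, -3, -1)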
|
the-stack_106_23593 | from setuptools import setup
with open('README.rst') as f:
long_description = f.read()
setup(
name="gfwlist2dnsmasq",
version="0.1.0",
license='MIT',
description="convert gfwlist to dnsmasq config file",
author='nucbill',
author_email='[email protected]',
url='https://github.com/nucbill/gfwlist2dnsmasq',
packages=['gfwlist2dnsmasq', 'gfwlist2dnsmasq.resources'],
package_data={
'gfwlist2dnsmasq': ['README.rst', 'LICENSE', 'resources/*']
},
install_requires=[],
entry_points="""
[console_scripts]
gfwlist2dnsmasq = gfwlist2dnsmasq.main:main
""",
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
long_description=long_description,
)
|
the-stack_106_23596 | from rest_framework import serializers
from .models import User, Photo
"""
Code from:
http://stackoverflow.com/questions/28036404/django-rest-framework-upload-image-the-submitted-data-was-not-a-file#28036805
"""
class Base64ImageField(serializers.ImageField):
"""
A Django REST framework field for handling image-uploads through raw post data.
It uses base64 for encoding and decoding the contents of the file.
Heavily based on
https://github.com/tomchristie/django-rest-framework/pull/1268
Updated for Django REST framework 3.
"""
def to_internal_value(self, data):
from django.core.files.base import ContentFile
import base64
import six
import uuid
if isinstance(data, six.string_types):
if 'data:' in data and ';base64,' in data:
header, data = data.split(';base64,')
try:
decoded_file = base64.b64decode(data)
except TypeError:
self.fail('invalid_image')
file_name = str(uuid.uuid4())[:12] # 12 characters are more than enough.
file_extension = self.get_file_extension(file_name, decoded_file)
complete_file_name = "%s.%s" % (file_name, file_extension, )
data = ContentFile(decoded_file, name=complete_file_name)
return super(Base64ImageField, self).to_internal_value(data)
def get_file_extension(self, file_name, decoded_file):
import imghdr
extension = imghdr.what(file_name, decoded_file)
extension = "jpg" if extension == "jpeg" else extension
return extension
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
class PhotoSerializer(serializers.ModelSerializer):
image = Base64ImageField(max_length=None, use_url=True)
class Meta:
model = Photo
fields = '__all__' |
the-stack_106_23601 | import torch
import torch.nn.functional as F
from torchvision import models
import numpy as np
import cv2
import os, sys
from torchsummary import summary
######## config #############
test_images_path = sys.argv[1]
classes = ('chair', 'people')
input_shape = (3, 224, 224)
cards_id = [2, 3]
param_save_path = sys.argv[2]
onnx_out_name = "out/classifier.onnx"
ncnn_out_param = "out/classifier.param"
ncnn_out_bin = "out/classifier.bin"
os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(f"{id}" for id in cards_id)
def load_data(path, shape):
data = []
exts = [".jpg", ".jpeg", ".png"]
files = os.listdir(path)
files = sorted(files)
for name in files:
if not os.path.splitext(name.lower())[1] in exts:
continue
img = cv2.imread(os.path.join(path, name))
if type(img) == type(None):
print("read file {} fail".format(os.path.join(path, name)))
continue
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (shape[2], shape[1]))
img = np.transpose(img, (2, 0, 1)).astype(np.float32) # hwc to chw layout
img = (img - 127.5) * 0.0078125
data.append((torch.from_numpy(img), name))
return data
class Dataset:
def __init__(self, data):
self.data = data
def __getitem__(self, i):
return self.data[i]
def __len__(self) -> int:
return len(self.data)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
testset = load_data(test_images_path, input_shape)
testset = Dataset(testset)
net = models.resnet18(pretrained=False, num_classes = len(classes))
net.to(device)
net.load_state_dict(torch.load(param_save_path))
# summary(net, input_size=input_shape)
with torch.no_grad():
for i, data in enumerate(testset):
input, name = data
input = input.to(device)
outputs = net(input.unsqueeze(0))
result = F.softmax(outputs[0], dim=0)
print("image: {}, raw output: {}, ".format(name, outputs[0]), end="")
for i, label in enumerate(classes):
print("{}: {:.3f}, ".format(label, result[i]), end="")
print("")
print("export model")
from convert import torch_to_onnx, onnx_to_ncnn
if not os.path.exists("out"):
os.makedirs("out")
with torch.no_grad():
torch_to_onnx(net.to("cpu"), input_shape, out_name=onnx_out_name, device="cpu")
onnx_to_ncnn(input_shape, onnx=onnx_out_name, ncnn_param=ncnn_out_param, ncnn_bin=ncnn_out_bin)
|
the-stack_106_23603 | # pulled from the AWSCLI and modified by Globus
# note that this file has also been modified by auotfixers (e.g. black, pyupgrade)
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import errno
import sys
def _format_text(item, stream, identifier=None, scalar_keys=None):
if isinstance(item, dict):
_format_dict(scalar_keys, item, identifier, stream)
elif isinstance(item, list):
_format_list(item, identifier, stream)
else:
# If it's not a list or a dict, we just write the scalar
# value out directly.
stream.write(str(item))
stream.write("\n")
def _format_list(item, identifier, stream):
if not item:
return
if any(isinstance(el, dict) for el in item):
all_keys = _all_scalar_keys(item)
for element in item:
_format_text(
element, stream=stream, identifier=identifier, scalar_keys=all_keys
)
elif any(isinstance(el, list) for el in item):
scalar_elements, non_scalars = _partition_list(item)
if scalar_elements:
_format_scalar_list(scalar_elements, identifier, stream)
for non_scalar in non_scalars:
_format_text(non_scalar, stream=stream, identifier=identifier)
else:
_format_scalar_list(item, identifier, stream)
def _partition_list(item):
scalars = []
non_scalars = []
for element in item:
if isinstance(element, (list, dict)):
non_scalars.append(element)
else:
scalars.append(element)
return scalars, non_scalars
def _format_scalar_list(elements, identifier, stream):
if identifier is not None:
for item in elements:
stream.write(f"{identifier.upper()}\t{item}\n")
else:
# For a bare list, just print the contents.
stream.write("\t".join([str(item) for item in elements]))
stream.write("\n")
def _format_dict(scalar_keys, item, identifier, stream):
scalars, non_scalars = _partition_dict(item, scalar_keys=scalar_keys)
if scalars:
if identifier is not None:
scalars.insert(0, identifier.upper())
stream.write("\t".join(scalars))
stream.write("\n")
for new_identifier, non_scalar in non_scalars:
_format_text(item=non_scalar, stream=stream, identifier=new_identifier)
def _all_scalar_keys(list_of_dicts):
keys_seen = set()
for item_dict in list_of_dicts:
for key, value in item_dict.items():
if not isinstance(value, (dict, list)):
keys_seen.add(key)
return list(sorted(keys_seen))
def _partition_dict(item_dict, scalar_keys):
# Given a dictionary, partition it into two list based on the
# values associated with the keys.
    # {'foo': 'scalar', 'bar': 'scalar', 'baz': ['not', 'scalar']}
# scalar = [('foo', 'scalar'), ('bar', 'scalar')]
# non_scalar = [('baz', ['not', 'scalar'])]
scalar = []
non_scalar = []
if scalar_keys is None:
# scalar_keys can have more than just the keys in the item_dict,
# but if user does not provide scalar_keys, we'll grab the keys
# from the current item_dict
for key, value in sorted(item_dict.items()):
if isinstance(value, (dict, list)):
non_scalar.append((key, value))
else:
scalar.append(str(value))
else:
for key in scalar_keys:
scalar.append(str(item_dict.get(key, "")))
remaining_keys = sorted(set(item_dict.keys()) - set(scalar_keys))
for remaining_key in remaining_keys:
non_scalar.append((remaining_key, item_dict[remaining_key]))
return scalar, non_scalar
def unix_formatted_print(data, stream=sys.stdout):
_format_text(data, stream)
try:
sys.stdout.flush()
except OSError as err:
        if err.errno == errno.EPIPE:
pass
else:
raise
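# A minimal usage sketch (hypothetical data; not part of the original module):
#
#   unix_formatted_print({"name": "demo", "sizes": [1, 2, 3]})
#   # scalar values are printed tab-separated on one line ("demo"), then each
#   # element of the nested list follows as an identifier-prefixed line
#   # ("SIZES\t1", "SIZES\t2", "SIZES\t3").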
|
the-stack_106_23604 | import pytest
from eth_utils import (
ValidationError,
)
from eth2.beacon.committee_helpers import (
get_beacon_proposer_index,
)
from eth2.beacon.configs import (
CommitteeConfig,
)
from eth2.beacon.helpers import (
get_epoch_start_slot,
)
from eth2.beacon.types.blocks import (
BeaconBlockBody,
)
from eth2.beacon.state_machines.forks.serenity.blocks import (
SerenityBeaconBlock,
)
from eth2.beacon.state_machines.forks.serenity.operation_processing import (
process_attestations,
process_proposer_slashings,
process_attester_slashings,
process_voluntary_exits,
)
from eth2.beacon.tools.builder.validator import (
create_mock_attester_slashing_is_double_vote,
create_mock_signed_attestations_at_slot,
create_mock_proposer_slashing_at_block,
create_mock_voluntary_exit,
)
def test_process_max_attestations(genesis_state,
genesis_block,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
keymap):
attestation_slot = config.GENESIS_SLOT
current_slot = attestation_slot + config.MIN_ATTESTATION_INCLUSION_DELAY
state = genesis_state.copy(
slot=current_slot,
)
attestations = create_mock_signed_attestations_at_slot(
state=state,
config=config,
attestation_slot=attestation_slot,
beacon_block_root=genesis_block.root,
keymap=keymap,
voted_attesters_ratio=1.0,
)
attestations_count = len(attestations)
assert attestations_count > 0
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
attestations=attestations * (attestations_count // config.MAX_ATTESTATIONS + 1),
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=current_slot,
body=block_body,
)
with pytest.raises(ValidationError):
process_attestations(
state,
block,
config,
)
@pytest.mark.parametrize(
(
'num_validators',
'slots_per_epoch',
'target_committee_size',
'shard_count',
'block_root_1',
'block_root_2',
'success'
),
[
(10, 2, 2, 2, b'\x11' * 32, b'\x22' * 32, True),
(10, 2, 2, 2, b'\x11' * 32, b'\x11' * 32, False),
]
)
def test_process_proposer_slashings(genesis_state,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
keymap,
block_root_1,
block_root_2,
success):
current_slot = config.GENESIS_SLOT + 1
state = genesis_state.copy(
slot=current_slot,
)
whistleblower_index = get_beacon_proposer_index(
state,
state.slot,
CommitteeConfig(config),
)
slashing_proposer_index = (whistleblower_index + 1) % len(state.validator_registry)
proposer_slashing = create_mock_proposer_slashing_at_block(
state,
config,
keymap,
block_root_1=block_root_1,
block_root_2=block_root_2,
proposer_index=slashing_proposer_index,
)
proposer_slashings = (proposer_slashing,)
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
proposer_slashings=proposer_slashings,
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=current_slot,
body=block_body,
)
if success:
new_state = process_proposer_slashings(
state,
block,
config,
)
# Check if slashed
assert (
new_state.validator_balances[slashing_proposer_index] <
state.validator_balances[slashing_proposer_index]
)
else:
with pytest.raises(ValidationError):
process_proposer_slashings(
state,
block,
config,
)
@pytest.mark.parametrize(
(
'num_validators',
'slots_per_epoch',
'target_committee_size',
'shard_count',
'min_attestation_inclusion_delay',
),
[
(100, 2, 2, 2, 1),
]
)
@pytest.mark.parametrize(
('success'),
[
# (True),
(False),
]
)
def test_process_attester_slashings(genesis_state,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
keymap,
min_attestation_inclusion_delay,
success):
attesting_state = genesis_state.copy(
slot=genesis_state.slot + config.SLOTS_PER_EPOCH,
)
valid_attester_slashing = create_mock_attester_slashing_is_double_vote(
attesting_state,
config,
keymap,
attestation_epoch=0,
)
state = attesting_state.copy(
slot=attesting_state.slot + min_attestation_inclusion_delay,
)
if success:
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
attester_slashings=(valid_attester_slashing,),
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=state.slot,
body=block_body,
)
attester_index = valid_attester_slashing.slashable_attestation_1.validator_indices[0]
new_state = process_attester_slashings(
state,
block,
config,
)
# Check if slashed
assert (
new_state.validator_balances[attester_index] < state.validator_balances[attester_index]
)
else:
invalid_attester_slashing = valid_attester_slashing.copy(
slashable_attestation_2=valid_attester_slashing.slashable_attestation_2.copy(
data=valid_attester_slashing.slashable_attestation_1.data,
)
)
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
attester_slashings=(invalid_attester_slashing,),
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=state.slot,
body=block_body,
)
with pytest.raises(ValidationError):
process_attester_slashings(
state,
block,
config,
)
@pytest.mark.parametrize(
(
'num_validators,'
'slots_per_epoch,'
'min_attestation_inclusion_delay,'
'target_committee_size,'
'shard_count,'
'success,'
'genesis_slot,'
),
[
(10, 2, 1, 2, 2, True, 0),
(10, 2, 1, 2, 2, False, 0),
(40, 4, 2, 3, 5, True, 0),
]
)
def test_process_attestations(genesis_state,
genesis_block,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
keymap,
success):
attestation_slot = 0
current_slot = attestation_slot + config.MIN_ATTESTATION_INCLUSION_DELAY
state = genesis_state.copy(
slot=current_slot,
)
attestations = create_mock_signed_attestations_at_slot(
state=state,
config=config,
attestation_slot=attestation_slot,
beacon_block_root=genesis_block.root,
keymap=keymap,
voted_attesters_ratio=1.0,
)
assert len(attestations) > 0
if not success:
# create invalid attestation in the future
invalid_attestation_data = attestations[-1].data.copy(
slot=state.slot + 10,
)
invalid_attestation = attestations[-1].copy(
data=invalid_attestation_data,
)
attestations = attestations[:-1] + (invalid_attestation,)
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
attestations=attestations,
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=current_slot,
body=block_body,
)
if success:
new_state = process_attestations(
state,
block,
config,
)
assert len(new_state.current_epoch_attestations) == len(attestations)
else:
with pytest.raises(ValidationError):
process_attestations(
state,
block,
config,
)
@pytest.mark.parametrize(
(
'num_validators',
'slots_per_epoch',
'target_committee_size',
'activation_exit_delay',
),
[
(40, 2, 2, 2),
]
)
@pytest.mark.parametrize(
(
'success',
),
[
(True,),
(False,),
]
)
def test_process_voluntary_exits(genesis_state,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
keymap,
success):
state = genesis_state.copy(
slot=get_epoch_start_slot(
config.GENESIS_EPOCH + config.PERSISTENT_COMMITTEE_PERIOD,
config.SLOTS_PER_EPOCH,
),
)
validator_index = 0
validator = state.validator_registry[validator_index].copy(
activation_epoch=config.GENESIS_EPOCH,
)
state = state.update_validator_registry(validator_index, validator)
valid_voluntary_exit = create_mock_voluntary_exit(
state,
config,
keymap,
validator_index,
)
if success:
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
voluntary_exits=(valid_voluntary_exit,),
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=state.slot,
body=block_body,
)
new_state = process_voluntary_exits(
state,
block,
config,
)
# Check if initiated exit
assert (
new_state.validator_registry[validator_index].initiated_exit
)
else:
invalid_voluntary_exit = valid_voluntary_exit.copy(
signature=b'\x12' * 96, # Put wrong signature
)
block_body = BeaconBlockBody(**sample_beacon_block_body_params).copy(
voluntary_exits=(invalid_voluntary_exit,),
)
block = SerenityBeaconBlock(**sample_beacon_block_params).copy(
slot=state.slot,
body=block_body,
)
with pytest.raises(ValidationError):
process_voluntary_exits(
state,
block,
config,
)
|
the-stack_106_23606 | """Prepare a workflow for running on AWS using STORMSeq as a front end.
http://www.stormseq.org/
"""
import argparse
import json
import os
import yaml
from bcbio import utils
from bcbio.upload import s3
from bcbio.workflow import xprize
def parse_args(args):
parser = xprize.HelpArgParser(description="Run STORMSeq processing on AWS")
parser.add_argument("config_file", help="JSON configuration file with form parameters")
parser.add_argument("base_dir", help="Base directory to process in")
parser.add_argument("bcbio_config_file", help="bcbio system YAML config")
args = parser.parse_args(args)
return args
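# A minimal invocation sketch (hypothetical paths; not part of the original module):
#
#   args = parse_args(["stormseq_form.json", "/data/run1", "bcbio_system.yaml"])
#   workdir, run_info = setup(args)
#   # ``run_info`` holds the path to the generated bcbio sample YAML together with
#   # the supplied system config.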
def _get_s3_files(local_dir, file_info, params):
"""Retrieve s3 files to local directory, handling STORMSeq inputs.
"""
assert len(file_info) == 1
    files = list(file_info.values())[0]
fnames = []
for k in ["1", "2"]:
if files[k] not in fnames:
fnames.append(files[k])
out = []
for fname in fnames:
bucket, key = fname.replace("s3://", "").split("/", 1)
if params["access_key_id"] == "TEST":
out.append(os.path.join(local_dir, os.path.basename(key)))
else:
out.append(s3.get_file(local_dir, bucket, key, params))
return out
def setup(args):
configdir = utils.safe_makedir(os.path.join(args.base_dir, "config"))
inputdir = utils.safe_makedir(os.path.join(args.base_dir, "inputs"))
workdir = utils.safe_makedir(os.path.join(args.base_dir, "work"))
finaldir = utils.safe_makedir(os.path.join(args.base_dir, "ready"))
out_config_file = os.path.join(configdir, "%s.yaml" %
os.path.splitext(os.path.basename(args.config_file))[0])
with open(args.config_file) as in_handle:
ss_config = json.load(in_handle)
ss_params = ss_config["parameters"]
out = {"fc_date": xprize.get_fc_date(out_config_file),
"fc_name": ss_config["sample"],
"upload": {"dir": finaldir,
"method": "s3",
"bucket": ss_params["s3_bucket"],
"access_key_id": ss_params["access_key_id"],
"secret_access_key": ss_params["secret_access_key"]},
"details": [{
"files": _get_s3_files(inputdir, ss_config["files"], ss_params),
"lane": 1,
"description": ss_params["sample"],
"analysis": "variant",
"genome_build": ss_params["genome_version"],
"algorithm": {
"aligner": ss_params["alignment_pipeline"],
"variantcaller": ss_params["calling_pipeline"],
"quality_format": "Standard",
"coverage_interval": "genome" if ss_params["data_type"] == "data_wgs" else "exome",
}}]}
with open(out_config_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return workdir, {"config_file": args.bcbio_config_file,
"run_info_yaml": out_config_file}
|
the-stack_106_23607 | """Contains ECG Batch class."""
# pylint: disable=too-many-lines
import copy
from textwrap import dedent
import numpy as np
import pandas as pd
import scipy
import scipy.signal
import matplotlib.pyplot as plt
import pywt
from .. import dataset as ds
from . import kernels
from . import ecg_batch_tools as bt
from .utils import get_units_conversion_factor, partialmethod, LabelBinarizer
ACTIONS_DICT = {
"fft": (np.fft.fft, "numpy.fft.fft", "a Discrete Fourier Transform"),
"ifft": (np.fft.ifft, "numpy.fft.ifft", "an inverse Discrete Fourier Transform"),
"rfft": (np.fft.rfft, "numpy.fft.rfft", "a real-input Discrete Fourier Transform"),
"irfft": (np.fft.irfft, "numpy.fft.irfft", "a real-input inverse Discrete Fourier Transform"),
"dwt": (pywt.dwt, "pywt.dwt", "a single level Discrete Wavelet Transform"),
"idwt": (lambda x, *args, **kwargs: pywt.idwt(*x, *args, **kwargs), "pywt.idwt",
"a single level inverse Discrete Wavelet Transform"),
"wavedec": (pywt.wavedec, "pywt.wavedec", "a multilevel 1D Discrete Wavelet Transform"),
"waverec": (lambda x, *args, **kwargs: pywt.waverec(list(x), *args, **kwargs), "pywt.waverec",
"a multilevel 1D Inverse Discrete Wavelet Transform"),
"pdwt": (lambda x, part, *args, **kwargs: pywt.downcoef(part, x, *args, **kwargs), "pywt.downcoef",
"a partial Discrete Wavelet Transform data decomposition"),
"cwt": (lambda x, *args, **kwargs: pywt.cwt(x, *args, **kwargs)[0], "pywt.cwt", "a Continuous Wavelet Transform"),
}
TEMPLATE_DOCSTRING = """
Compute {description} for each slice of a signal over the axis 0
(typically the channel axis).
This method simply wraps ``apply_to_each_channel`` method by setting the
``func`` argument to ``{full_name}``.
Parameters
----------
src : str, optional
Batch attribute or component name to get the data from.
dst : str, optional
Batch attribute or component name to put the result in.
args : misc
Any additional positional arguments to ``{full_name}``.
kwargs : misc
Any additional named arguments to ``{full_name}``.
Returns
-------
batch : EcgBatch
Transformed batch. Changes ``dst`` attribute or component.
"""
TEMPLATE_DOCSTRING = dedent(TEMPLATE_DOCSTRING).strip()
def add_actions(actions_dict, template_docstring):
"""Add new actions in ``EcgBatch`` by setting ``func`` argument in
``EcgBatch.apply_to_each_channel`` method to given callables.
Parameters
----------
actions_dict : dict
A dictionary, containing new methods' names as keys and a callable,
its full name and description for each method as values.
template_docstring : str
A string, that will be formatted for each new method from
``actions_dict`` using ``full_name`` and ``description`` parameters
and assigned to its ``__doc__`` attribute.
Returns
-------
decorator : callable
Class decorator.
"""
def decorator(cls):
"""Returned decorator."""
for method_name, (func, full_name, description) in actions_dict.items():
docstring = template_docstring.format(full_name=full_name, description=description)
method = partialmethod(cls.apply_to_each_channel, func)
method.__doc__ = docstring
setattr(cls, method_name, method)
return cls
return decorator
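# For example, after decoration the call below on an ``EcgBatch`` instance is
# equivalent to ``batch.apply_to_each_channel(np.fft.fft, src="signal", dst="signal")``
# (illustrative sketch only; ``batch`` is assumed to be an existing EcgBatch):
#
#   batch.fft(src="signal", dst="signal")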
@add_actions(ACTIONS_DICT, TEMPLATE_DOCSTRING) # pylint: disable=too-many-public-methods,too-many-instance-attributes
class EcgBatch(ds.Batch):
    """Batch class for storing ECG signals.
Contains ECG signals and additional metadata along with various processing
methods.
Parameters
----------
index : DatasetIndex
Unique identifiers of ECGs in the batch.
preloaded : tuple, optional
Data to put in the batch if given. Defaults to ``None``.
unique_labels : 1-D ndarray, optional
Array with unique labels in a dataset.
Attributes
----------
index : DatasetIndex
Unique identifiers of ECGs in the batch.
signal : 1-D ndarray
Array of 2-D ndarrays with ECG signals in channels first format.
annotation : 1-D ndarray
Array of dicts with different types of annotations.
meta : 1-D ndarray
Array of dicts with metadata about signals.
target : 1-D ndarray
Array with signals' labels.
unique_labels : 1-D ndarray
Array with unique labels in a dataset.
label_binarizer : LabelBinarizer
Object for label one-hot encoding.
Note
----
Some batch methods take ``index`` as their first argument after ``self``.
    You should not specify it in your code; it will be passed automatically by
    the ``inbatch_parallel`` decorator. For example, the ``resample_signals`` method
    with ``index`` and ``fs`` arguments should be called as
``batch.resample_signals(fs)``.
"""
def __init__(self, index, preloaded=None, unique_labels=None):
super().__init__(index, preloaded)
self.signal = self.array_of_nones
self.annotation = self.array_of_dicts
self.meta = self.array_of_dicts
self.target = self.array_of_nones
self._unique_labels = None
self._label_binarizer = None
self.unique_labels = unique_labels
@property
def components(self):
"""tuple of str: Data components names."""
return "signal", "annotation", "meta", "target"
@property
def array_of_nones(self):
"""1-D ndarray: ``NumPy`` array with ``None`` values."""
return np.array([None] * len(self.index))
@property
def array_of_dicts(self):
"""1-D ndarray: ``NumPy`` array with empty ``dict`` values."""
return np.array([{} for _ in range(len(self.index))])
@property
def unique_labels(self):
"""1-D ndarray: Unique labels in a dataset."""
return self._unique_labels
@unique_labels.setter
def unique_labels(self, val):
"""Set unique labels value to ``val``. Updates
``self.label_binarizer`` instance.
Parameters
----------
val : 1-D ndarray
New unique labels.
"""
self._unique_labels = val
if self.unique_labels is None or len(self.unique_labels) == 0:
self._label_binarizer = None
else:
self._label_binarizer = LabelBinarizer().fit(self.unique_labels)
@property
def label_binarizer(self):
"""LabelBinarizer: Label binarizer object for unique labels in a
dataset."""
return self._label_binarizer
def _reraise_exceptions(self, results):
"""Reraise all exceptions in the ``results`` list.
Parameters
----------
results : list
Post function computation results.
Raises
------
RuntimeError
If any paralleled action raised an ``Exception``.
"""
if ds.any_action_failed(results):
all_errors = self.get_errors(results)
raise RuntimeError("Cannot assemble the batch", all_errors)
@staticmethod
def _check_2d(signal):
"""Check if given signal is 2-D.
Parameters
----------
signal : ndarray
Signal to check.
Raises
------
ValueError
If given signal is not two-dimensional.
"""
if signal.ndim != 2:
raise ValueError("Each signal in batch must be 2-D ndarray")
# Input/output methods
@ds.action
def load(self, src=None, fmt=None, components=None, ann_ext=None, *args, **kwargs):
"""Load given batch components from source.
Most of the ``EcgBatch`` actions work under the assumption that both
``signal`` and ``meta`` components are loaded. In case this assumption
is not fulfilled, normal operation of the actions is not guaranteed.
This method supports loading of signals from WFDB, DICOM, EDF, WAV,
XML and Blosc formats.
Parameters
----------
src : misc, optional
Source to load components from.
fmt : str, optional
Source format.
components : str or array-like, optional
Components to load.
ann_ext : str, optional
Extension of the annotation file.
Returns
-------
batch : EcgBatch
Batch with loaded components. Changes batch data inplace.
"""
if components is None:
components = self.components
components = np.asarray(components).ravel()
if (fmt == "csv" or fmt is None and isinstance(src, pd.Series)) and np.all(components == "target"):
return self._load_labels(src)
if fmt in ["wfdb", "dicom", "edf", "wav", "xml"]:
return self._load_data(src=src, fmt=fmt, components=components, ann_ext=ann_ext, *args, **kwargs)
return super().load(src, fmt, components, *args, **kwargs)
@ds.inbatch_parallel(init="indices", post="_assemble_load", target="threads")
def _load_data(self, index, src=None, fmt=None, components=None, *args, **kwargs):
"""Load given components from WFDB, DICOM, EDF, WAV or XML files.
Parameters
----------
src : misc, optional
Source to load components from. Must be a collection, that can be
indexed by indices of a batch. If ``None`` and ``index`` has
``FilesIndex`` type, the path from ``index`` is used.
fmt : str, optional
Source format.
components : iterable, optional
Components to load.
ann_ext: str, optional
Extension of the annotation file.
Returns
-------
batch : EcgBatch
Batch with loaded components. Changes batch data inplace.
Raises
------
ValueError
If source path is not specified and batch's ``index`` is not a
``FilesIndex``.
"""
loaders = {
"wfdb": bt.load_wfdb,
"dicom": bt.load_dicom,
"edf": bt.load_edf,
"wav": bt.load_wav,
"xml": bt.load_xml,
}
if src is not None:
path = src[index]
elif isinstance(self.index, ds.FilesIndex):
path = self.index.get_fullpath(index) # pylint: disable=no-member
else:
raise ValueError("Source path is not specified")
return loaders[fmt](path, components, *args, **kwargs)
def _assemble_load(self, results, *args, **kwargs):
"""Concatenate results of different workers and update ``self``.
Parameters
----------
results : list
Workers' results.
Returns
-------
batch : EcgBatch
Assembled batch. Changes components inplace.
"""
_ = args, kwargs
self._reraise_exceptions(results)
components = kwargs.get("components", None)
if components is None:
components = self.components
for comp, data in zip(components, zip(*results)):
if comp == "signal":
data = np.array(data + (None,))[:-1]
else:
data = np.array(data)
setattr(self, comp, data)
return self
def _load_labels(self, src):
"""Load labels from a csv file or ``pandas.Series``.
Parameters
----------
src : str or Series
Path to csv file or ``pandas.Series``. The file should contain two
columns: ECG index and label. It shouldn't have a header.
Returns
-------
batch : EcgBatch
Batch with loaded labels. Changes ``self.target`` inplace.
Raises
------
TypeError
If ``src`` is not a string or ``pandas.Series``.
RuntimeError
If ``unique_labels`` has not been defined and the batch was not
created by a ``Pipeline``.
"""
if not isinstance(src, (str, pd.Series)):
raise TypeError("Unsupported type of source")
if isinstance(src, str):
src = pd.read_csv(src, header=None, names=["index", "label"], index_col=0)["label"]
self.target = src[self.indices].values
if self.unique_labels is None:
if self.pipeline is None:
raise RuntimeError("Batch with undefined unique_labels must be created in a pipeline")
ds_indices = self.pipeline.dataset.indices
self.unique_labels = np.sort(src[ds_indices].unique())
return self
def show_ecg(self, index=None, start=0, end=None, annot=None, subplot_size=(10, 4)): # pylint: disable=too-many-locals, line-too-long
"""Plot an ECG signal.
Optionally highlight QRS complexes along with P and T waves. Each
channel is displayed on a separate subplot.
Parameters
----------
index : element of ``self.indices``, optional
Index of a signal to plot. If undefined, the first ECG in the
batch is used.
start : int, optional
The start point of the displayed part of the signal (in seconds).
end : int, optional
The end point of the displayed part of the signal (in seconds).
annot : str, optional
If not ``None``, specifies attribute that stores annotation
obtained from ``cardio.models.HMModel``.
subplot_size : tuple
Width and height of each subplot in inches.
Raises
------
ValueError
If the chosen signal is not two-dimensional.
"""
i = 0 if index is None else self.get_pos(None, "signal", index)
signal, meta = self.signal[i], self.meta[i]
self._check_2d(signal)
fs = meta["fs"]
num_channels = signal.shape[0]
        start = int(start * fs)
        end = signal.shape[1] if end is None else int(end * fs)
figsize = (subplot_size[0], subplot_size[1] * num_channels)
_, axes = plt.subplots(num_channels, 1, squeeze=False, figsize=figsize)
for channel, (ax,) in enumerate(axes):
lead_name = "undefined" if meta["signame"][channel] == "None" else meta["signame"][channel]
units = "undefined" if meta["units"][channel] is None else meta["units"][channel]
ax.plot((np.arange(start, end) / fs), signal[channel, start:end])
ax.set_title("Lead name: {}".format(lead_name))
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Amplitude ({})".format(units))
ax.grid(True, which="major")
if annot and hasattr(self, annot):
def fill_segments(segment_states, color):
"""Fill ECG segments with a given color."""
starts, ends = bt.find_intervals_borders(signal_states, segment_states)
for start_t, end_t in zip((starts + start) / fs, (ends + start) / fs):
for (ax,) in axes:
ax.axvspan(start_t, end_t, color=color, alpha=0.3)
signal_states = getattr(self, annot)[i][start:end]
fill_segments(bt.QRS_STATES, "red")
fill_segments(bt.P_STATES, "green")
fill_segments(bt.T_STATES, "blue")
plt.tight_layout()
plt.show()
# Batch processing
@classmethod
def merge(cls, batches, batch_size=None):
"""Concatenate a list of ``EcgBatch`` instances and split the result
into two batches of sizes ``batch_size`` and ``sum(lens of batches) -
batch_size`` respectively.
Parameters
----------
batches : list
List of ``EcgBatch`` instances.
batch_size : positive int, optional
Length of the first resulting batch. If ``None``, equals the
length of the concatenated batch.
Returns
-------
new_batch : EcgBatch
Batch of no more than ``batch_size`` first items from the
concatenation of input batches. Contains a deep copy of input
batches' data.
rest_batch : EcgBatch
Batch of the remaining items. Contains a deep copy of input
batches' data.
Raises
------
ValueError
If ``batch_size`` is non-positive or non-integer.
"""
batches = [batch for batch in batches if batch is not None]
if len(batches) == 0:
return None, None
total_len = np.sum([len(batch) for batch in batches])
if batch_size is None:
batch_size = total_len
elif not isinstance(batch_size, int) or batch_size < 1:
raise ValueError("Batch size must be positive int")
indices = np.arange(total_len)
data = []
for comp in batches[0].components:
data.append(np.concatenate([batch.get(component=comp) for batch in batches]))
data = copy.deepcopy(data)
new_indices = indices[:batch_size]
new_batch = cls(ds.DatasetIndex(new_indices), unique_labels=batches[0].unique_labels)
new_batch._data = tuple(comp[:batch_size] for comp in data) # pylint: disable=protected-access, attribute-defined-outside-init, line-too-long
if total_len <= batch_size:
rest_batch = None
else:
rest_indices = indices[batch_size:]
rest_batch = cls(ds.DatasetIndex(rest_indices), unique_labels=batches[0].unique_labels)
rest_batch._data = tuple(comp[batch_size:] for comp in data) # pylint: disable=protected-access, attribute-defined-outside-init, line-too-long
return new_batch, rest_batch
# Versatile components processing
@ds.action
def apply_transform(self, func, *args, src="signal", dst="signal", **kwargs):
"""Apply a function to each item in the batch.
Parameters
----------
func : callable
A function to apply. Must accept an item of ``src`` as its first
argument if ``src`` is not ``None``.
src : str, array-like or ``None``, optional
The source to get the data from. If ``src`` is ``str``, it is
treated as the batch attribute or component name. Defaults to
``signal`` component.
dst : str, writeable array-like or ``None``, optional
The source to put the result in. If ``dst`` is ``str``, it is
treated as the batch attribute or component name. Defaults to
``signal`` component.
args : misc
Any additional positional arguments to ``func``.
kwargs : misc
Any additional named arguments to ``func``.
Returns
-------
batch : EcgBatch
Transformed batch. If ``dst`` is ``str``, the corresponding
attribute or component is changed inplace.
"""
if isinstance(dst, str) and not hasattr(self, dst):
setattr(self, dst, np.array([None] * len(self.index)))
return super().apply_transform(func, *args, src=src, dst=dst, **kwargs)
def _init_component(self, *args, **kwargs):
"""Create and preallocate a new attribute with the name ``dst`` if it
does not exist and return batch indices."""
_ = args
dst = kwargs.get("dst")
if dst is None:
raise KeyError("dst argument must be specified")
if not hasattr(self, dst):
setattr(self, dst, np.array([None] * len(self.index)))
return self.indices
@ds.action
@ds.inbatch_parallel(init="_init_component", src="signal", dst="signal", target="threads")
def apply_to_each_channel(self, index, func, *args, src="signal", dst="signal", **kwargs):
"""Apply a function to each slice of a signal over the axis 0
(typically the channel axis).
Parameters
----------
func : callable
A function to apply. Must accept a signal as its first argument.
src : str, optional
Batch attribute or component name to get the data from. Defaults
to ``signal`` component.
dst : str, optional
Batch attribute or component name to put the result in. Defaults
to ``signal`` component.
args : misc
Any additional positional arguments to ``func``.
kwargs : misc
Any additional named arguments to ``func``.
Returns
-------
batch : EcgBatch
Transformed batch. Changes ``dst`` attribute or component.
"""
i = self.get_pos(None, src, index)
src_data = getattr(self, src)[i]
dst_data = np.array([func(slc, *args, **kwargs) for slc in src_data])
getattr(self, dst)[i] = dst_data
# Labels processing
def _filter_batch(self, keep_mask):
"""Drop elements from a batch with corresponding ``False`` values in
``keep_mask``.
This method creates a new batch and updates only components and
``unique_labels`` attribute. The information stored in other
attributes will be lost.
Parameters
----------
keep_mask : bool 1-D ndarray
Filtering mask.
Returns
-------
batch : same class as self
Filtered batch.
Raises
------
SkipBatchException
If all batch data was dropped. If the batch is created by a
``pipeline``, its processing will be stopped and the ``pipeline``
will create the next batch.
"""
indices = self.indices[keep_mask]
if len(indices) == 0:
raise ds.SkipBatchException("All batch data was dropped")
batch = self.__class__(ds.DatasetIndex(indices), unique_labels=self.unique_labels)
for component in self.components:
setattr(batch, component, getattr(self, component)[keep_mask])
return batch
@ds.action
def drop_labels(self, drop_list):
"""Drop elements whose labels are in ``drop_list``.
This method creates a new batch and updates only components and
``unique_labels`` attribute. The information stored in other
attributes will be lost.
Parameters
----------
drop_list : list
Labels to be dropped from a batch.
Returns
-------
batch : EcgBatch
Filtered batch. Creates a new ``EcgBatch`` instance.
Raises
------
SkipBatchException
If all batch data was dropped. If the batch is created by a
``pipeline``, its processing will be stopped and the ``pipeline``
will create the next batch.
"""
drop_arr = np.asarray(drop_list)
self.unique_labels = np.setdiff1d(self.unique_labels, drop_arr)
keep_mask = ~np.in1d(self.target, drop_arr)
return self._filter_batch(keep_mask)
@ds.action
def keep_labels(self, keep_list):
"""Drop elements whose labels are not in ``keep_list``.
This method creates a new batch and updates only components and
``unique_labels`` attribute. The information stored in other
attributes will be lost.
Parameters
----------
keep_list : list
Labels to be kept in a batch.
Returns
-------
batch : EcgBatch
Filtered batch. Creates a new ``EcgBatch`` instance.
Raises
------
SkipBatchException
If all batch data was dropped. If the batch is created by a
``pipeline``, its processing will be stopped and the ``pipeline``
will create the next batch.
"""
keep_arr = np.asarray(keep_list)
self.unique_labels = np.intersect1d(self.unique_labels, keep_arr)
keep_mask = np.in1d(self.target, keep_arr)
return self._filter_batch(keep_mask)
@ds.action
def rename_labels(self, rename_dict):
"""Rename labels with corresponding values from ``rename_dict``.
Parameters
----------
rename_dict : dict
Dictionary containing ``(old label : new label)`` pairs.
Returns
-------
batch : EcgBatch
Batch with renamed labels. Changes ``self.target`` inplace.
"""
self.unique_labels = np.array(sorted({rename_dict.get(t, t) for t in self.unique_labels}))
self.target = np.array([rename_dict.get(t, t) for t in self.target])
return self
@ds.action
def binarize_labels(self):
"""Binarize labels in a batch in a one-vs-all fashion.
Returns
-------
batch : EcgBatch
Batch with binarized labels. Changes ``self.target`` inplace.
"""
self.target = self.label_binarizer.transform(self.target)
return self
# Channels processing
@ds.inbatch_parallel(init="indices", target="threads")
def _filter_channels(self, index, names=None, indices=None, invert_mask=False):
"""Build and apply a boolean mask for each channel of a signal based
on provided channels ``names`` and ``indices``.
Mask value for a channel is set to ``True`` if its name or index is
contained in ``names`` or ``indices`` respectively. The mask can be
inverted before its application if ``invert_mask`` flag is set to
``True``.
Parameters
----------
names : str or list or tuple, optional
Channels names used to construct the mask.
indices : int or list or tuple, optional
Channels indices used to construct the mask.
invert_mask : bool, optional
Specifies whether to invert the mask before its application.
Returns
-------
batch : EcgBatch
Batch with filtered channels. Changes ``self.signal`` and
``self.meta`` inplace.
Raises
------
ValueError
If both ``names`` and ``indices`` are empty.
ValueError
If all channels should be dropped.
"""
i = self.get_pos(None, "signal", index)
channels_names = np.asarray(self.meta[i]["signame"])
        mask = np.zeros_like(channels_names, dtype=bool)
if names is None and indices is None:
raise ValueError("Both names and indices cannot be empty")
if names is not None:
names = np.asarray(names)
mask |= np.in1d(channels_names, names)
if indices is not None:
indices = np.asarray(indices)
mask |= np.array([i in indices for i in range(len(channels_names))])
if invert_mask:
mask = ~mask
if np.sum(mask) == 0:
raise ValueError("All channels cannot be dropped")
self.signal[i] = self.signal[i][mask]
self.meta[i]["signame"] = channels_names[mask]
self.meta[i]["units"] = self.meta[i]["units"][mask]
@ds.action
def drop_channels(self, names=None, indices=None):
"""Drop channels whose names are in ``names`` or whose indices are in
``indices``.
Parameters
----------
names : str or list or tuple, optional
Names of channels to be dropped from a batch.
indices : int or list or tuple, optional
Indices of channels to be dropped from a batch.
Returns
-------
batch : EcgBatch
Batch with dropped channels. Changes ``self.signal`` and
``self.meta`` inplace.
Raises
------
ValueError
If both ``names`` and ``indices`` are empty.
ValueError
If all channels should be dropped.
"""
return self._filter_channels(names, indices, invert_mask=True)
@ds.action
def keep_channels(self, names=None, indices=None):
"""Drop channels whose names are not in ``names`` and whose indices
are not in ``indices``.
Parameters
----------
names : str or list or tuple, optional
Names of channels to be kept in a batch.
indices : int or list or tuple, optional
Indices of channels to be kept in a batch.
Returns
-------
batch : EcgBatch
            Batch with only the specified channels kept. Changes ``self.signal`` and
``self.meta`` inplace.
Raises
------
ValueError
If both ``names`` and ``indices`` are empty.
ValueError
If all channels should be dropped.
"""
return self._filter_channels(names, indices, invert_mask=False)
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def rename_channels(self, index, rename_dict):
"""Rename channels with corresponding values from ``rename_dict``.
Parameters
----------
rename_dict : dict
Dictionary containing ``(old channel name : new channel name)``
pairs.
Returns
-------
batch : EcgBatch
Batch with renamed channels. Changes ``self.meta`` inplace.
"""
i = self.get_pos(None, "signal", index)
old_names = self.meta[i]["signame"]
new_names = np.array([rename_dict.get(name, name) for name in old_names], dtype=object)
self.meta[i]["signame"] = new_names
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def reorder_channels(self, index, new_order):
"""Change the order of channels in the batch according to the
``new_order``.
Parameters
----------
new_order : array_like
A list of channel names specifying the order of channels in the
transformed batch.
Returns
-------
batch : EcgBatch
Batch with reordered channels. Changes ``self.signal`` and
``self.meta`` inplace.
Raises
------
ValueError
If unknown lead names are specified.
ValueError
If all channels should be dropped.
"""
i = self.get_pos(None, "signal", index)
old_order = self.meta[i]["signame"]
diff = np.setdiff1d(new_order, old_order)
if diff.size > 0:
raise ValueError("Unknown lead names: {}".format(", ".join(diff)))
if len(new_order) == 0:
raise ValueError("All channels cannot be dropped")
transform_dict = {k: v for v, k in enumerate(old_order)}
indices = [transform_dict[k] for k in new_order]
self.signal[i] = self.signal[i][indices]
self.meta[i]["signame"] = self.meta[i]["signame"][indices]
self.meta[i]["units"] = self.meta[i]["units"][indices]
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def convert_units(self, index, new_units):
"""Convert units of signal's channels to ``new_units``.
Parameters
----------
new_units : str, dict or array_like
New units of signal's channels. Must be specified in SI format and
can be of one of the following types:
* ``str`` - defines the same new units for each channel.
* ``dict`` - defines the mapping from channel names to new
units. Channels, whose names are not in the dictionary,
remain unchanged.
* ``array_like`` - defines new units for corresponding
channels. The length of the sequence in this case must match
the number of channels.
Returns
-------
batch : EcgBatch
Batch with converted units. Changes ``self.signal`` and
``self.meta`` inplace.
Raises
------
ValueError
If ``new_units`` is ``array_like`` and its length doesn't match
the number of channels.
ValueError
If unknown units are used.
ValueError
If conversion between incompatible units is performed.
"""
i = self.get_pos(None, "signal", index)
old_units = self.meta[i]["units"]
channels_names = self.meta[i]["signame"]
if isinstance(new_units, str):
new_units = [new_units] * len(old_units)
elif isinstance(new_units, dict):
new_units = [new_units.get(name, unit) for name, unit in zip(channels_names, old_units)]
elif len(new_units) != len(old_units):
raise ValueError("The length of the new and old units lists must be the same")
factors = [get_units_conversion_factor(old, new) for old, new in zip(old_units, new_units)]
factors = np.array(factors).reshape(*([-1] + [1] * (self.signal[i].ndim - 1)))
self.signal[i] *= factors
self.meta[i]["units"] = np.asarray(new_units)
# Signal processing
@ds.action
def convolve_signals(self, kernel, padding_mode="edge", axis=-1, **kwargs):
"""Convolve signals with given ``kernel``.
Parameters
----------
kernel : 1-D array_like
Convolution kernel.
padding_mode : str or function, optional
``np.pad`` padding mode.
axis : int, optional
Axis along which signals are sliced. Default value is -1.
kwargs : misc
Any additional named arguments to ``np.pad``.
Returns
-------
batch : EcgBatch
Convolved batch. Changes ``self.signal`` inplace.
Raises
------
ValueError
If ``kernel`` is not one-dimensional or has non-numeric ``dtype``.
"""
for i in range(len(self.signal)):
self.signal[i] = bt.convolve_signals(self.signal[i], kernel, padding_mode, axis, **kwargs)
return self
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def band_pass_signals(self, index, low=None, high=None, axis=-1):
"""Reject frequencies outside a given range.
Parameters
----------
low : positive float, optional
High-pass filter cutoff frequency (in Hz).
high : positive float, optional
Low-pass filter cutoff frequency (in Hz).
axis : int, optional
Axis along which signals are sliced. Default value is -1.
Returns
-------
batch : EcgBatch
Filtered batch. Changes ``self.signal`` inplace.
"""
i = self.get_pos(None, "signal", index)
self.signal[i] = bt.band_pass_signals(self.signal[i], self.meta[i]["fs"], low, high, axis)
@ds.action
def drop_short_signals(self, min_length, axis=-1):
"""Drop short signals from a batch.
Parameters
----------
min_length : positive int
Minimal signal length.
axis : int, optional
Axis along which length is calculated. Default value is -1.
Returns
-------
batch : EcgBatch
Filtered batch. Creates a new ``EcgBatch`` instance.
"""
keep_mask = np.array([sig.shape[axis] >= min_length for sig in self.signal])
return self._filter_batch(keep_mask)
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def flip_signals(self, index, window_size=None, threshold=0):
"""Flip 2-D signals whose R-peaks are directed downwards.
Each element of ``self.signal`` must be a 2-D ndarray. Signals are
flipped along axis 1 (signal axis). For each subarray of
``window_size`` length skewness is calculated and compared with
``threshold`` to decide whether this subarray should be flipped or
not. Then the mode of the result is calculated to make the final
decision.
Parameters
----------
window_size : int, optional
Signal is split into K subarrays of ``window_size`` length. If it
is not possible, data in the end of the signal is removed. If
``window_size`` is not given, the whole array is checked without
splitting.
threshold : float, optional
If skewness of a subarray is less than the ``threshold``, it
"votes" for flipping the signal. Default value is 0.
Returns
-------
batch : EcgBatch
Batch with flipped signals. Changes ``self.signal`` inplace.
Raises
------
ValueError
If given signal is not two-dimensional.
"""
i = self.get_pos(None, "signal", index)
self._check_2d(self.signal[i])
sig = bt.band_pass_signals(self.signal[i], self.meta[i]["fs"], low=5, high=50)
sig = bt.convolve_signals(sig, kernels.gaussian(11, 3))
if window_size is None:
window_size = sig.shape[1]
number_of_splits = sig.shape[1] // window_size
sig = sig[:, : window_size * number_of_splits]
splits = np.split(sig, number_of_splits, axis=-1)
votes = [np.where(scipy.stats.skew(subseq, axis=-1) < threshold, -1, 1).reshape(-1, 1) for subseq in splits]
mode_of_votes = scipy.stats.mode(votes)[0].reshape(-1, 1)
self.signal[i] *= mode_of_votes
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def slice_signals(self, index, selection_object):
"""Perform indexing or slicing of signals in a batch. Allows basic
``NumPy`` indexing and slicing along with advanced indexing.
Parameters
----------
selection_object : slice or int or a tuple of slices and ints
An object that is used to slice signals.
Returns
-------
batch : EcgBatch
Batch with sliced signals. Changes ``self.signal`` inplace.
"""
i = self.get_pos(None, "signal", index)
self.signal[i] = self.signal[i][selection_object]
@staticmethod
def _pad_signal(signal, length, pad_value):
"""Pad signal with ``pad_value`` to the left along axis 1 (signal
axis).
Parameters
----------
signal : 2-D ndarray
Signals to pad.
length : positive int
Length of padded signal along axis 1.
pad_value : float
Padding value.
Returns
-------
signal : 2-D ndarray
Padded signals.
"""
pad_len = length - signal.shape[1]
sig = np.pad(signal, ((0, 0), (pad_len, 0)), "constant", constant_values=pad_value)
return sig
@staticmethod
def _get_segmentation_arg(arg, arg_name, target):
"""Get segmentation step or number of segments for a given signal.
Parameters
----------
arg : int or dict
Segmentation step or number of segments.
arg_name : str
Argument name.
target : hashable
Signal target.
Returns
-------
arg : positive int
Segmentation step or number of segments for given signal.
Raises
------
KeyError
If ``arg`` dict has no ``target`` key.
ValueError
If ``arg`` is not int or dict.
"""
if isinstance(arg, int):
return arg
if isinstance(arg, dict):
arg = arg.get(target)
if arg is None:
raise KeyError("Undefined {} for target {}".format(arg_name, target))
else:
return arg
else:
raise ValueError("Unsupported {} type".format(arg_name))
@staticmethod
def _check_segmentation_args(signal, target, length, arg, arg_name):
"""Check values of segmentation parameters.
Parameters
----------
signal : 2-D ndarray
Signals to segment.
target : hashable
Signal target.
length : positive int
Length of each segment along axis 1.
arg : positive int or dict
Segmentation step or number of segments.
arg_name : str
Argument name.
Returns
-------
arg : positive int
Segmentation step or number of segments for given signal.
Raises
------
ValueError
If:
* given signal is not two-dimensional,
* ``arg`` is not int or dict,
* ``length`` or ``arg`` for a given signal is negative or
non-integer.
KeyError
If ``arg`` dict has no ``target`` key.
"""
EcgBatch._check_2d(signal)
if (length <= 0) or not isinstance(length, int):
raise ValueError("Segment length must be positive integer")
arg = EcgBatch._get_segmentation_arg(arg, arg_name, target)
if (arg <= 0) or not isinstance(arg, int):
raise ValueError("{} must be positive integer".format(arg_name))
return arg
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def split_signals(self, index, length, step, pad_value=0):
"""Split 2-D signals along axis 1 (signal axis) with given ``length``
and ``step``.
If signal length along axis 1 is less than ``length``, it is padded to
the left with ``pad_value``.
Notice, that each resulting signal will be a 3-D ndarray of shape
``[n_segments, n_channels, length]``. If you would like to get a
number of 2-D signals of shape ``[n_channels, length]`` as a result,
you need to apply ``unstack_signals`` method then.
Parameters
----------
length : positive int
Length of each segment along axis 1.
step : positive int or dict
Segmentation step. If ``step`` is dict, segmentation step is
fetched by signal's target key.
pad_value : float, optional
Padding value. Defaults to 0.
Returns
-------
batch : EcgBatch
Batch of split signals. Changes ``self.signal`` inplace.
Raises
------
ValueError
If:
* given signal is not two-dimensional,
* ``step`` is not int or dict,
* ``length`` or ``step`` for a given signal is negative or
non-integer.
KeyError
If ``step`` dict has no signal's target key.
"""
i = self.get_pos(None, "signal", index)
step = self._check_segmentation_args(self.signal[i], self.target[i], length, step, "step size")
if self.signal[i].shape[1] < length:
tmp_sig = self._pad_signal(self.signal[i], length, pad_value)
self.signal[i] = tmp_sig[np.newaxis, ...]
else:
self.signal[i] = bt.split_signals(self.signal[i], length, step)
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def random_split_signals(self, index, length, n_segments, pad_value=0):
"""Split 2-D signals along axis 1 (signal axis) ``n_segments`` times
with random start position and given ``length``.
If signal length along axis 1 is less than ``length``, it is padded to
the left with ``pad_value``.
Notice, that each resulting signal will be a 3-D ndarray of shape
``[n_segments, n_channels, length]``. If you would like to get a
number of 2-D signals of shape ``[n_channels, length]`` as a result,
you need to apply ``unstack_signals`` method then.
Parameters
----------
length : positive int
Length of each segment along axis 1.
n_segments : positive int or dict
Number of segments. If ``n_segments`` is dict, number of segments
is fetched by signal's target key.
pad_value : float, optional
Padding value. Defaults to 0.
Returns
-------
batch : EcgBatch
Batch of split signals. Changes ``self.signal`` inplace.
Raises
------
ValueError
If:
* given signal is not two-dimensional,
* ``n_segments`` is not int or dict,
* ``length`` or ``n_segments`` for a given signal is negative
or non-integer.
KeyError
If ``n_segments`` dict has no signal's target key.
"""
i = self.get_pos(None, "signal", index)
n_segments = self._check_segmentation_args(self.signal[i], self.target[i], length,
n_segments, "number of segments")
if self.signal[i].shape[1] < length:
tmp_sig = self._pad_signal(self.signal[i], length, pad_value)
self.signal[i] = np.tile(tmp_sig, (n_segments, 1, 1))
else:
self.signal[i] = bt.random_split_signals(self.signal[i], length, n_segments)
@ds.action
def unstack_signals(self):
"""Create a new batch in which each signal's element along axis 0 is
considered as a separate signal.
This method creates a new batch and updates only components and
``unique_labels`` attribute. Signal's data from non-``signal``
components is duplicated using a deep copy for each of the resulting
signals. The information stored in other attributes will be lost.
Returns
-------
batch : same class as self
Batch with split signals and duplicated other components.
Examples
--------
>>> batch.signal
array([array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])],
dtype=object)
>>> batch = batch.unstack_signals()
>>> batch.signal
array([array([0, 1, 2, 3]),
array([4, 5, 6, 7]),
array([ 8, 9, 10, 11])],
dtype=object)
"""
n_reps = [sig.shape[0] for sig in self.signal]
signal = np.array([channel for signal in self.signal for channel in signal] + [None])[:-1]
index = ds.DatasetIndex(np.arange(len(signal)))
batch = self.__class__(index, unique_labels=self.unique_labels)
batch.signal = signal
for component_name in set(self.components) - {"signal"}:
val = []
component = getattr(self, component_name)
is_object_dtype = (component.dtype.kind == "O")
for elem, n in zip(component, n_reps):
for _ in range(n):
val.append(copy.deepcopy(elem))
if is_object_dtype:
val = np.array(val + [None])[:-1]
else:
val = np.array(val)
setattr(batch, component_name, val)
return batch
def _safe_fs_resample(self, index, fs):
"""Resample 2-D signal along axis 1 (signal axis) to given sampling
rate.
New sampling rate is guaranteed to be positive float.
Parameters
----------
fs : positive float
New sampling rate.
Raises
------
ValueError
If given signal is not two-dimensional.
"""
i = self.get_pos(None, "signal", index)
self._check_2d(self.signal[i])
new_len = max(1, int(fs * self.signal[i].shape[1] / self.meta[i]["fs"]))
self.meta[i]["fs"] = fs
self.signal[i] = bt.resample_signals(self.signal[i], new_len)
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def resample_signals(self, index, fs):
"""Resample 2-D signals along axis 1 (signal axis) to given sampling
rate.
Parameters
----------
fs : positive float
New sampling rate.
Returns
-------
batch : EcgBatch
Resampled batch. Changes ``self.signal`` and ``self.meta``
inplace.
Raises
------
ValueError
If given signal is not two-dimensional.
ValueError
If ``fs`` is negative or non-numeric.
"""
if fs <= 0:
raise ValueError("Sampling rate must be a positive float")
self._safe_fs_resample(index, fs)
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def random_resample_signals(self, index, distr, **kwargs):
"""Resample 2-D signals along axis 1 (signal axis) to a new sampling
rate, sampled from a given distribution.
If new sampling rate is negative, the signal is left unchanged.
Parameters
----------
distr : str or callable
``NumPy`` distribution name or a callable to sample from.
kwargs : misc
Distribution parameters.
Returns
-------
batch : EcgBatch
Resampled batch. Changes ``self.signal`` and ``self.meta``
inplace.
Raises
------
ValueError
If given signal is not two-dimensional.
ValueError
If ``distr`` is not a string or a callable.
"""
        if isinstance(distr, str) and hasattr(np.random, distr):
            distr_fn = getattr(np.random, distr)
            fs = distr_fn(**kwargs)
        elif callable(distr):
            fs = distr(**kwargs)
else:
raise ValueError("Unknown type of distribution parameter")
if fs <= 0:
fs = self[index].meta["fs"]
self._safe_fs_resample(index, fs)
# Complex ECG processing
@ds.action
@ds.inbatch_parallel(init="_init_component", src="signal", dst="signal", target="threads")
def spectrogram(self, index, *args, src="signal", dst="signal", **kwargs):
"""Compute a spectrogram for each slice of a signal over the axis 0
(typically the channel axis).
This method is a wrapper around ``scipy.signal.spectrogram``, that
accepts the same arguments, except the ``fs`` which is substituted
automatically from signal's meta. The method returns only the
spectrogram itself.
Parameters
----------
src : str, optional
Batch attribute or component name to get the data from.
dst : str, optional
Batch attribute or component name to put the result in.
args : misc
Any additional positional arguments to
``scipy.signal.spectrogram``.
kwargs : misc
Any additional named arguments to ``scipy.signal.spectrogram``.
Returns
-------
batch : EcgBatch
Transformed batch. Changes ``dst`` attribute or component.
"""
i = self.get_pos(None, src, index)
fs = self.meta[i]["fs"]
src_data = getattr(self, src)[i]
dst_data = np.array([scipy.signal.spectrogram(slc, fs, *args, **kwargs)[-1] for slc in src_data])
getattr(self, dst)[i] = dst_data
@ds.action
@ds.inbatch_parallel(init="_init_component", src="signal", dst="signal", target="threads")
def standardize(self, index, axis=None, eps=1e-10, *, src="signal", dst="signal"):
"""Standardize data along specified axes by removing the mean and
scaling to unit variance.
Parameters
----------
axis : ``None`` or int or tuple of ints, optional
Axis or axes along which standardization is performed. The default
is to compute for the flattened array.
eps: float
Small addition to avoid division by zero.
src : str, optional
Batch attribute or component name to get the data from.
dst : str, optional
Batch attribute or component name to put the result in.
Returns
-------
batch : EcgBatch
Transformed batch. Changes ``dst`` attribute or component.
"""
i = self.get_pos(None, src, index)
src_data = getattr(self, src)[i]
        dst_data = ((src_data - np.mean(src_data, axis=axis, keepdims=True)) /
                    (np.std(src_data, axis=axis, keepdims=True) + eps))
getattr(self, dst)[i] = dst_data
@ds.action
@ds.inbatch_parallel(init="indices", target="threads")
def calc_ecg_parameters(self, index, src=None):
"""Calculate ECG report parameters and write them to the ``meta``
component.
Calculates PQ, QT, QRS intervals along with their borders and the
heart rate value based on the annotation and writes them to the
``meta`` component.
Parameters
----------
src : str
Batch attribute or component name to get the annotation from.
Returns
-------
batch : EcgBatch
Batch with report parameters stored in the ``meta`` component.
Raises
------
ValueError
If ``src`` is ``None`` or is not an attribute of a batch.
"""
if not (src and hasattr(self, src)):
raise ValueError("Batch does not have an attribute or component {}!".format(src))
i = self.get_pos(None, "signal", index)
src_data = getattr(self, src)[i]
self.meta[i]["hr"] = bt.calc_hr(self.signal[i],
src_data,
np.float64(self.meta[i]["fs"]),
bt.R_STATE)
self.meta[i]["pq"] = bt.calc_pq(src_data,
np.float64(self.meta[i]["fs"]),
bt.P_STATES,
bt.Q_STATE,
bt.R_STATE)
self.meta[i]["qt"] = bt.calc_qt(src_data,
np.float64(self.meta[i]["fs"]),
bt.T_STATES,
bt.Q_STATE,
bt.R_STATE)
self.meta[i]["qrs"] = bt.calc_qrs(src_data,
np.float64(self.meta[i]["fs"]),
bt.S_STATE,
bt.Q_STATE,
bt.R_STATE)
self.meta[i]["qrs_segments"] = np.vstack(bt.find_intervals_borders(src_data,
bt.QRS_STATES))
self.meta[i]["p_segments"] = np.vstack(bt.find_intervals_borders(src_data,
bt.P_STATES))
self.meta[i]["t_segments"] = np.vstack(bt.find_intervals_borders(src_data,
bt.T_STATES))
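# A minimal usage sketch (hypothetical paths; not part of the original module)
# showing how the actions above are typically chained:
#
#   index = ds.FilesIndex(path="/data/ecg/*.hea", no_ext=True, sort=True)
#   batch = EcgBatch(index).load(fmt="wfdb", components=["signal", "meta"])
#   batch = batch.flip_signals().resample_signals(fs=300)
#   batch = batch.split_signals(length=3000, step=3000)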
|
the-stack_106_23608 | ## @package gradient_checker
# Module caffe2.python.gradient_checker
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
class NetGradientChecker(object):
@staticmethod
def Check(net, outputs_with_grad, input_values,
input_to_check, step_size=0.0001,
threshold=0.05, print_net=True):
assert input_to_check in input_values.keys()
net_copy = net.Clone(net.Name() + "_copy")
grad_map = net_copy.AddGradientOperators(outputs_with_grad)
assert input_to_check in grad_map, (
'{} has no gradient, cannot check net gradient.'.format(
input_to_check))
for name, value in input_values.items():
workspace.blobs[name] = value
def GetLoss(new_value):
workspace.blobs[input_to_check] = new_value
workspace.RunNetOnce(net_copy)
return sum([
workspace.blobs[output]
for output in outputs_with_grad
]).sum()
def GetValue(dim, delta):
input_value = input_values[input_to_check].copy()
input_value.flat[dim] += delta
return input_value
workspace.RunNetOnce(net_copy)
grad_blob = grad_map[input_to_check]
def get_analytic_grad(grad_blob):
if isinstance(grad_blob, core.BlobReference):
return workspace.blobs[grad_blob]
# If grad_blob is not a single blob, it should be a gradient slice.
            # To make it comparable with the estimated gradient, which is dense,
# we need to first convert grad_blob to dense gradient.
assert isinstance(grad_blob, core.GradientSlice)
dense_grad = 'tmp_dense_grad'
sparse_to_dense_op = core.CreateOperator(
'SparseToDense',
[grad_blob.indices, grad_blob.values, input_to_check],
dense_grad,
)
workspace.RunOperatorOnce(sparse_to_dense_op)
return workspace.blobs[dense_grad]
analytic_grad = get_analytic_grad(grad_blob)
grad_estimate = np.zeros_like(input_values[input_to_check])
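        # Central-difference estimate: perturb each input coordinate by +/- step_size
        # and approximate d(loss)/dx as (f(x + h) - f(x - h)) / (2 * h).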
for dim in range(input_values[input_to_check].size):
pos_loss = GetLoss(GetValue(dim, step_size))
neg_loss = GetLoss(GetValue(dim, -step_size))
grad_estimate.flat[dim] = (pos_loss - neg_loss) / step_size / 2
err_msg = "Error in gradient check for net_copy {}".format(
net.Name())
if print_net:
err_msg += ": {}".format(net.Proto())
np.testing.assert_allclose(
analytic_grad, grad_estimate,
atol=threshold, rtol=threshold,
err_msg=err_msg,
)
delta = np.abs(grad_estimate - analytic_grad).flatten()
return np.mean(delta), max(delta)
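# A minimal usage sketch for NetGradientChecker.Check (hypothetical net and blob
# names; not part of the original module):
#
#   mean_err, max_err = NetGradientChecker.Check(
#       net, outputs_with_grad=["loss"],
#       input_values={"X": np.random.randn(4, 3).astype(np.float32)},
#       input_to_check="X",
#   )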
class GradientChecker:
"""A gradient checker in Python.
This is not the most efficient way to check gradients, as the Python
interface will involve a lot of copy back and forth operations. Use at your
own risk.
"""
def __init__(
self,
stepsize,
threshold,
device_option=caffe2_pb2.DeviceOption(),
workspace_name="gradient_check"
):
self._stepsize = stepsize
self._threshold = threshold
self._device_option = device_option
self._workspace_name = workspace_name
def GetLossAndGrad(
self, op, grad_ops, x, input_name, grad_name, outputs_with_grads
):
# First, feed in the current input. Note that we are not changing
# anything else, so we don't need to feed in others.
workspace.FeedBlob(input_name, x, self._device_option)
# Run.
workspace.RunOperatorOnce(op)
loss = 0.
# Get Loss and feed in the gradients, run gradient ops.
for idx in outputs_with_grads:
name = op.output[idx]
arr = workspace.FetchBlob(name)
loss += (arr**2).sum()
workspace.FeedBlob(name + '_grad', arr, self._device_option)
loss /= 2.
# Run gradient ops
workspace.RunOperatorsOnce(grad_ops)
# Get gradients
if isinstance(grad_name, core.GradientSlice):
workspace.FeedBlob('zeros', np.zeros_like(x, dtype=np.float32))
workspace.FeedBlob('ones', np.ones(1, dtype=np.float32))
gv_cpu_op = core.CreateOperator(
'EnsureCPUOutput', grad_name.values, grad_name.values + '_cpu',
device_option=self._device_option
)
gi_cpu_op = core.CreateOperator(
'EnsureCPUOutput', grad_name.indices, grad_name.indices + '_cpu',
device_option=self._device_option
)
sparse_to_dense_op = core.CreateOperator(
'ScatterWeightedSum',
[
'zeros', 'ones', grad_name.indices + '_cpu',
grad_name.values + '_cpu', 'ones'
],
'zeros',
)
workspace.RunOperatorOnce(gv_cpu_op)
workspace.RunOperatorOnce(gi_cpu_op)
workspace.RunOperatorOnce(sparse_to_dense_op)
grad = workspace.FetchBlob('zeros')
else:
grad = workspace.FetchBlob(grad_name)
return loss, grad
def CheckSimple(
self,
op,
inputs,
input_to_check,
outputs_with_grads,
grad_ops=None,
input_device_options=None
):
"""Checks the operator in a very simple fashion by stacking a sum of
squares on the top.
Inputs:
op: the operator to be checked.
inputs: the input data in numpy arrays.
input_to_check: an index specifying which input blob we should
check.
          outputs_with_grads: indices specifying which output blobs we will
need to check gradients with. For these outputs, we will collect a
squared sum and also feed in their gradients.
          grad_ops: the gradient operators. If not given, we will get them
              from the gradient registry.
input_device_options: an optional mapping from input names to
DeviceOptions (to override the default DeviceOption)
Outputs:
          a tuple (passed, grad, grad_estimate): passed is True if the check
              passes; grad and grad_estimate are the analytic and numerically
              estimated gradients.
"""
if input_device_options is None:
input_device_options = {}
# Entering the checker workspace
old_ws_name = workspace.CurrentWorkspace()
if self._workspace_name != old_ws_name:
workspace.SwitchWorkspace(self._workspace_name, True)
op.device_option.CopyFrom(self._device_option)
if grad_ops is None:
# TODO(jiayq): use the gradient registration instead of the old
# hack.
grad_ops, g_input = core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
dims_to_check = inputs[input_to_check].size
# First, feed in the input.
for i, arr in enumerate(inputs):
workspace.FeedBlob(
op.input[i], arr,
input_device_options.get(
op.input[i], self._device_option))
# Get the loss and gradient for the original.
input_name = op.input[input_to_check]
grad_name = g_input[input_to_check]
loss, grad = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name, grad_name,
outputs_with_grads
)
grad_estimate = np.zeros_like(inputs[input_to_check])
if grad_estimate.shape != grad.shape:
raise Exception(
"Mismatched gradient shapes: estimated ({}), grad ({})".format(
grad_estimate.shape, grad.shape))
for current_dim in range(dims_to_check):
# Positive gradient
inputs[input_to_check].flat[current_dim] += self._stepsize
pos_loss, _ = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name,
grad_name, outputs_with_grads
)
# Negative gradient
inputs[input_to_check].flat[current_dim] -= self._stepsize * 2
neg_loss, _ = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name,
grad_name, outputs_with_grads
)
# Recover the value
inputs[input_to_check].flat[current_dim] += self._stepsize
grad_estimate.flat[current_dim] = (
pos_loss - neg_loss) / self._stepsize / 2
# Now, check correctness
fail_mat = ~np.isclose(
grad, grad_estimate, atol=self._threshold, rtol=self._threshold)
if np.any(fail_mat):
idx = np.flatnonzero(fail_mat)
print('Failed. [idx, grad, grad_estimate] are:')
print(np.vstack([idx, grad.flat[idx], grad_estimate.flat[idx]]).T)
ret = False
else:
ret = True
# After finishing, cleaning up things.
if self._workspace_name != old_ws_name:
# We reset the workspace to make sure everything intermediate is
# cleaned up. Note that there is no need to delete a workspace -
# when empty it takes a very limited amount of memory.
workspace.ResetWorkspace()
workspace.SwitchWorkspace(old_ws_name)
return ret, grad, grad_estimate
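if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module). It checks the gradient of a single Tanh operator against the
    # numerical estimate -- the operator choice, input shape and tolerances
    # here are assumptions.
    checker = GradientChecker(stepsize=1e-2, threshold=1e-2)
    x = np.random.randn(2, 3).astype(np.float32)
    tanh_op = core.CreateOperator('Tanh', ['x'], ['y'])
    passed, grad, grad_estimate = checker.CheckSimple(tanh_op, [x], 0, [0])
    print('gradient check passed:', passed)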
|
the-stack_106_23609 | ##
## MIT License
##
## Copyright (c) 2016 Luca Angioloni
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
import numpy as np
import cv2
from timeit import default_timer as timer
# face_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv3/3.1.0_3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
# eye_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv3/3.1.0_3/share/OpenCV/haarcascades/haarcascade_eye.xml')
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# fullBody = cv2.CascadeClassifier('/usr/local/Cellar/opencv3/3.1.0_3/share/OpenCV/haarcascades/haarcascade_fullbody.xml')
print(face_cascade.empty())  # False means the cascade XML was loaded successfully
camera = cv2.VideoCapture(0)
if camera.isOpened(): # try to get the first frame
rval, frame = camera.read()
else:
rval = False
count = 0
start_time = timer()
while rval:
count += 1
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
cv2.imshow("preview", frame)
now = timer()
if (now - start_time >= 1):
print(str(count) + "fps")
count = 0
start_time = now
rval, frame = camera.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
cv2.destroyWindow("preview") |
the-stack_106_23611 | vowels = 'aiyeou'
consonants = 'bkxznhdcwgpvjqtsrlmf'
ans = ''
while True:
try:
text = input()
for t in text:
lowerT = t.lower()
if lowerT in vowels:
tmp = vowels[(vowels.index(lowerT) + 3) % 6]
ans += tmp.upper() if t.isupper() else tmp
elif lowerT in consonants:
tmp = consonants[(consonants.index(lowerT) + 10) % 20]
ans += tmp.upper() if t.isupper() else tmp
else:
ans += t
ans += '\n'
except:
break
print(ans)
|
the-stack_106_23612 | #!/usr/bin/python
# Representation of logic circuit as AIG
import sys
class AigRef:
node = None
negate = False
def __init__(self, node, negate):
self.node = node
self.negate = negate
def encode(self):
return 2 * self.node.id + (1 if self.negate else 0)
def isZero(self):
return self.node.id == 0 and not self.negate
def isOne(self):
return self.node.id == 0 and self.negate
def __str__(self):
if self.isZero():
return "0"
elif self.isOne():
return "1"
else:
return ("!" if self.negate else "") + str(self.node.id)
def __eq__(self, other):
return self.node == other.node and self.negate == other.negate
def __hash__(self):
return self.encode()
# Represent either AND node or input node
class AigNode:
id = 0
children = [] # AigRefs of children. Empty if input node
isInput = False
def __init__(self, id, children):
self.id = id
self.children = children
self.isInput = len(children) == 0
# Generate string representing declaration line
def declare(self):
ilist = [2*self.id] + [c.encode() for c in self.children]
slist = [str(i) for i in ilist]
return " ".join(slist)
def __str__(self):
if len(self.children) > 1:
return "%d = %s & %s" % (self.id, str(self.children[0]), str(self.children[1]))
else:
return "%d = input" % (self.id)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return self.id
# Restricted form of AIG, only having combinational circuit
class AiGraph:
inputs = []
outputs = []
gates = [] # Set of all gates, including constant zero
gateMap = {} # Mapping from ref X ref --> gate
nextId = 1
comments = [] # Lines of comment text
zeroRef = None # Ref representing constant zero
oneRef = None # Ref representing constant one
def __init__(self, inputCount):
self.outputs = []
cnode = AigNode(0, [])
self.gates = [cnode]
        self.gateMap = {}
self.comments = []
self.nextId = 1
self.zeroRef = self.makeRef(cnode, False)
self.oneRef = self.makeRef(cnode, True)
self.inputs = [self.makeRef(self.newNode([]), False) for id in range(inputCount)]
def newNode(self, children):
node = AigNode(self.nextId, children)
self.nextId += 1
self.gates.append(node)
return node
def makeRef(self, node, negate):
return AigRef(node, negate)
def makeOutput(self, ref):
self.outputs.append(ref)
# Generate and of two refs.
# Handle degenerate cases
# Reuse existing gate if available
def findOrMake(self, ref1, ref2):
# Constant cases
if ref1.isZero() or ref2.isZero():
return self.zeroRef
elif ref1.isOne():
return ref2
elif ref2.isOne():
return ref1
key = (ref1, ref2)
if (ref1, ref2) not in self.gateMap:
gate = self.newNode([ref1, ref2])
self.gateMap[key] = gate
return self.makeRef(self.gateMap[key], False)
def negOp(self, ref):
return AigRef(ref.node, not ref.negate)
def andOp(self, ref1, ref2):
ref = self.findOrMake(ref1, ref2)
print("And(%s,%s)-->%s" % (str(ref1), str(ref2), str(ref)))
return ref
def orOp(self, ref1, ref2):
nref1 = self.negOp(ref1)
nref2 = self.negOp(ref2)
return self.negOp(self.andOp(nref1, nref2))
def iteOp(self, iref, tref, fref):
if tref == fref:
return tref
hi = self.andOp(iref, tref)
lo = self.andOp(self.negOp(iref), fref)
return self.orOp(hi, lo)
def comment(self, line):
self.comments.append(line)
def header(self, I):
M = len(self.inputs)
L = 0
O = len(self.outputs)
A = len(self.gates)-1
ilist = [M, I, L, O, A]
slist = ["aag"] + [str(i) for i in ilist]
return " ".join(slist)
def generate(self, outfile = sys.stdout):
realInputs = set([])
for oref in self.outputs:
onode = oref.node
if onode != self.gates[0] and onode.isInput and onode not in realInputs:
realInputs |= { onode }
for g in self.gates:
for cref in g.children:
c = cref.node
if c != self.gates[0] and c.isInput and c not in realInputs:
realInputs |= { c }
rlist = sorted([i for i in realInputs], key=lambda g: g.id)
h = self.header(len(rlist))
outfile.write(h + '\n')
for inode in rlist:
outfile.write(inode.declare() + '\n')
for oref in self.outputs:
outfile.write(str(oref.encode()) + '\n')
for gnode in self.gates[1:]:
if not gnode.isInput:
outfile.write(gnode.declare() + '\n')
if len(self.comments) > 1:
outfile.write("c\n")
for line in self.comments:
outfile.write(line + '\n')
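if __name__ == '__main__':
    # Small illustrative example (not part of the original module): build
    # f = (a & b) | !c over three inputs and print its ASCII AIGER encoding.
    g = AiGraph(3)
    a, b, c = g.inputs
    f = g.orOp(g.andOp(a, b), g.negOp(c))
    g.makeOutput(f)
    g.comment("f = (a & b) | !c")
    g.generate()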
|
the-stack_106_23613 | import time
import numpy as np
import cudarray as ca
from ..feed import Feed
from ..parameter import SharedParameter
import logging
log = logging.getLogger(__name__)
class GradientDescent(object):
def __init__(self, model, feed, learn_rule):
self.feed = Feed.from_any(feed)
self.learn_rule = learn_rule
self.model = model
self.params = None
self.learn_rule_states = None
self.reset()
def reset(self):
self.feed.reset()
self.model.setup(*self.feed.shapes)
self.params = [p for p in self.model.params
if not isinstance(p, SharedParameter)]
self.learn_rule_states = [self.learn_rule.init_state(p)
for p in self.params]
n_params = np.sum([p.array.size for p in self.params])
log.info('SGD: Model contains %i parameters.', n_params)
log.info('SGD: %d gradient updates per epoch.', self.feed.epoch_size)
def train_epoch(self):
batch_losses = []
for batch in self.feed.batches():
loss = np.array(ca.mean(self.model.update(*batch)))
for param, state in zip(self.params, self.learn_rule_states):
self.learn_rule.step(param, state)
batch_losses.append(loss)
epoch_loss = np.mean(batch_losses)
return epoch_loss
def train_epochs(self, n_epochs, annealer=None, error_fun=None):
self.train_patience(annealer, error_fun, min_epochs=n_epochs,
max_epochs=n_epochs)
def train_patience(self, annealer=None, error_fun=None, min_epochs=5,
max_epochs=1000, improvement_thresh=0.995,
patience_incr=1.5):
epoch = 0
converged = False
patience = min_epochs
best_score = np.inf
        start_time = time.perf_counter()
while epoch < max_epochs and not converged:
epoch += 1
self.model.phase = 'train'
self.model.setup(*self.feed.shapes)
epoch_loss = self.train_epoch()
if error_fun is None:
epoch_error = epoch_loss
else:
epoch_error = error_fun()
if epoch_error < best_score:
improvement = epoch_error / best_score
if improvement < improvement_thresh:
# increase patience on significant improvement
patience = max(patience, epoch*patience_incr)
best_score = epoch_error
if error_fun is None:
log.info('epoch %d/%d, loss %f', epoch, patience, epoch_loss)
else:
log.info('epoch %d/%d, loss %f, error %.4f', epoch, patience,
epoch_loss, epoch_error)
for param in self.params:
param.monitor()
if patience < epoch:
log.info('SGD: Converged.')
converged = True
if annealer is not None:
self.learn_rule.learn_rate = annealer.value(epoch)
        end_time = time.perf_counter()
if not converged:
log.info('SGD: Stopped by max_epochs.')
duration = float(end_time - start_time)
log.info('SGD: Optimization ran for %.2f minutes (%d epochs, '
'%.1f s/epoch)', duration/60, epoch, duration/epoch)
|
the-stack_106_23615 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import sys
import traceback
import shlex
from typing import List, Optional, Callable, Tuple
def _parser_exit(parser: argparse.ArgumentParser, proc: "DirectoryListProcessor", _=0,
message: Optional[str]=None) -> None:
"""
Override the default exit in the parser.
    :param parser: the argparse parser whose exit behaviour is overridden
    :param proc: DirectoryListProcessor whose successful_parse flag is cleared
    :param _: exit code. Unused because we don't exit
    :param message: Optional message
"""
if message:
parser._print_message(message, sys.stderr)
proc.successful_parse = False
class DirectoryListProcessor:
def __init__(self, args: Optional[List[str]], description: str, infile_suffix: Optional[str],
outfile_suffix: Optional[str], addargs: Optional[Callable[[argparse.ArgumentParser], None]]=None,
postparse: Optional[Callable[[argparse.Namespace], None]]=None,
noexit: bool=False, fromfile_prefix_chars: Optional[str]=None):
""" Build a directory list processor
:param args: Input arguments such as supplied from sys.argv. None means use sys.argv
:param description: Description of the function. Appears in a help string
:param infile_suffix: Suffix filter on input file. If absent, all files not starting with "." pass
:param outfile_suffix: Suffix to add to output file. If absent, name is same as input
:param addargs: Function to add arguments before parsing. Signature: addargs(parser: argparse.ArgumentParser)
:param postparse: Function to review arguments post parsing. Signature: postparse(opts: argparse.Namespace)
:param noexit: Do not exit the parser on error. Primarily for testing. If an exitable error occurs,
            successful_parse is set to False
:param fromfile_prefix_chars: parser file prefix characters
"""
self.infile_suffix = infile_suffix
self.outfile_suffix = outfile_suffix
self.successful_parse = True
self.fromfile_prefix_chars = fromfile_prefix_chars if fromfile_prefix_chars else ""
self.parser = argparse.ArgumentParser(description=description, fromfile_prefix_chars=fromfile_prefix_chars)
self.parser.add_argument("-i", "--infile", help="Input file(s)", nargs="*")
self.parser.add_argument("-id", "--indir", help="Input directory")
self.parser.add_argument("-o", "--outfile", help="Output file(s)", nargs="*")
self.parser.add_argument("-od", "--outdir", help="Output directory")
self.parser.add_argument("-f", "--flatten", help="Flatten output directory", action="store_true")
self.parser.add_argument("-s", "--stoponerror", help="Stop on processing error", action="store_true")
if addargs is not None:
addargs(self.parser)
if noexit:
self.parser.exit = lambda *args: _parser_exit(self.parser, self, *args)
self.opts = self.parser.parse_args(self.decode_file_args(args if args is not None else sys.argv[1:]))
if self.successful_parse:
if self.opts.indir and not os.path.isdir(self.opts.indir):
if os.path.exists(self.opts.indir):
self.parser.error("{} is not a directory".format(self.opts.indir))
else:
self.parser.error("Directory {} does not exist".format(self.opts.indir))
return
n_infiles = len(self.opts.infile) if self.opts.infile else 0
n_outfiles = len(self.opts.outfile) if self.opts.outfile else 0
if (n_infiles > 1 or n_outfiles > 1) and n_infiles != n_outfiles and n_outfiles > 1:
self.parser.error("Number of input and output files must match")
return
if postparse is not None:
postparse(self.opts)
def decode_file_args(self, argv: List[str]) -> List[str]:
"""
Preprocess any arguments that begin with the fromfile prefix char(s).
This replaces the one in Argparse because it
a) doesn't process "-x y" correctly and
b) ignores bad files
:param argv: raw options list
:return: options list with file references replaced
"""
for arg in [arg for arg in argv if arg[0] in self.fromfile_prefix_chars]:
argv.remove(arg)
with open(arg[1:]) as config_file:
argv += shlex.split(config_file.read())
return self.decode_file_args(argv)
return argv
@staticmethod
def _proc_error(ifn: str, e: Exception) -> None:
""" Report an error
:param ifn: Input file name
:param e: Exception to report
"""
type_, value_, traceback_ = sys.exc_info()
traceback.print_tb(traceback_, file=sys.stderr)
print(file=sys.stderr)
print("***** ERROR: %s" % ifn, file=sys.stderr)
print(str(e), file=sys.stderr)
def _call_proc(self,
proc: Callable[[Optional[str], Optional[str], argparse.Namespace], bool],
ifn: Optional[str],
ofn: Optional[str]) -> bool:
""" Call the actual processor and intercept anything that goes wrong
:param proc: Process to call
:param ifn: Input file name to process. If absent, typical use is stdin
:param ofn: Output file name. If absent, typical use is stdout
:return: true means process was successful
"""
rslt = False
try:
rslt = proc(ifn, ofn, self.opts)
except Exception as e:
self._proc_error(ifn, e)
return True if rslt or rslt is None else False
def _check_filter(self,
fn: Optional[str],
dirpath: Optional[str],
file_filter: Optional[Callable[[str], bool]],
file_filter_2: Optional[Callable[[Optional[str], str, argparse.Namespace], bool]]) -> bool:
rval = (fn is None or ('://' in fn or fn.endswith(self.infile_suffix))) and \
(not file_filter or file_filter(fn)) and \
(not file_filter_2 or file_filter_2(fn, dirpath if dirpath is not None else '', self.opts)) and \
(file_filter or file_filter_2 or fn is None or not fn.startswith('.'))
return rval
def run(self,
proc: Callable[[Optional[str], Optional[str], argparse.Namespace], Optional[bool]],
file_filter: Optional[Callable[[str], bool]]=None,
file_filter_2: Optional[Callable[[Optional[str], str, argparse.Namespace], bool]]=None) \
-> Tuple[int, int]:
""" Run the directory list processor calling a function per file.
:param proc: Process to invoke. Args: input_file_name, output_file_name, argparse options. Return pass or fail.
No return also means pass
:param file_filter: Additional filter for testing file names, types, etc.
:param file_filter_2: File filter that includes directory, filename and opts
(separate for backwards compatibility)
:return: tuple - (number of files passed to proc: int, number of files that passed proc)
"""
nfiles = 0
nsuccess = 0
# List of one or more input and output files
if self.opts.infile:
for file_idx in range(len(self.opts.infile)):
in_f = self.opts.infile[file_idx]
if self._check_filter(in_f, self.opts.indir, file_filter, file_filter_2):
fn = os.path.join(self.opts.indir, in_f) if self.opts.indir else in_f
nfiles += 1
if self._call_proc(proc, fn, self._outfile_name('', fn, outfile_idx=file_idx)):
nsuccess += 1
elif self.opts.stoponerror:
return nfiles, nsuccess
# Single input from the command line
elif not self.opts.indir:
if self._check_filter(None, None, file_filter, file_filter_2):
nfiles += 1
if self._call_proc(proc, None, self._outfile_name('', '')):
nsuccess += 1
# Input directory that needs to be navigated
else:
for dirpath, _, filenames in os.walk(self.opts.indir):
for fn in filenames:
if self._check_filter(fn, dirpath, file_filter, file_filter_2):
nfiles += 1
if self._call_proc(proc, os.path.join(dirpath, fn), self._outfile_name(dirpath, fn)):
nsuccess += 1
elif self.opts.stoponerror:
return nfiles, nsuccess
return nfiles, nsuccess
def _outfile_name(self, dirpath: str, infile: str, outfile_idx: int=0) -> Optional[str]:
""" Construct the output file name from the input file. If a single output file was named and there isn't a
directory, return the output file.
:param dirpath: Directory path to infile
:param infile: Name of input file
:param outfile_idx: Index into output file list (for multiple input/output files)
:return: Full name of output file or None if output is not otherwise supplied
"""
if not self.opts.outfile and not self.opts.outdir:
            # Up to the process itself to decide what to do with it
return None
if self.opts.outfile:
# Output file specified - either one aggregate file or a 1 to 1 list
outfile_element = self.opts.outfile[0] if len(self.opts.outfile) == 1 else self.opts.outfile[outfile_idx]
elif self.opts.infile:
# Input file name(s) have been supplied
if '://' in infile:
# Input file is a URL -- generate an output file of the form "_url[n]"
outfile_element = "_url{}".format(outfile_idx + 1)
else:
outfile_element = os.path.basename(infile).rsplit('.', 1)[0]
else:
# Doing an input directory to an output directory
relpath = dirpath[len(self.opts.indir) + 1:] if not self.opts.flatten and self.opts.indir else ''
outfile_element = os.path.join(relpath, os.path.split(infile)[1][:-len(self.infile_suffix)])
return (os.path.join(self.opts.outdir, outfile_element) if self.opts.outdir else outfile_element) + \
(self.outfile_suffix if not self.opts.outfile and self.outfile_suffix else '')
def default_proc(ifn: Optional[str], ofn: Optional[str], _: argparse.Namespace) -> bool:
print("Input file name: %s -- Output file name: %s" % (ifn if ifn is not None else "stdin",
ofn if ofn is not None else "stdout"))
return True
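# Illustrative use of the addargs hook (not part of the original module); the
# option, suffixes and processing logic below are arbitrary examples.
#
# def _addargs(parser: argparse.ArgumentParser) -> None:
#     parser.add_argument("-v", "--verbose", help="Chatty output", action="store_true")
#
# def _verbose_proc(ifn: Optional[str], ofn: Optional[str], opts: argparse.Namespace) -> bool:
#     if opts.verbose:
#         print("Processing %s --> %s" % (ifn, ofn))
#     return True
#
# DirectoryListProcessor(None, "Verbose DLP", ".txt", ".out",
#                        addargs=_addargs).run(_verbose_proc)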
if __name__ == '__main__':
DirectoryListProcessor(None, "DLP Framework", "", "").run(default_proc)
|
the-stack_106_23616 | import discord
import src.commands as cmd
import src.config as cfg
from .log import logger
client = discord.Client()
@client.event
async def on_ready(*args, **kwargs) -> None:
logger.info('discord ready')
@client.event
async def on_message(message: discord.Message) -> None:
if message.author.id == client.user.id and message.content.startswith(cfg.prefix):
parts = message.content.split(' ')
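        # e.g. with cfg.prefix == "!", a message "!ping 123" looks up
        # cmd.commands["ping"] and passes the whole Message object to it
        # (the prefix value itself is an assumption here).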
command = cmd.commands[parts[0][len(cfg.prefix):]]
await command(message)
|
the-stack_106_23617 | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'epmcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
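# For example (illustrative): "%1 of %2" splits into ({'1', '2'}, []) and is
# treated as a Qt-style message, while "%s copied to %s" splits into
# (set(), ['s', 's']) and is treated as a strprintf-style message.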
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # need to override the encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
the-stack_106_23619 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="youtubedata",
version="1.1.1",
author="ofc",
author_email="[email protected]",
description="YouTube Data provides comprehensive YouTube video metadata scraping. Results are returned in a dictionary containing likes, dislikes, views, published dates and more.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AAbdelmalek/youtubedata_library",
packages=setuptools.find_packages(),
install_requires=["requests","beautifulsoup4","lxml"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Multimedia :: Video"
],
) |
the-stack_106_23620 | from __future__ import absolute_import
import logging
import six
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from sudo.views import redirect_to_sudo
from sentry import roles
from sentry.auth import access
from sentry.models import (
AuditLogEntry, Organization, OrganizationMember, OrganizationStatus, Project, ProjectStatus,
Team, TeamStatus
)
from sentry.utils import auth
from sentry.web.helpers import render_to_response
from sentry.api.serializers import serialize
logger = logging.getLogger(__name__)
audit_logger = logging.getLogger('sentry.audit.ui')
class OrganizationMixin(object):
# TODO(dcramer): move the implicit organization logic into its own class
# as it's only used in a single location and over complicates the rest of
# the code
def get_active_organization(self, request, organization_slug=None):
"""
Returns the currently active organization for the request or None
if no organization.
"""
# TODO(dcramer): this is a huge hack, and we should refactor this
# it is currently needed to handle the is_auth_required check on
# OrganizationBase
active_organization = getattr(self, '_active_org', None)
cached_active_org = (
active_organization and active_organization[0].slug == organization_slug
and active_organization[1] == request.user
)
if cached_active_org:
return active_organization[0]
active_organization = None
is_implicit = organization_slug is None
if is_implicit:
organization_slug = request.session.get('activeorg')
if organization_slug is not None:
if request.is_superuser():
try:
active_organization = Organization.objects.get_from_cache(
slug=organization_slug,
)
if active_organization.status != OrganizationStatus.VISIBLE:
raise Organization.DoesNotExist
except Organization.DoesNotExist:
logger.info('Active organization [%s] not found', organization_slug)
if active_organization is None:
organizations = Organization.objects.get_for_user(
user=request.user,
)
if active_organization is None and organization_slug:
try:
active_organization = six.next(
o for o in organizations if o.slug == organization_slug
)
except StopIteration:
logger.info('Active organization [%s] not found in scope', organization_slug)
if is_implicit:
del request.session['activeorg']
active_organization = None
if active_organization is None:
if not is_implicit:
return None
try:
active_organization = organizations[0]
except IndexError:
logger.info('User is not a member of any organizations')
pass
if active_organization and self._is_org_member(request.user, active_organization):
if active_organization.slug != request.session.get('activeorg'):
request.session['activeorg'] = active_organization.slug
self._active_org = (active_organization, request.user)
return active_organization
def _is_org_member(self, user, organization):
return OrganizationMember.objects.filter(
user=user,
organization=organization,
).exists()
def get_active_team(self, request, organization, team_slug):
"""
Returns the currently selected team for the request or None
if no match.
"""
try:
team = Team.objects.get_from_cache(
slug=team_slug,
organization=organization,
)
except Team.DoesNotExist:
return None
if team.status != TeamStatus.VISIBLE:
return None
return team
def get_active_project(self, request, organization, project_slug):
try:
project = Project.objects.get_from_cache(
slug=project_slug,
organization=organization,
)
except Project.DoesNotExist:
return None
if project.status != ProjectStatus.VISIBLE:
return None
return project
def redirect_to_org(self, request):
from sentry import features
# TODO(dcramer): deal with case when the user cannot create orgs
organization = self.get_active_organization(request)
if organization:
url = reverse('sentry-organization-home', args=[organization.slug])
elif not features.has('organizations:create'):
return self.respond('sentry/no-organization-access.html', status=403)
else:
url = '/organizations/new/'
return HttpResponseRedirect(url)
class BaseView(View, OrganizationMixin):
auth_required = True
# TODO(dcramer): change sudo so it can be required only on POST
sudo_required = False
def __init__(self, auth_required=None, sudo_required=None, *args, **kwargs):
if auth_required is not None:
self.auth_required = auth_required
if sudo_required is not None:
self.sudo_required = sudo_required
super(BaseView, self).__init__(*args, **kwargs)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
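        # Request pipeline: auth check -> sudo check -> convert_args() ->
        # access resolution -> permission check -> handle().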
if self.is_auth_required(request, *args, **kwargs):
return self.handle_auth_required(request, *args, **kwargs)
if self.is_sudo_required(request, *args, **kwargs):
return self.handle_sudo_required(request, *args, **kwargs)
args, kwargs = self.convert_args(request, *args, **kwargs)
request.access = self.get_access(request, *args, **kwargs)
if not self.has_permission(request, *args, **kwargs):
return self.handle_permission_required(request, *args, **kwargs)
self.request = request
self.default_context = self.get_context_data(request, *args, **kwargs)
return self.handle(request, *args, **kwargs)
def get_access(self, request, *args, **kwargs):
return access.DEFAULT
def convert_args(self, request, *args, **kwargs):
return (args, kwargs)
def handle(self, request, *args, **kwargs):
return super(BaseView, self).dispatch(request, *args, **kwargs)
def is_auth_required(self, request, *args, **kwargs):
return (
self.auth_required and not (request.user.is_authenticated() and request.user.is_active)
)
def handle_auth_required(self, request, *args, **kwargs):
auth.initiate_login(request, next_url=request.get_full_path())
if 'organization_slug' in kwargs:
redirect_to = reverse('sentry-auth-organization', args=[kwargs['organization_slug']])
else:
redirect_to = auth.get_login_url()
return self.redirect(redirect_to)
def is_sudo_required(self, request, *args, **kwargs):
return self.sudo_required and not request.is_sudo()
def handle_sudo_required(self, request, *args, **kwargs):
return redirect_to_sudo(request.get_full_path())
def has_permission(self, request, *args, **kwargs):
return True
def handle_permission_required(self, request, *args, **kwargs):
redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
    def get_no_permission_url(self, request, *args, **kwargs):
return reverse('sentry-login')
def get_context_data(self, request, **kwargs):
context = csrf(request)
return context
def respond(self, template, context=None, status=200):
default_context = self.default_context
if context:
default_context.update(context)
return render_to_response(template, default_context, self.request, status=status)
def redirect(self, url):
return HttpResponseRedirect(url)
def get_team_list(self, user, organization):
return Team.objects.get_for_user(
organization=organization,
user=user,
with_projects=True,
)
def create_audit_entry(self, request, transaction_id=None, **kwargs):
entry = AuditLogEntry(
actor=request.user if request.user.is_authenticated() else None,
# TODO(jtcunning): assert that REMOTE_ADDR is a real IP.
ip_address=request.META['REMOTE_ADDR'],
**kwargs
)
# Only create a real AuditLogEntry record if we are passing an event type
# otherwise, we want to still log to our actual logging
if entry.event is not None:
entry.save()
extra = {
'ip_address': entry.ip_address,
'organization_id': entry.organization_id,
'object_id': entry.target_object,
'entry_id': entry.id,
'actor_label': entry.actor_label
}
if transaction_id is not None:
extra['transaction_id'] = transaction_id
audit_logger.info(entry.get_event_display(), extra=extra)
return entry
class OrganizationView(BaseView):
"""
Any view acting on behalf of an organization should inherit from this base.
The 'organization' keyword argument is automatically injected into the
resulting dispatch.
"""
required_scope = None
valid_sso_required = True
def get_access(self, request, organization, *args, **kwargs):
if organization is None:
return access.DEFAULT
return access.from_request(request, organization)
def get_context_data(self, request, organization, **kwargs):
context = super(OrganizationView, self).get_context_data(request)
context['organization'] = organization
context['TEAM_LIST'] = self.get_team_list(request.user, organization)
context['ACCESS'] = request.access.to_django_context()
return context
def has_permission(self, request, organization, *args, **kwargs):
if organization is None:
return False
if self.valid_sso_required:
if not request.access.sso_is_valid:
return False
if self.needs_sso(request, organization):
return False
if self.required_scope and not request.access.has_scope(self.required_scope):
logger.info(
'User %s does not have %s permission to access organization %s', request.user,
self.required_scope, organization
)
return False
return True
def is_auth_required(self, request, organization_slug=None, *args, **kwargs):
result = super(OrganizationView, self).is_auth_required(request, *args, **kwargs)
if result:
return result
# if the user is attempting to access an organization that *may* be
# accessible if they simply re-authenticate, we want to allow that
# this opens up a privacy hole, but the pros outweigh the cons
if not organization_slug:
return False
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if not active_organization:
try:
Organization.objects.get_from_cache(slug=organization_slug)
except Organization.DoesNotExist:
pass
else:
return True
return False
def handle_permission_required(self, request, organization, *args, **kwargs):
if self.needs_sso(request, organization):
logger.info(
'access.must-sso',
extra={
'organization_id': organization.id,
'user_id': request.user.id,
}
)
auth.initiate_login(request, next_url=request.get_full_path())
redirect_uri = reverse('sentry-auth-organization', args=[organization.slug])
else:
redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
def needs_sso(self, request, organization):
if not organization:
return False
# XXX(dcramer): this branch should really never hit
if not request.user.is_authenticated():
return False
if not self.valid_sso_required:
return False
if not request.access.requires_sso:
return False
if not auth.has_completed_sso(request, organization.id):
return True
if not request.access.sso_is_valid:
return True
return False
def convert_args(self, request, organization_slug=None, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
kwargs['organization'] = active_organization
return (args, kwargs)
def get_allowed_roles(self, request, organization, member=None):
can_admin = request.access.has_scope('member:admin')
allowed_roles = []
if can_admin and not request.is_superuser():
acting_member = OrganizationMember.objects.get(
user=request.user,
organization=organization,
)
if member and roles.get(acting_member.role).priority < roles.get(member.role).priority:
can_admin = False
else:
allowed_roles = [
r for r in roles.get_all()
if r.priority <= roles.get(acting_member.role).priority
]
can_admin = bool(allowed_roles)
elif request.is_superuser():
allowed_roles = roles.get_all()
return (can_admin, allowed_roles, )
class TeamView(OrganizationView):
"""
Any view acting on behalf of a team should inherit from this base and the
matching URL pattern must pass 'team_slug'.
Two keyword arguments are added to the resulting dispatch:
- organization
- team
"""
def get_context_data(self, request, organization, team, **kwargs):
context = super(TeamView, self).get_context_data(request, organization)
context['team'] = team
return context
def has_permission(self, request, organization, team, *args, **kwargs):
if team is None:
return False
rv = super(TeamView, self).has_permission(request, organization)
if not rv:
return rv
if self.required_scope:
if not request.access.has_team_scope(team, self.required_scope):
logger.info(
'User %s does not have %s permission to access team %s', request.user,
self.required_scope, team
)
return False
elif not request.access.has_team(team):
logger.info('User %s does not have access to team %s', request.user, team)
return False
return True
def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if active_organization:
active_team = self.get_active_team(
request=request,
team_slug=team_slug,
organization=active_organization,
)
else:
active_team = None
kwargs['organization'] = active_organization
kwargs['team'] = active_team
return (args, kwargs)
class ProjectView(TeamView):
"""
Any view acting on behalf of a project should inherit from this base and the
matching URL pattern must pass 'team_slug' as well as 'project_slug'.
Three keyword arguments are added to the resulting dispatch:
- organization
- team
- project
"""
def get_context_data(self, request, organization, team, project, **kwargs):
context = super(ProjectView, self).get_context_data(request, organization, team)
context['project'] = project
context['processing_issues'] = serialize(project).get('processingIssues', 0)
return context
def has_permission(self, request, organization, team, project, *args, **kwargs):
if project is None:
return False
if team is None:
return False
rv = super(ProjectView, self).has_permission(request, organization, team)
if not rv:
return rv
if self.required_scope:
if not request.access.has_team_scope(team, self.required_scope):
logger.info(
'User %s does not have %s permission to access project %s', request.user,
self.required_scope, project
)
return False
elif not request.access.has_team(team):
logger.info('User %s does not have access to project %s', request.user, project)
return False
return True
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
active_organization = self.get_active_organization(
request=request,
organization_slug=organization_slug,
)
if active_organization:
active_project = self.get_active_project(
request=request,
organization=active_organization,
project_slug=project_slug,
)
else:
active_project = None
if active_project:
active_team = active_project.team
else:
active_team = None
kwargs['project'] = active_project
kwargs['team'] = active_team
kwargs['organization'] = active_organization
return (args, kwargs)
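# Illustrative subclass (not part of this module) showing how the base classes
# above are typically used; the template path and required_scope value are
# assumptions.
#
# class OrganizationOverviewView(OrganizationView):
#     required_scope = 'org:read'
#
#     def handle(self, request, organization):
#         context = {'organization': organization}
#         return self.respond('sentry/organization-overview.html', context)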
|
the-stack_106_23621 | from django.contrib.auth import get_user_model
from django.core.management import call_command
from qcat.tests import TestCase
User = get_user_model()
def create_new_user(id=1, email='[email protected]', lastname='foo', firstname='bar'):
user, created = User.objects.get_or_create(
id=id, email=email,
defaults={'lastname': lastname, 'firstname': firstname}
)
return user
class SuperUserCommandTest(TestCase):
"""
Tests for management commands.
"""
def setUp(self):
self.user = create_new_user()
def tearDown(self):
self.user.delete()
def test_command_set_one_superuser(self):
self.assertFalse(self.user.is_superuser)
call_command('set_superuser', '[email protected]')
# Refresh the object, as it was changed externally.
self.user.refresh_from_db(fields=['is_superuser'])
self.assertTrue(self.user.is_superuser)
def test_command_set_multiple_superusers(self):
make_superuser = ['[email protected]', '[email protected]']
for index, email in enumerate(make_superuser):
create_new_user(id=index + 5, email=email)
call_command('set_superuser', *make_superuser)
superuser_emails = get_user_model().objects.filter(
is_superuser=True
).values_list('email', flat=True)
self.assertListEqual(
sorted(list(superuser_emails)),
sorted(make_superuser))
|
the-stack_106_23623 | from typing import (
Iterator,
NamedTuple,
)
from async_service import (
as_service,
ManagerAPI,
)
from eth_utils import to_tuple
import psutil
from p2p import trio_utils
from trinity.components.builtin.metrics.registry import HostMetricsRegistry
from trinity.exceptions import MetricsReportingError
class CpuStats(NamedTuple):
# Time spent on all processes
global_time: int
# Time spent waiting on IO
global_wait_io: int
class DiskStats(NamedTuple):
# Number of read operations executed
read_count: int
# Number of bytes read
read_bytes: int
# Number of write operations executed
write_count: int
# Number of bytes written
write_bytes: int
class NetworkStats(NamedTuple):
# Number of network packets sent
out_packets: int
# Number of network packets received
in_packets: int
class ProcessStats(NamedTuple):
process_count: int
thread_count: int
class SystemStats(NamedTuple):
cpu_stats: CpuStats
disk_stats: DiskStats
network_stats: NetworkStats
process_stats: ProcessStats
def read_cpu_stats() -> CpuStats:
stats = psutil.cpu_times()
try:
# iowait is only available with linux
iowait = stats.iowait
except AttributeError:
iowait = 0
return CpuStats(
global_time=int(stats.user + stats.nice + stats.system),
global_wait_io=int(iowait),
)
def read_disk_stats() -> DiskStats:
stats = psutil.disk_io_counters()
return DiskStats(
read_count=stats.read_count,
read_bytes=stats.read_bytes,
write_count=stats.write_count,
write_bytes=stats.write_bytes,
)
def read_network_stats() -> NetworkStats:
stats = psutil.net_io_counters()
return NetworkStats(
in_packets=stats.packets_recv,
out_packets=stats.packets_sent,
)
@to_tuple
def get_all_python_processes() -> Iterator[psutil.Process]:
for process in psutil.process_iter():
try:
commands = process.cmdline()
except psutil.AccessDenied:
continue
except psutil.ZombieProcess:
continue
if any('python' in cmd for cmd in commands):
yield process
def get_main_trinity_process() -> psutil.Process:
python_processes = get_all_python_processes()
for process in python_processes:
if any('trinity' in cmd for cmd in process.cmdline()):
return process
raise MetricsReportingError("No 'trinity' process found.")
def read_process_stats() -> ProcessStats:
main_trinity_process = get_main_trinity_process()
child_processes = main_trinity_process.children(recursive=True)
num_processes = len(child_processes) + 1
num_child_threads = sum([process.num_threads() for process in child_processes])
num_threads = num_child_threads + main_trinity_process.num_threads()
return ProcessStats(
process_count=num_processes,
thread_count=num_threads,
)
@as_service
async def collect_process_metrics(manager: ManagerAPI,
registry: HostMetricsRegistry,
frequency_seconds: int) -> None:
previous: SystemStats = None
cpu_sysload_gauge = registry.gauge('trinity.system/cpu/sysload.gauge')
cpu_syswait_gauge = registry.gauge('trinity.system/cpu/syswait.gauge')
disk_readdata_meter = registry.meter('trinity.system/disk/readdata.meter')
disk_writedata_meter = registry.meter('trinity.system/disk/writedata.meter')
network_in_packets_meter = registry.meter('trinity.network/in/packets/total.meter')
network_out_packets_meter = registry.meter('trinity.network/out/packets/total.meter')
process_count_gauge = registry.gauge('trinity.system/processes/count.gauge')
thread_count_gauge = registry.gauge('trinity.system/threads/count.gauge')
async for _ in trio_utils.every(frequency_seconds):
current = SystemStats(
cpu_stats=read_cpu_stats(),
disk_stats=read_disk_stats(),
network_stats=read_network_stats(),
process_stats=read_process_stats(),
)
if previous is not None:
global_time = current.cpu_stats.global_time - previous.cpu_stats.global_time
cpu_sysload_gauge.set_value(global_time / frequency_seconds)
global_wait = current.cpu_stats.global_wait_io - previous.cpu_stats.global_wait_io
cpu_syswait_gauge.set_value(global_wait / frequency_seconds)
read_bytes = current.disk_stats.read_bytes - previous.disk_stats.read_bytes
disk_readdata_meter.mark(read_bytes)
write_bytes = current.disk_stats.write_bytes - previous.disk_stats.write_bytes
disk_writedata_meter.mark(write_bytes)
in_packets = current.network_stats.in_packets - previous.network_stats.in_packets
network_in_packets_meter.mark(in_packets)
out_packets = current.network_stats.out_packets - previous.network_stats.out_packets
network_out_packets_meter.mark(out_packets)
process_count_gauge.set_value(current.process_stats.process_count)
thread_count_gauge.set_value(current.process_stats.thread_count)
previous = current
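if __name__ == '__main__':
    # Quick manual check (illustrative; not part of the component): print a
    # one-off snapshot of the raw readings the collector above is built on.
    print(read_cpu_stats())
    print(read_disk_stats())
    print(read_network_stats())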
|
the-stack_106_23626 | """The actual implementation."""
import os
from typing import List
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.transforms.post_transforms import SphinxPostTransform
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import NodeMatcher
def setup(app):
"""Entry point for sphinx theming."""
app.require_sphinx("3.0")
app.add_directive("tab", TabDirective)
app.add_post_transform(TabHtmlTransform)
app.add_node(_TabInput, html=(_TabInput.visit, _TabInput.depart))
app.add_node(_TabLabel, html=(_TabLabel.visit, _TabLabel.depart))
# Include our static assets
static_dir = os.path.join(os.path.dirname(__file__), "static")
app.connect(
"builder-inited", (lambda app: app.config.html_static_path.append(static_dir))
)
app.add_js_file("tabs.js")
app.add_css_file("tabs.css")
class TabContainer(nodes.container):
"""The initial tree-node for holding tab content."""
class _GeneralHTMLTagElement(nodes.Element, nodes.General):
@staticmethod
def visit(translator, node):
attributes = node.attributes.copy()
# Nobody needs this crap.
attributes.pop("ids")
attributes.pop("classes")
attributes.pop("names")
attributes.pop("dupnames")
attributes.pop("backrefs")
text = translator.starttag(node, node.tagname, **attributes)
translator.body.append(text.strip())
@staticmethod
def depart(translator, node):
if node.endtag:
translator.body.append(f"</{node.tagname}>")
class _TabInput(_GeneralHTMLTagElement):
tagname = "input"
endtag = False
class _TabLabel(_GeneralHTMLTagElement):
tagname = "label"
endtag = True
class TabDirective(SphinxDirective):
"""Tabbed content in Sphinx documentation."""
required_arguments = 1 # directive takes a single argument.
final_argument_whitespace = True # this allows that argument to contain spaces.
has_content = True
option_spec = {
"new-set": directives.flag,
}
def run(self):
"""Parse a tabs directive."""
self.assert_has_content()
container = TabContainer("", type="tab", new_set="new-set" in self.options)
self.set_source_info(container)
# Handle the label (non-plain-text variants allowed)
textnodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
label = nodes.rubric(self.arguments[0], *textnodes)
# Handle the content
content = nodes.container("", is_div=True, classes=["tab-content"])
self.state.nested_parse(self.content, self.content_offset, content)
container += label
container += content
return [container]
def _should_start_new_set(node, current_tab_set):
# The current set is empty.
if not current_tab_set:
return False
# Explicitly requested for a new tab set.
if node["new_set"]:
return True
# From here, this code is trying to figure if the given node immediately
# follows the previous tab, and hence should be in the same set.
prev_node = current_tab_set[-1]
if prev_node.parent != node.parent: # Different parent
return True
parent = node.parent
if parent.index(node) - 1 != parent.index(prev_node):
return True
# This node should be in the same set, so don't start a new one.
return False
class TabHtmlTransform(SphinxPostTransform):
"""Transform output of TabDirective into usable chunks."""
default_priority = 200
formats = ["html"]
def run(self):
"""Locate and replace `TabContainer`s."""
matcher = NodeMatcher(TabContainer)
set_counter = 0
current_tab_set = [] # type: List[TabContainer]
for node in self.document.traverse(matcher): # type: TabContainer
if _should_start_new_set(node, current_tab_set):
self.finalize_set(current_tab_set, set_counter)
set_counter += 1
current_tab_set = []
current_tab_set.append(node)
if current_tab_set:
self.finalize_set(current_tab_set, set_counter)
def finalize_set(self, tab_set: List[TabContainer], set_counter: int):
"""Add these TabContainers as a single-set-of-tabs."""
assert tab_set
parent = tab_set[0].parent
container = nodes.container("", is_div=True, classes=["tab-set"])
container.parent = parent
tab_set_name = f"tab-set--{set_counter}"
node_counter = 0
for node in tab_set:
node_counter += 1
tab_id = tab_set_name + f"-input--{node_counter}"
title, content = node.children
# <input>, for storing state in radio boxes.
input_node = _TabInput(
type="radio", ids=[tab_id], name=tab_set_name, classes=["tab-input"]
)
# <label>
label_node = _TabLabel(
"", *title.children, **{"for": tab_id}, classes=["tab-label"]
)
# For error messages
input_node.source = node.source
input_node.line = node.line
label_node.source = node.source
label_node.line = node.line
# Populate with the content.
container += input_node
container += label_node
container += content
container.children[0]["checked"] = True
# Replace all nodes in tab_set, with the container.
start_at = parent.index(tab_set[0])
end_at = parent.index(tab_set[-1])
parent.children = (
parent.children[:start_at] + [container] + parent[end_at + 1 :]
)
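# For reference (illustrative sketch of the generated markup): each finalized
# set renders roughly as
#
#   <div class="tab-set">
#     <input class="tab-input" type="radio" id="tab-set--0-input--1" name="tab-set--0" checked>
#     <label class="tab-label" for="tab-set--0-input--1">First label</label>
#     <div class="tab-content">...</div>
#     <input class="tab-input" type="radio" id="tab-set--0-input--2" name="tab-set--0">
#     <label class="tab-label" for="tab-set--0-input--2">Second label</label>
#     <div class="tab-content">...</div>
#   </div>
#
# with the tabs.css/tabs.js assets registered in setup() providing the show/hide behaviour.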
|
the-stack_106_23631 | '''
Module : Main
Description : The main entry point for the program.
Copyright : (c) Bernie Pope, 16 Oct 2019
License : MIT
Maintainer : [email protected]
Portability : POSIX
A plotting and data analytics program for the command line
'''
import sys
import logging
import pandas as pd
from hatch.command_base import CommandBase
import hatch.args as args
import hatch.utils as utils
import hatch.constants as const
import hatch.plot
import hatch.transform
import hatch.io
import hatch.pca
import hatch.info
import hatch.stats
import hatch.cluster
def init_logging(log_filename=None):
'''If the log_filename is defined, then
initialise the logging facility, and write log statement
indicating the program has started, and also write out the
command line from sys.argv
Arguments:
log_filename: either None, if logging is not required, or the
string name of the log file to write to
Result:
None
'''
if log_filename is not None:
logging.basicConfig(filename=log_filename,
#level=logging.INFO,
level=logging.CRITICAL,
filemode='w',
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
logging.info('program started')
logging.info('command line: %s', ' '.join(sys.argv))
else:
logging.basicConfig(level=logging.CRITICAL)
def is_first_command_input(commands):
if len(commands) > 0:
first_command = commands[0]
type_first_command = type(first_command)
return (type_first_command is hatch.io.In) or (type_first_command is hatch.io.Stdin)
else:
return False
def is_last_command_transform_or_input(commands):
if len(commands) > 0:
last_command = commands[-1]
type_last_command = type(last_command)
return (last_command.category == 'transformation') or (type_last_command is hatch.io.In) or (type_last_command is hatch.io.Stdin)
else:
return False
# stdin may only be used at most once, and only at the beginning of the command sequence
def stdin_used_safely(commands):
count = 0
for command in commands:
if type(command) is hatch.io.Stdin:
count += 1
if count == 0:
return True
elif count == 1:
        return type(commands[0]) is hatch.io.Stdin
else:
return False
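# Sketch of the implicit plumbing performed in main() below (command names are
# placeholders; the concrete command objects are built by hatch.args):
#
#   [transform]            -> [Stdin(), transform, Stdout()]   # both ends implicit
#   [In(...), transform]   -> [In(...), transform, Stdout()]   # output implicit
#   [Stdin(), plot]        -> unchanged (a plot is not a transformation)
#
# stdin_used_safely() then rejects any command stream where Stdin appears more
# than once or anywhere other than the first position.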
def main():
df = None
original_commands = args.parse_commandline()
new_commands = original_commands
init_logging()
if not is_first_command_input(original_commands):
        # If the first command is not an explicit read of input data
# either from stdin or a file then we add an implicit read from
# stdin to the command stream
stdin_reader = hatch.io.Stdin()
stdin_reader.parse_args()
new_commands = [stdin_reader] + new_commands
if (len(original_commands) == 0) or is_last_command_transform_or_input(original_commands):
# If the last command is a data transformation command or an input command then
# we add an implicit print to stdout to the command stream
stdout_writer = hatch.io.Stdout()
stdout_writer.parse_args()
new_commands = new_commands + [stdout_writer]
if not stdin_used_safely(new_commands):
utils.exit_with_error(f"stdin may only be used at most once, and only as the first command", const.EXIT_COMMAND_LINE_ERROR)
else:
for command in new_commands:
try:
df = command.run(df)
except ValueError as e:
utils.exit_with_error(f"Error: {str(e)}", const.EXIT_COMMAND_LINE_ERROR)
if df is None:
break
logging.info("Completed")
exit(0)
# If this script is run from the command line then call the main function.
if __name__ == '__main__':
main()
|
the-stack_106_23633 | import random
from collections import (
deque,
namedtuple,
)
import numpy as np
import torch
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, buffer_size, batch_size, seed, device):
"""Initialize a ReplayBuffer object.
Params
======
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
            device (torch.device): device onto which sampled tensors are moved
"""
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
random.seed(seed)
self.device = device
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(self.device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
self.device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
self.device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
        return len(self.memory)
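# Minimal usage sketch (illustrative shapes; real state/action dimensions come
# from the environment this buffer is paired with). Not called anywhere.
def _example_usage():
    buffer = ReplayBuffer(buffer_size=int(1e5), batch_size=64, seed=0, device=torch.device("cpu"))
    state = np.zeros(8, dtype=np.float32)
    for _ in range(64):
        buffer.add(state, 0, 0.0, state, False)
    states, actions, rewards, next_states, dones = buffer.sample()
    # states: torch.Size([64, 8]); actions/rewards/dones: torch.Size([64, 1])
    return states, actions, rewards, next_states, dones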
the-stack_106_23635 | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.skill.interaction_model.dialog_intents import DialogIntents as DialogIntents_30175679
from ask_smapi_model.v1.skill.interaction_model.delegation_strategy_type import DelegationStrategyType as DelegationStrategyType_41525f1c
class Dialog(object):
"""
Defines dialog rules e.g. slot elicitation and validation, intent chaining etc.
:param delegation_strategy: Defines a delegation strategy for the dialogs in this dialog model.
:type delegation_strategy: (optional) ask_smapi_model.v1.skill.interaction_model.delegation_strategy_type.DelegationStrategyType
:param intents: List of intents that have dialog rules associated with them. Dialogs can also span multiple intents.
:type intents: (optional) list[ask_smapi_model.v1.skill.interaction_model.dialog_intents.DialogIntents]
"""
deserialized_types = {
'delegation_strategy': 'ask_smapi_model.v1.skill.interaction_model.delegation_strategy_type.DelegationStrategyType',
'intents': 'list[ask_smapi_model.v1.skill.interaction_model.dialog_intents.DialogIntents]'
} # type: Dict
attribute_map = {
'delegation_strategy': 'delegationStrategy',
'intents': 'intents'
} # type: Dict
supports_multiple_types = False
def __init__(self, delegation_strategy=None, intents=None):
# type: (Optional[DelegationStrategyType_41525f1c], Optional[List[DialogIntents_30175679]]) -> None
"""Defines dialog rules e.g. slot elicitation and validation, intent chaining etc.
:param delegation_strategy: Defines a delegation strategy for the dialogs in this dialog model.
:type delegation_strategy: (optional) ask_smapi_model.v1.skill.interaction_model.delegation_strategy_type.DelegationStrategyType
:param intents: List of intents that have dialog rules associated with them. Dialogs can also span multiple intents.
:type intents: (optional) list[ask_smapi_model.v1.skill.interaction_model.dialog_intents.DialogIntents]
"""
self.__discriminator_value = None # type: str
self.delegation_strategy = delegation_strategy
self.intents = intents
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Dialog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
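# Minimal usage sketch (illustrative; real values come from the ask_smapi_model
# enums and DialogIntents objects):
#
#   dialog = Dialog(intents=[])
#   dialog.to_dict()   # -> {'delegation_strategy': None, 'intents': []}
#   print(dialog)      # pretty-printed via to_str()/__repr__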
|
the-stack_106_23637 | #!/usr/bin/env python
"""Trains a classifier for frames from the Simpsons.
Useful for testing data tables.
"""
# we need this so strings are written to bigquery as strings rather than bytes
from __future__ import unicode_literals
import math
import os
import subprocess
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import numpy as np
import pandas
import six
import wandb
def main():
run = wandb.init()
config = run.config
config.img_size = 50
config.batch_size = 32
config.epochs = 0
config.train_path = os.path.join('simpsons', 'train')
config.test_path = os.path.join('simpsons', 'test')
# download the data if it doesn't exist
if not os.path.exists("simpsons"):
print("Downloading Simpsons dataset...")
subprocess.check_output(
"curl https://storage.googleapis.com/wandb-production.appspot.com/mlclass/simpsons.tar.gz | tar xvz", shell=True)
# this is the augmentation configuration we will use for training
# see: https://keras.io/preprocessing/image/#imagedatagenerator-class
train_datagen = ImageDataGenerator(
rescale=1./255)
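    # Only rescaling is applied above; a richer augmentation set-up would look
    # something like the following (illustrative, not used here):
    #   ImageDataGenerator(rescale=1./255, rotation_range=15,
    #                      width_shift_range=0.1, height_shift_range=0.1,
    #                      horizontal_flip=True)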
# only rescaling augmentation for testing:
test_datagen = ImageDataGenerator(rescale=1./255)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
config.train_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size)
# this is a similar generator, for validation data
test_generator = test_datagen.flow_from_directory(
config.test_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(
config.img_size, config.img_size, 3), activation="relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(50, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(13, activation="softmax"))
model.compile(optimizer=optimizers.Adam(),
loss='categorical_crossentropy', metrics=['accuracy'])
def results_data_frame(test_datagen, model):
gen = test_datagen.flow_from_directory(
config.test_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size, shuffle=False)
class_cols = []
class_names = []
for class_col, i in sorted(six.iteritems(gen.class_indices), key=lambda c_i: c_i[1]):
class_cols.append(class_col)
class_names.append(class_col.replace('_', ' '))
cards = []
true_class_is = []
true_classes = []
true_probs = []
pred_classes = []
pred_probs = []
class_probs = [[] for c in class_names]
num_batches = int(math.ceil(len(gen.filenames) / float(gen.batch_size)))
#num_batches = 1
for batch_i in range(num_batches):
examples, truth = next(gen)
preds = model.predict(np.stack(examples))
this_true_class_is = [np.argmax(probs) for probs in truth]
true_class_is.extend(this_true_class_is)
true_classes.extend(class_names[i] for i in this_true_class_is)
true_probs.extend(ps[i] for ps, i in zip(preds, true_class_is))
pred_classes.extend(class_names[np.argmax(probs)] for probs in preds)
pred_probs.extend(np.max(probs) for probs in preds)
for cp, p in zip(class_probs, preds.T):
cp.extend(p)
base_i = batch_i * gen.batch_size
for i in range(base_i, base_i + len(examples)):
cards.append('''```Predicted:
{pred_class} ({pred_prob:.2%})
Actual:
{true_class} ({true_prob:.2%})

```'''.format(true_class=true_classes[i], true_prob=true_probs[i], pred_class=pred_classes[i], pred_prob=pred_probs[i], idx=i))
all_cols = ['wandb_example_id', 'image', 'card', 'true_class', 'true_prob', 'pred_class', 'pred_prob'] + class_cols
frame_dict = {
'wandb_example_id': [six.text_type(s) for s in gen.filenames[:len(cards)]],
'image': [wandb.Image(os.path.join(config.test_path, f)) for f in gen.filenames[:len(cards)]],
'card': cards,
'true_class': true_classes,
'true_prob': true_probs,
'pred_class': pred_classes,
'pred_prob': pred_probs,
}
for c, col in zip(class_cols, class_probs):
frame_dict[c] = col
table = pandas.DataFrame(frame_dict, columns=all_cols)
number_cols = ['true_prob', 'pred_prob'] + class_cols
table[number_cols] = table[number_cols].apply(pandas.to_numeric)
#from IPython import embed; embed()
return table
class ResultsDataFrameCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
run.summary["results"] = results_data_frame(test_datagen, model)
model.fit_generator(
train_generator,
steps_per_epoch=len(train_generator),
epochs=config.epochs,
workers=4,
callbacks=[ResultsDataFrameCallback()],
validation_data=test_generator,
validation_steps=len(test_generator))
if config.epochs == 0:
#run.summary["results"] = results_data_frame(test_datagen, model)
run.summary.update({ "results3": results_data_frame(test_datagen, model) })
if __name__ == '__main__':
main()
|
the-stack_106_23638 | '''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import csv
from models import *
from utils import progress_bar, mixup_data, mixup_criterion
from torch.autograd import Variable
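import numpy as np  # only needed by the reference sketch below
# mixup_data / mixup_criterion are imported from utils above. The reference
# sketch below only documents the behaviour assumed by the training loop
# (standard mixup: convex combinations of random input/label pairs); it is not
# called anywhere.
def _mixup_data_sketch(x, y, alpha=1.0, use_cuda=True):
    # lam ~ Beta(alpha, alpha); permute the batch and mix each example with its partner
    lam = np.random.beta(alpha, alpha) if alpha > 0. else 1.
    index = torch.randperm(x.size(0))
    if use_cuda:
        index = index.cuda()
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam
def _mixup_criterion_sketch(y_a, y_b, lam):
    # returns a function of (criterion, pred), matching how loss_func is used in train()
    return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)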
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--sess', default='mixup_default', type=str, help='session id')
parser.add_argument('--seed', default=0, type=int, help='rng seed')
parser.add_argument('--alpha', default=1., type=float, help='interpolation strength (uniform=1., ERM=0.)')
parser.add_argument('--decay', default=1e-4, type=float, help='weight decay (default=1e-4)')
args = parser.parse_args()
torch.manual_seed(args.seed)
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
batch_size = 128
base_learning_rate = 0.1
if use_cuda:
# data parallel
n_gpu = torch.cuda.device_count()
batch_size *= n_gpu
base_learning_rate *= n_gpu
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t7.' + args.sess + '_' + str(args.seed))
net = checkpoint['net']
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch'] + 1
torch.set_rng_state(checkpoint['rng_state'])
else:
print('==> Building model..')
# net = VGG('VGG19')
net = PreActResNet18()
# net = ResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
result_folder = './results/'
if not os.path.exists(result_folder):
os.makedirs(result_folder)
logname = result_folder + net.__class__.__name__ + '_' + args.sess + '_' + str(args.seed) + '.csv'
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net)
print('Using', torch.cuda.device_count(), 'GPUs.')
cudnn.benchmark = True
print('Using CUDA..')
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=base_learning_rate, momentum=0.9, weight_decay=args.decay)
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# generate mixed inputs, two one-hot label vectors and mixing coefficient
inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, args.alpha, use_cuda)
optimizer.zero_grad()
inputs, targets_a, targets_b = Variable(inputs), Variable(targets_a), Variable(targets_b)
outputs = net(inputs)
loss_func = mixup_criterion(targets_a, targets_b, lam)
loss = loss_func(criterion, outputs)
loss.backward()
optimizer.step()
train_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += lam * predicted.eq(targets_a.data).cpu().sum() + (1 - lam) * predicted.eq(targets_b.data).cpu().sum()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
return (train_loss/batch_idx, 100.*correct/total)
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs, volatile=True), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data[0]
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
best_acc = acc
checkpoint(acc, epoch)
return (test_loss/batch_idx, 100.*correct/total)
def checkpoint(acc, epoch):
# Save checkpoint.
print('Saving..')
state = {
'net': net,
'acc': acc,
'epoch': epoch,
'rng_state': torch.get_rng_state()
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.t7.' + args.sess + '_' + str(args.seed))
def adjust_learning_rate(optimizer, epoch):
"""decrease the learning rate at 100 and 150 epoch"""
lr = base_learning_rate
if epoch <= 9 and lr > 0.1:
# warm-up training for large minibatch
lr = 0.1 + (base_learning_rate - 0.1) * epoch / 10.
if epoch >= 100:
lr /= 10
if epoch >= 150:
lr /= 10
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if not os.path.exists(logname):
with open(logname, 'w') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow(['epoch', 'train loss', 'train acc', 'test loss', 'test acc'])
for epoch in range(start_epoch, 200):
adjust_learning_rate(optimizer, epoch)
train_loss, train_acc = train(epoch)
test_loss, test_acc = test(epoch)
with open(logname, 'a') as logfile:
logwriter = csv.writer(logfile, delimiter=',')
logwriter.writerow([epoch, train_loss, train_acc, test_loss, test_acc])
|
the-stack_106_23640 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import re
from frappe import _
from frappe.utils import (flt, getdate, get_first_day, get_last_day, date_diff,
add_months, add_days, formatdate, cint)
from erpnext.accounts.utils import get_fiscal_year
def get_period_list(from_fiscal_year, to_fiscal_year, periodicity, accumulated_values=False,
company=None, reset_period_on_fy_change=True):
"""Get a list of dict {"from_date": from_date, "to_date": to_date, "key": key, "label": label}
        Periodicity can be (Yearly, Half-Yearly, Quarterly, Monthly)"""
fiscal_year = get_fiscal_year_data(from_fiscal_year, to_fiscal_year)
validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year)
    # start with first day, so as to avoid year to_dates like 2-April if they ever occur
year_start_date = getdate(fiscal_year.year_start_date)
year_end_date = getdate(fiscal_year.year_end_date)
months_to_add = {
"Yearly": 12,
"Half-Yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
start_date = year_start_date
months = get_months(year_start_date, year_end_date)
for i in xrange(months / months_to_add):
period = frappe._dict({
"from_date": start_date
})
to_date = add_months(start_date, months_to_add)
start_date = to_date
if to_date == get_first_day(to_date):
# if to_date is the first day, get the last day of previous month
to_date = add_days(to_date, -1)
if to_date <= year_end_date:
# the normal case
period.to_date = to_date
else:
# if a fiscal year ends before a 12 month period
period.to_date = year_end_date
period.to_date_fiscal_year = get_fiscal_year(period.to_date, company=company)[0]
period.from_date_fiscal_year_start_date = get_fiscal_year(period.from_date, company=company)[1]
period_list.append(period)
if period.to_date == year_end_date:
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
if periodicity == "Monthly" and not accumulated_values:
label = formatdate(opts["to_date"], "MMM YYYY")
else:
if not accumulated_values:
label = get_label(periodicity, opts["from_date"], opts["to_date"])
else:
if reset_period_on_fy_change:
label = get_label(periodicity, opts.from_date_fiscal_year_start_date, opts["to_date"])
else:
label = get_label(periodicity, period_list[0].from_date, opts["to_date"])
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": year_start_date,
"year_end_date": year_end_date
})
return period_list
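# Illustrative shape of the result (assuming a fiscal year Apr 2016 - Mar 2017
# and periodicity "Half-Yearly" with accumulated_values=False): each entry is a
# frappe._dict along the lines of
#   {"from_date": 2016-04-01, "to_date": 2016-09-30, "key": "sep_2016",
#    "label": "Apr 16-Sep 16", "year_start_date": ..., "year_end_date": ...}
# plus the to_date_fiscal_year / from_date_fiscal_year_start_date values set above.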
def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):
fiscal_year = frappe.db.sql("""select min(year_start_date) as year_start_date,
max(year_end_date) as year_end_date from `tabFiscal Year` where
name between %(from_fiscal_year)s and %(to_fiscal_year)s""",
{'from_fiscal_year': from_fiscal_year, 'to_fiscal_year': to_fiscal_year}, as_dict=1)
return fiscal_year[0] if fiscal_year else {}
def validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year):
if not fiscal_year.get('year_start_date') and not fiscal_year.get('year_end_date'):
frappe.throw(_("End Year cannot be before Start Year"))
def get_months(start_date, end_date):
diff = (12 * end_date.year + end_date.month) - (12 * start_date.year + start_date.month)
return diff + 1
def get_label(periodicity, from_date, to_date):
if periodicity=="Yearly":
if formatdate(from_date, "YYYY") == formatdate(to_date, "YYYY"):
label = formatdate(from_date, "YYYY")
else:
label = formatdate(from_date, "YYYY") + "-" + formatdate(to_date, "YYYY")
else:
label = formatdate(from_date, "MMM YY") + "-" + formatdate(to_date, "MMM YY")
return label
def get_data(company, root_type, balance_must_be, period_list, filters=None,
accumulated_values=1, only_current_fiscal_year=True, ignore_closing_entries=False,
ignore_accumulated_values_for_fy=False):
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
company_currency = frappe.db.get_value("Company", company, "default_currency")
gl_entries_by_account = {}
for root in frappe.db.sql("""select lft, rgt from tabAccount
where root_type=%s and ifnull(parent_account, '') = ''""", root_type, as_dict=1):
set_gl_entries_by_account(company,
period_list[0]["year_start_date"] if only_current_fiscal_year else None,
period_list[-1]["to_date"],
root.lft, root.rgt, filters,
gl_entries_by_account, ignore_closing_entries=ignore_closing_entries)
calculate_values(accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy)
accumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values)
out = prepare_data(accounts, balance_must_be, period_list, company_currency)
out = filter_out_zero_value_rows(out, parent_children_map)
if out:
add_total_row(out, root_type, balance_must_be, period_list, company_currency)
return out
def calculate_values(accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy):
for entries in gl_entries_by_account.values():
for entry in entries:
d = accounts_by_name.get(entry.account)
if not d:
frappe.msgprint(
_("Could not retrieve information for {0}.".format(entry.account)), title="Error",
raise_exception=1
)
for period in period_list:
# check if posting date is within the period
if entry.posting_date <= period.to_date:
if (accumulated_values or entry.posting_date >= period.from_date) and \
(not ignore_accumulated_values_for_fy or
entry.fiscal_year == period.to_date_fiscal_year):
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
if entry.posting_date < period_list[0].year_start_date:
d["opening_balance"] = d.get("opening_balance", 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = \
accounts_by_name[d.parent_account].get(period.key, 0.0) + d.get(period.key, 0.0)
accounts_by_name[d.parent_account]["opening_balance"] = \
accounts_by_name[d.parent_account].get("opening_balance", 0.0) + d.get("opening_balance", 0.0)
def prepare_data(accounts, balance_must_be, period_list, company_currency):
data = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
total = 0
row = frappe._dict({
"account_name": _(d.account_name),
"account": _(d.name),
"parent_account": _(d.parent_account),
"indent": flt(d.indent),
"year_start_date": year_start_date,
"year_end_date": year_end_date,
"currency": company_currency,
"opening_balance": d.get("opening_balance", 0.0) * (1 if balance_must_be=="Debit" else -1)
})
for period in period_list:
if d.get(period.key) and balance_must_be=="Credit":
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= -1
row[period.key] = flt(d.get(period.key, 0.0), 3)
if abs(row[period.key]) >= 0.005:
# ignore zero values
has_value = True
total += flt(row[period.key])
row["has_value"] = has_value
row["total"] = total
data.append(row)
return data
def filter_out_zero_value_rows(data, parent_children_map, show_zero_values=False):
data_with_value = []
for d in data:
if show_zero_values or d.get("has_value"):
data_with_value.append(d)
else:
# show group with zero balance, if there are balances against child
children = [child.name for child in parent_children_map.get(d.get("account")) or []]
if children:
for row in data:
if row.get("account") in children and row.get("has_value"):
data_with_value.append(d)
break
return data_with_value
def add_total_row(out, root_type, balance_must_be, period_list, company_currency):
total_row = {
"account_name": "'" + _("Total {0} ({1})").format(_(root_type), _(balance_must_be)) + "'",
"account": "'" + _("Total {0} ({1})").format(_(root_type), _(balance_must_be)) + "'",
"currency": company_currency
}
for row in out:
if not row.get("parent_account"):
for period in period_list:
total_row.setdefault(period.key, 0.0)
total_row[period.key] += row.get(period.key, 0.0)
row[period.key] = 0.0
total_row.setdefault("total", 0.0)
total_row["total"] += flt(row["total"])
row["total"] = ""
if total_row.has_key("total"):
out.append(total_row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
return frappe.db.sql("""select name, parent_account, lft, rgt, root_type, report_type, account_name from `tabAccount`
where company=%s and root_type=%s order by lft""", (company, root_type), as_dict=True)
def filter_accounts(accounts, depth=10):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
children = parent_children_map.get(parent) or []
if parent == None:
sort_root_accounts(children)
for child in children:
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name, parent_children_map
def sort_root_accounts(roots):
"""Sort root types as Asset, Liability, Equity, Income, Expense"""
def compare_roots(a, b):
if a.value and re.split('\W+', a.value)[0].isdigit():
# if chart of accounts is numbered, then sort by number
return cmp(a.value, b.value)
if a.report_type != b.report_type and a.report_type == "Balance Sheet":
return -1
if a.root_type != b.root_type and a.root_type == "Asset":
return -1
if a.root_type == "Liability" and b.root_type == "Equity":
return -1
if a.root_type == "Income" and b.root_type == "Expense":
return -1
return 1
roots.sort(compare_roots)
def set_gl_entries_by_account(company, from_date, to_date, root_lft, root_rgt, filters, gl_entries_by_account,
ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = get_additional_conditions(from_date, ignore_closing_entries, filters)
draft_gl_entries = get_draft_gl_entries(filters,from_date,to_date)
accounts_filterd = frappe.db.sql("""select name from `tabAccount`
where company =%(company)s and lft >= %(lft)s and rgt <= %(rgt)s""",
{
"company": company,
"lft": root_lft,
"rgt": root_rgt
})
print ("accounts_filterd")
print (accounts_filterd)
#~ if filters.get("project"):
#~ additional_conditions.append("project = '%s'"%(frappe.db.escape(filters.get("project"))))
#~ if filters.get("cost_center"):
#~ additional_conditions.append(get_cost_center_cond(filters.get("cost_center")))
#~ filterd_draft_gl_entries = draft_gl_entries
filterd_draft_gl_entries = []
for draft_gl in draft_gl_entries :
if (draft_gl.account,) in accounts_filterd:
filterd_draft_gl_entries.append(draft_gl)
gl_entries = frappe.db.sql("""select posting_date, account, debit, credit, is_opening, fiscal_year from `tabGL Entry`
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and account in (select name from `tabAccount`
where lft >= %(lft)s and rgt <= %(rgt)s)
order by account, posting_date""".format(additional_conditions=additional_conditions),
{
"company": company,
"from_date": from_date,
"to_date": to_date,
"lft": root_lft,
"rgt": root_rgt
},
as_dict=True)
if filters.get("show_draft") !=1 and filters.get("show_submitted") != 1 :
gl_entries = []
elif filters.get("show_draft") == 1 and filters.get("show_submitted") != 1 :
gl_entries = filterd_draft_gl_entries
elif filters.get("show_draft") != 1 and filters.get("show_submitted") == 1 :
pass
elif filters.get("show_draft") == 1 and filters.get("show_submitted") == 1 :
gl_entries.extend(filterd_draft_gl_entries)
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_draft_gl_entries(filters,from_date,to_date):
from frappe.utils import cint, getdate, formatdate
target_docs_list = ["Payment Entry","Purchase Invoice","Expense Claim","Journal Entry",
"Sales Invoice","Purchase Receipt","Delivery Note"]
gl_entries = []
for target_doc in target_docs_list :
get_all_docs = frappe.get_list(target_doc,fields=['name'],
filters=[
['docstatus',"=", 0],
['company',"=", filters.get('company')],
["posting_date",">=",getdate(from_date)],
["posting_date","<=",getdate(to_date)]
])
for doc_name in get_all_docs :
doc = frappe.get_doc(target_doc,doc_name["name"])
if target_doc == "Payment Entry":
if doc.payment_type in ("Receive", "Pay") and not doc.get("party_account_field"):
doc.setup_party_account_field()
doc.add_party_gl_entries(gl_entries)
doc.add_bank_gl_entries(gl_entries)
doc.add_deductions_gl_entries(gl_entries)
if target_doc == "Journal Entry":
gl_map = []
for d in doc.get("accounts"):
if d.debit or d.credit:
gl_map.append(
doc.get_gl_dict({
"account": d.account,
"party_type": d.party_type,
"party": d.party,
"against": d.against_account,
"debit": flt(d.debit, d.precision("debit")),
"credit": flt(d.credit, d.precision("credit")),
"account_currency": d.account_currency,
"debit_in_account_currency": flt(d.debit_in_account_currency, d.precision("debit_in_account_currency")),
"credit_in_account_currency": flt(d.credit_in_account_currency, d.precision("credit_in_account_currency")),
"against_voucher_type": d.reference_type,
"against_voucher": d.reference_name,
"remarks": doc.remark,
"cost_center": d.cost_center,
"project": d.project
})
)
gl_entries.extend(gl_map)
if target_doc in ["Purchase Receipt"]:
from erpnext.stock import get_warehouse_account_map
warehouse_account = get_warehouse_account_map()
gl_entries.extend(doc.get_gl_entries(warehouse_account))
if target_doc in ["Purchase Invoice","Expense Claim","Sales Invoice","Delivery Note"]:
gl_entries.extend(doc.get_gl_entries())
return gl_entries
def get_additional_conditions(from_date, ignore_closing_entries, filters):
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("posting_date >= %(from_date)s")
if filters:
if filters.get("project"):
additional_conditions.append("project = '%s'"%(frappe.db.escape(filters.get("project"))))
if filters.get("cost_center"):
additional_conditions.append(get_cost_center_cond(filters.get("cost_center")))
return " and {}".format(" and ".join(additional_conditions)) if additional_conditions else ""
def get_cost_center_cond(cost_center):
lft, rgt = frappe.db.get_value("Cost Center", cost_center, ["lft", "rgt"])
return (""" cost_center in (select name from `tabCost Center` where lft >=%s and rgt <=%s)"""%(lft, rgt))
def get_columns(periodicity, period_list, accumulated_values=1, company=None):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
if company:
columns.append({
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
})
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"options": "currency",
"width": 150
})
if periodicity!="Yearly":
if not accumulated_values:
columns.append({
"fieldname": "total",
"label": _("Total"),
"fieldtype": "Currency",
"width": 150
})
return columns
|
the-stack_106_23642 | # coding: utf-8
import pprint
import re
import six
class ProxyTokenDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'access_token': 'str',
'long_access_token': 'str',
'valid_period': 'int',
'middle_end_url': 'str',
'middle_end_inner_url': 'str',
'enable_rerouting': 'bool'
}
attribute_map = {
'access_token': 'accessToken',
'long_access_token': 'longAccessToken',
'valid_period': 'validPeriod',
'middle_end_url': 'middleEndUrl',
'middle_end_inner_url': 'middleEndInnerUrl',
'enable_rerouting': 'enableRerouting'
}
def __init__(self, access_token=None, long_access_token=None, valid_period=None, middle_end_url=None, middle_end_inner_url=None, enable_rerouting=None):
"""ProxyTokenDTO - a model defined in huaweicloud sdk"""
self._access_token = None
self._long_access_token = None
self._valid_period = None
self._middle_end_url = None
self._middle_end_inner_url = None
self._enable_rerouting = None
self.discriminator = None
self.access_token = access_token
if long_access_token is not None:
self.long_access_token = long_access_token
if valid_period is not None:
self.valid_period = valid_period
if middle_end_url is not None:
self.middle_end_url = middle_end_url
if middle_end_inner_url is not None:
self.middle_end_inner_url = middle_end_inner_url
if enable_rerouting is not None:
self.enable_rerouting = enable_rerouting
@property
def access_token(self):
"""Gets the access_token of this ProxyTokenDTO.
        Short token string issued by the proxy authentication server
:return: The access_token of this ProxyTokenDTO.
:rtype: str
"""
return self._access_token
@access_token.setter
def access_token(self, access_token):
"""Sets the access_token of this ProxyTokenDTO.
        Short token string issued by the proxy authentication server
:param access_token: The access_token of this ProxyTokenDTO.
:type: str
"""
self._access_token = access_token
@property
def long_access_token(self):
"""Gets the long_access_token of this ProxyTokenDTO.
        Long token string issued by the proxy authentication server
:return: The long_access_token of this ProxyTokenDTO.
:rtype: str
"""
return self._long_access_token
@long_access_token.setter
def long_access_token(self, long_access_token):
"""Sets the long_access_token of this ProxyTokenDTO.
        Long token string issued by the proxy authentication server
:param long_access_token: The long_access_token of this ProxyTokenDTO.
:type: str
"""
self._long_access_token = long_access_token
@property
def valid_period(self):
"""Gets the valid_period of this ProxyTokenDTO.
        Token validity period, in seconds.
:return: The valid_period of this ProxyTokenDTO.
:rtype: int
"""
return self._valid_period
@valid_period.setter
def valid_period(self, valid_period):
"""Sets the valid_period of this ProxyTokenDTO.
        Token validity period, in seconds.
:param valid_period: The valid_period of this ProxyTokenDTO.
:type: int
"""
self._valid_period = valid_period
@property
def middle_end_url(self):
"""Gets the middle_end_url of this ProxyTokenDTO.
        Middle-end platform URL.
:return: The middle_end_url of this ProxyTokenDTO.
:rtype: str
"""
return self._middle_end_url
@middle_end_url.setter
def middle_end_url(self, middle_end_url):
"""Sets the middle_end_url of this ProxyTokenDTO.
        Middle-end platform URL.
:param middle_end_url: The middle_end_url of this ProxyTokenDTO.
:type: str
"""
self._middle_end_url = middle_end_url
@property
def middle_end_inner_url(self):
"""Gets the middle_end_inner_url of this ProxyTokenDTO.
        Middle-end platform internal (intranet) URL
:return: The middle_end_inner_url of this ProxyTokenDTO.
:rtype: str
"""
return self._middle_end_inner_url
@middle_end_inner_url.setter
def middle_end_inner_url(self, middle_end_inner_url):
"""Sets the middle_end_inner_url of this ProxyTokenDTO.
        Middle-end platform internal (intranet) URL
:param middle_end_inner_url: The middle_end_inner_url of this ProxyTokenDTO.
:type: str
"""
self._middle_end_inner_url = middle_end_inner_url
@property
def enable_rerouting(self):
"""Gets the enable_rerouting of this ProxyTokenDTO.
        Whether secondary routing (rerouting) is enabled
:return: The enable_rerouting of this ProxyTokenDTO.
:rtype: bool
"""
return self._enable_rerouting
@enable_rerouting.setter
def enable_rerouting(self, enable_rerouting):
"""Sets the enable_rerouting of this ProxyTokenDTO.
        Whether secondary routing (rerouting) is enabled
:param enable_rerouting: The enable_rerouting of this ProxyTokenDTO.
:type: bool
"""
self._enable_rerouting = enable_rerouting
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProxyTokenDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_23646 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import pytest
import pickle
from sklearn.linear_model import LinearRegression, Lasso, LassoCV, LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, PolynomialFeatures
from sklearn.model_selection import KFold, GroupKFold
from econml.dml import DML, LinearDML, SparseLinearDML, KernelDML, CausalForestDML
from econml.dml import NonParamDML
import numpy as np
from econml.utilities import shape, hstack, vstack, reshape, cross_product
from econml.inference import BootstrapInference
from contextlib import ExitStack
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
import itertools
from econml.sklearn_extensions.linear_model import WeightedLasso, StatsModelsRLM
from econml.tests.test_statsmodels import _summarize
import econml.tests.utilities # bugfix for assertWarns
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
# all solutions to underdetermined (or exactly determined) Ax=b are given by A⁺b+(I-A⁺A)w for some arbitrary w
# note that if Ax=b is overdetermined, this will raise an assertion error
def rand_sol(A, b):
"""Generate a random solution to the equation Ax=b."""
assert np.linalg.matrix_rank(A) <= len(b)
A_plus = np.linalg.pinv(A)
x = A_plus @ b
return x + (np.eye(x.shape[0]) - A_plus @ A) @ np.random.normal(size=x.shape)
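# Illustrative check (not executed by the tests): for a generic underdetermined
# system the returned point still satisfies Ax = b, while the null-space term
# randomizes which particular solution is produced.
#
#   A = np.random.normal(size=(3, 5))
#   b = np.random.normal(size=3)
#   assert np.allclose(A @ rand_sol(A, b), b)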
class TestDML(unittest.TestCase):
def test_cate_api(self):
"""Test that we correctly implement the CATE API."""
n_c = 20 # number of rows for continuous models
n_d = 30 # number of rows for discrete models
def make_random(n, is_discrete, d):
if d is None:
return None
sz = (n, d) if d >= 0 else (n,)
if is_discrete:
while True:
arr = np.random.choice(['a', 'b', 'c'], size=sz)
# ensure that we've got at least 6 of every element
# 2 outer splits, 3 inner splits when model_t is 'auto' and treatment is discrete
# NOTE: this number may need to change if the default number of folds in
# WeightedStratifiedKFold changes
_, counts = np.unique(arr, return_counts=True)
if len(counts) == 3 and counts.min() > 5:
return arr
else:
return np.random.normal(size=sz)
for d_t in [2, 1, -1]:
for is_discrete in [True, False] if d_t <= 1 else [False]:
for d_y in [3, 1, -1]:
for d_x in [2, None]:
for d_w in [2, None]:
n = n_d if is_discrete else n_c
W, X, Y, T = [make_random(n, is_discrete, d)
for is_discrete, d in [(False, d_w),
(False, d_x),
(False, d_y),
(is_discrete, d_t)]]
for featurizer, fit_cate_intercept in\
[(None, True),
(PolynomialFeatures(degree=2, include_bias=False), True),
(PolynomialFeatures(degree=2, include_bias=True), False)]:
d_t_final = 2 if is_discrete else d_t
effect_shape = (n,) + ((d_y,) if d_y > 0 else ())
effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1), 6)
marginal_effect_shape = ((n,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else ()))
marginal_effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1),
6 * (d_t_final if d_t_final > 0 else 1))
# since T isn't passed to const_marginal_effect, defaults to one row if X is None
const_marginal_effect_shape = ((n if d_x else 1,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else()))
const_marginal_effect_summaryframe_shape = (
(n if d_x else 1) * (d_y if d_y > 0 else 1),
6 * (d_t_final if d_t_final > 0 else 1))
fd_x = featurizer.fit_transform(X).shape[1:] if featurizer and d_x\
else ((d_x,) if d_x else (0,))
coef_shape = Y.shape[1:] + (T.shape[1:] if not is_discrete else (2,)) + fd_x
coef_summaryframe_shape = (
(d_y if d_y > 0 else 1) * (fd_x[0] if fd_x[0] > 0 else 1),
(d_t_final if d_t_final > 0 else 1) * 6)
intercept_shape = Y.shape[1:] + (T.shape[1:] if not is_discrete else (2,))
intercept_summaryframe_shape = (
(d_y if d_y > 0 else 1), (d_t_final if d_t_final > 0 else 1) * 6)
model_t = LogisticRegression() if is_discrete else Lasso()
all_infs = [None, 'auto', BootstrapInference(2)]
for est, multi, infs in\
[(DML(model_y=Lasso(),
model_t=model_t,
model_final=Lasso(alpha=0.1, fit_intercept=False),
featurizer=featurizer,
fit_cate_intercept=fit_cate_intercept,
discrete_treatment=is_discrete),
True,
[None] +
([BootstrapInference(n_bootstrap_samples=20)] if not is_discrete else [])),
(DML(model_y=Lasso(),
model_t=model_t,
model_final=StatsModelsRLM(fit_intercept=False),
featurizer=featurizer,
fit_cate_intercept=fit_cate_intercept,
discrete_treatment=is_discrete),
True,
['auto']),
(LinearDML(model_y=Lasso(),
model_t='auto',
featurizer=featurizer,
fit_cate_intercept=fit_cate_intercept,
discrete_treatment=is_discrete),
True,
all_infs),
(SparseLinearDML(model_y=WeightedLasso(),
model_t=model_t,
featurizer=featurizer,
fit_cate_intercept=fit_cate_intercept,
discrete_treatment=is_discrete),
True,
[None, 'auto'] +
([BootstrapInference(n_bootstrap_samples=20)] if not is_discrete else [])),
(KernelDML(model_y=WeightedLasso(),
model_t=model_t,
fit_cate_intercept=fit_cate_intercept,
discrete_treatment=is_discrete),
False,
[None]),
(CausalForestDML(model_y=WeightedLasso(),
model_t=model_t,
featurizer=featurizer,
n_estimators=4,
n_jobs=1,
discrete_treatment=is_discrete),
True,
['auto', 'blb'])]:
if not(multi) and d_y > 1:
continue
if X is None and isinstance(est, CausalForestDML):
continue
# ensure we can serialize the unfit estimator
pickle.dumps(est)
for inf in infs:
with self.subTest(d_w=d_w, d_x=d_x, d_y=d_y, d_t=d_t,
is_discrete=is_discrete, est=est, inf=inf):
if X is None and (not fit_cate_intercept):
with pytest.raises(AttributeError):
est.fit(Y, T, X=X, W=W, inference=inf)
continue
est.fit(Y, T, X=X, W=W, inference=inf)
# ensure we can pickle the fit estimator
pickle.dumps(est)
# make sure we can call the marginal_effect and effect methods
const_marg_eff = est.const_marginal_effect(X)
marg_eff = est.marginal_effect(T, X)
self.assertEqual(shape(marg_eff), marginal_effect_shape)
self.assertEqual(shape(const_marg_eff), const_marginal_effect_shape)
np.testing.assert_allclose(
marg_eff if d_x else marg_eff[0:1], const_marg_eff)
assert isinstance(est.score_, float)
for score in est.nuisance_scores_y:
assert isinstance(score, float)
for score in est.nuisance_scores_t:
assert isinstance(score, float)
T0 = np.full_like(T, 'a') if is_discrete else np.zeros_like(T)
eff = est.effect(X, T0=T0, T1=T)
self.assertEqual(shape(eff), effect_shape)
if ((not isinstance(est, KernelDML)) and
(not isinstance(est, CausalForestDML))):
self.assertEqual(shape(est.coef_), coef_shape)
if fit_cate_intercept:
self.assertEqual(shape(est.intercept_), intercept_shape)
else:
with pytest.raises(AttributeError):
self.assertEqual(shape(est.intercept_), intercept_shape)
if inf is not None:
const_marg_eff_int = est.const_marginal_effect_interval(X)
marg_eff_int = est.marginal_effect_interval(T, X)
self.assertEqual(shape(marg_eff_int),
(2,) + marginal_effect_shape)
self.assertEqual(shape(const_marg_eff_int),
(2,) + const_marginal_effect_shape)
self.assertEqual(shape(est.effect_interval(X, T0=T0, T1=T)),
(2,) + effect_shape)
if ((not isinstance(est, KernelDML)) and
(not isinstance(est, CausalForestDML))):
self.assertEqual(shape(est.coef__interval()),
(2,) + coef_shape)
if fit_cate_intercept:
self.assertEqual(shape(est.intercept__interval()),
(2,) + intercept_shape)
else:
with pytest.raises(AttributeError):
self.assertEqual(shape(est.intercept__interval()),
(2,) + intercept_shape)
const_marg_effect_inf = est.const_marginal_effect_inference(X)
T1 = np.full_like(T, 'b') if is_discrete else T
effect_inf = est.effect_inference(X, T0=T0, T1=T1)
marg_effect_inf = est.marginal_effect_inference(T, X)
# test const marginal inference
self.assertEqual(shape(const_marg_effect_inf.summary_frame()),
const_marginal_effect_summaryframe_shape)
self.assertEqual(shape(const_marg_effect_inf.point_estimate),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.stderr),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.var),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.pvalue()),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.zstat()),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.conf_int()),
(2,) + const_marginal_effect_shape)
np.testing.assert_array_almost_equal(
const_marg_effect_inf.conf_int()[0],
const_marg_eff_int[0], decimal=5)
const_marg_effect_inf.population_summary()._repr_html_()
# test effect inference
self.assertEqual(shape(effect_inf.summary_frame()),
effect_summaryframe_shape)
self.assertEqual(shape(effect_inf.point_estimate),
effect_shape)
self.assertEqual(shape(effect_inf.stderr),
effect_shape)
self.assertEqual(shape(effect_inf.var),
effect_shape)
self.assertEqual(shape(effect_inf.pvalue()),
effect_shape)
self.assertEqual(shape(effect_inf.zstat()),
effect_shape)
self.assertEqual(shape(effect_inf.conf_int()),
(2,) + effect_shape)
np.testing.assert_array_almost_equal(
effect_inf.conf_int()[0],
est.effect_interval(X, T0=T0, T1=T1)[0], decimal=5)
effect_inf.population_summary()._repr_html_()
# test marginal effect inference
self.assertEqual(shape(marg_effect_inf.summary_frame()),
marginal_effect_summaryframe_shape)
self.assertEqual(shape(marg_effect_inf.point_estimate),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.stderr),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.var),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.pvalue()),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.zstat()),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.conf_int()),
(2,) + marginal_effect_shape)
np.testing.assert_array_almost_equal(
marg_effect_inf.conf_int()[0], marg_eff_int[0], decimal=5)
marg_effect_inf.population_summary()._repr_html_()
# test coef__inference and intercept__inference
if ((not isinstance(est, KernelDML)) and
(not isinstance(est, CausalForestDML))):
if X is not None:
self.assertEqual(
shape(est.coef__inference().summary_frame()),
coef_summaryframe_shape)
np.testing.assert_array_almost_equal(
est.coef__inference().conf_int()
[0], est.coef__interval()[0], decimal=5)
if fit_cate_intercept:
cm = ExitStack()
# ExitStack can be used as a "do nothing" ContextManager
else:
cm = pytest.raises(AttributeError)
with cm:
self.assertEqual(shape(est.intercept__inference().
summary_frame()),
intercept_summaryframe_shape)
np.testing.assert_array_almost_equal(
est.intercept__inference().conf_int()
[0], est.intercept__interval()[0], decimal=5)
est.summary()
est.score(Y, T, X, W)
if isinstance(est, CausalForestDML):
np.testing.assert_array_equal(est.feature_importances_.shape,
((d_y,) if d_y > 0 else()) + fd_x)
# make sure we can call effect with implied scalar treatments,
# no matter the dimensions of T, and also that we warn when there
# are multiple treatments
if d_t > 1:
cm = self.assertWarns(Warning)
else:
# ExitStack can be used as a "do nothing" ContextManager
cm = ExitStack()
with cm:
effect_shape2 = (n if d_x else 1,) + ((d_y,) if d_y > 0 else())
eff = est.effect(X) if not is_discrete else est.effect(
X, T0='a', T1='b')
self.assertEqual(shape(eff), effect_shape2)
def test_cate_api_nonparam(self):
"""Test that we correctly implement the CATE API."""
n = 20
def make_random(is_discrete, d):
if d is None:
return None
sz = (n, d) if d >= 0 else (n,)
if is_discrete:
while True:
arr = np.random.choice(['a', 'b'], size=sz)
# ensure that we've got at least two of every element
_, counts = np.unique(arr, return_counts=True)
if len(counts) == 2 and counts.min() > 2:
return arr
else:
return np.random.normal(size=sz)
for d_t in [1, -1]:
for is_discrete in [True, False] if d_t <= 1 else [False]:
for d_y in [3, 1, -1]:
for d_x in [2, None]:
for d_w in [2, None]:
W, X, Y, T = [make_random(is_discrete, d)
for is_discrete, d in [(False, d_w),
(False, d_x),
(False, d_y),
(is_discrete, d_t)]]
d_t_final = 1 if is_discrete else d_t
effect_shape = (n,) + ((d_y,) if d_y > 0 else ())
effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1), 6)
marginal_effect_shape = ((n,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else ()))
marginal_effect_summaryframe_shape = (n * (d_y if d_y > 0 else 1),
6 * (d_t_final if d_t_final > 0 else 1))
# since T isn't passed to const_marginal_effect, defaults to one row if X is None
const_marginal_effect_shape = ((n if d_x else 1,) +
((d_y,) if d_y > 0 else ()) +
((d_t_final,) if d_t_final > 0 else()))
const_marginal_effect_summaryframe_shape = (
(n if d_x else 1) * (d_y if d_y > 0 else 1),
6 * (d_t_final if d_t_final > 0 else 1))
model_t = LogisticRegression() if is_discrete else WeightedLasso()
base_infs = [None, BootstrapInference(2)]
for est, multi, infs in [(NonParamDML(model_y=WeightedLasso(),
model_t=model_t,
model_final=WeightedLasso(),
featurizer=None,
discrete_treatment=is_discrete),
True,
base_infs),
(NonParamDML(model_y=WeightedLasso(),
model_t=model_t,
model_final=WeightedLasso(),
featurizer=FunctionTransformer(),
discrete_treatment=is_discrete),
True,
base_infs), ]:
if not(multi) and d_y > 1:
continue
for inf in infs:
with self.subTest(d_w=d_w, d_x=d_x, d_y=d_y, d_t=d_t,
is_discrete=is_discrete, est=est, inf=inf):
if X is None:
with pytest.raises(AttributeError):
est.fit(Y, T, X=X, W=W, inference=inf)
continue
est.fit(Y, T, X=X, W=W, inference=inf)
# make sure we can call the marginal_effect and effect methods
const_marg_eff = est.const_marginal_effect(X)
marg_eff = est.marginal_effect(T, X)
self.assertEqual(shape(marg_eff), marginal_effect_shape)
self.assertEqual(shape(const_marg_eff), const_marginal_effect_shape)
np.testing.assert_array_equal(
marg_eff if d_x else marg_eff[0:1], const_marg_eff)
T0 = np.full_like(T, 'a') if is_discrete else np.zeros_like(T)
eff = est.effect(X, T0=T0, T1=T)
self.assertEqual(shape(eff), effect_shape)
if inf is not None:
const_marg_eff_int = est.const_marginal_effect_interval(X)
marg_eff_int = est.marginal_effect_interval(T, X)
self.assertEqual(shape(marg_eff_int),
(2,) + marginal_effect_shape)
self.assertEqual(shape(const_marg_eff_int),
(2,) + const_marginal_effect_shape)
self.assertEqual(shape(est.effect_interval(X, T0=T0, T1=T)),
(2,) + effect_shape)
if inf in ['auto', 'statsmodels', 'debiasedlasso', 'blb']:
const_marg_effect_inf = est.const_marginal_effect_inference(X)
T1 = np.full_like(T, 'b') if is_discrete else T
effect_inf = est.effect_inference(X, T0=T0, T1=T1)
marg_effect_inf = est.marginal_effect_inference(T, X)
# test const marginal inference
self.assertEqual(shape(const_marg_effect_inf.summary_frame()),
const_marginal_effect_summaryframe_shape)
self.assertEqual(shape(const_marg_effect_inf.point_estimate),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.stderr),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.var),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.pvalue()),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.zstat()),
const_marginal_effect_shape)
self.assertEqual(shape(const_marg_effect_inf.conf_int()),
(2,) + const_marginal_effect_shape)
np.testing.assert_array_almost_equal(
const_marg_effect_inf.conf_int()[0],
const_marg_eff_int[0], decimal=5)
const_marg_effect_inf.population_summary()._repr_html_()
# test effect inference
self.assertEqual(shape(effect_inf.summary_frame()),
effect_summaryframe_shape)
self.assertEqual(shape(effect_inf.point_estimate),
effect_shape)
self.assertEqual(shape(effect_inf.stderr),
effect_shape)
self.assertEqual(shape(effect_inf.var),
effect_shape)
self.assertEqual(shape(effect_inf.pvalue()),
effect_shape)
self.assertEqual(shape(effect_inf.zstat()),
effect_shape)
self.assertEqual(shape(effect_inf.conf_int()),
(2,) + effect_shape)
np.testing.assert_array_almost_equal(
effect_inf.conf_int()[0],
est.effect_interval(X, T0=T0, T1=T1)[0], decimal=5)
effect_inf.population_summary()._repr_html_()
# test marginal effect inference
self.assertEqual(shape(marg_effect_inf.summary_frame()),
marginal_effect_summaryframe_shape)
self.assertEqual(shape(marg_effect_inf.point_estimate),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.stderr),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.var),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.pvalue()),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.zstat()),
marginal_effect_shape)
self.assertEqual(shape(marg_effect_inf.conf_int()),
(2,) + marginal_effect_shape)
np.testing.assert_array_almost_equal(
marg_effect_inf.conf_int()[0], marg_eff_int[0], decimal=5)
marg_effect_inf.population_summary()._repr_html_()
est.score(Y, T, X, W)
# make sure we can call effect with implied scalar treatments, no matter the
# dimensions of T, and also that we warn when there are multiple treatments
if d_t > 1:
cm = self.assertWarns(Warning)
else:
cm = ExitStack() # ExitStack can be used as a "do nothing" ContextManager
with cm:
effect_shape2 = (n if d_x else 1,) + ((d_y,) if d_y > 0 else())
eff = est.effect(X) if not is_discrete else est.effect(X, T0='a', T1='b')
self.assertEqual(shape(eff), effect_shape2)
def test_bad_splits_discrete(self):
"""
Tests that when some training splits in a crossfit fold don't contain all treatments then an error
is raised.
"""
Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])
T = np.array([2, 2, 1, 2, 1, 1, 1, 1])
X = np.ones((8, 1))
est = LinearDML(n_splits=[(np.arange(4, 8), np.arange(4))], discrete_treatment=True)
with pytest.raises(AttributeError):
est.fit(Y, T, X=X)
Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])
T = np.array([2, 2, 1, 2, 2, 2, 2, 2])
X = np.ones((8, 1))
est = LinearDML(n_splits=[(np.arange(4, 8), np.arange(4))], discrete_treatment=True)
with pytest.raises(AttributeError):
est.fit(Y, T, X=X)
def test_bad_treatment_nonparam(self):
"""
Test that the non-parametric dml raises errors when treatment is not binary or single dimensional
"""
Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])
T = np.array([3, 2, 1, 2, 1, 2, 1, 3])
X = np.ones((8, 1))
est = NonParamDML(model_y=WeightedLasso(),
model_t=LogisticRegression(),
model_final=WeightedLasso(),
discrete_treatment=True)
with pytest.raises(AttributeError):
est.fit(Y, T, X=X)
T = np.ones((8, 2))
est = NonParamDML(model_y=WeightedLasso(),
model_t=LinearRegression(),
model_final=WeightedLasso(),
discrete_treatment=False)
with pytest.raises(AttributeError):
est.fit(Y, T, X=X)
def test_access_to_internal_models(self):
"""
Test that API related to accessing the nuisance models, cate_model and featurizer is working.
"""
Y = np.array([2, 3, 1, 3, 2, 1, 1, 1])
T = np.array([3, 2, 1, 2, 1, 2, 1, 3])
X = np.ones((8, 1))
est = DML(model_y=WeightedLasso(),
model_t=LogisticRegression(),
model_final=WeightedLasso(),
featurizer=PolynomialFeatures(degree=2, include_bias=False),
fit_cate_intercept=True,
discrete_treatment=True)
est.fit(Y, T, X=X)
assert isinstance(est.original_featurizer, PolynomialFeatures)
assert isinstance(est.featurizer, Pipeline)
assert isinstance(est.model_cate, WeightedLasso)
for mdl in est.models_y:
assert isinstance(mdl, WeightedLasso)
for mdl in est.models_t:
assert isinstance(mdl, LogisticRegression)
np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A', 'A^2'])
np.testing.assert_array_equal(est.cate_feature_names(), ['x0', 'x0^2'])
est = DML(model_y=WeightedLasso(),
model_t=LogisticRegression(),
model_final=WeightedLasso(),
featurizer=None,
fit_cate_intercept=True,
discrete_treatment=True)
est.fit(Y, T, X=X)
assert est.original_featurizer is None
assert isinstance(est.featurizer, FunctionTransformer)
assert isinstance(est.model_cate, WeightedLasso)
for mdl in est.models_y:
assert isinstance(mdl, WeightedLasso)
for mdl in est.models_t:
assert isinstance(mdl, LogisticRegression)
np.testing.assert_array_equal(est.cate_feature_names(['A']), ['A'])
def test_forest_dml_perf(self):
"""Testing accuracy of forest DML is reasonable"""
np.random.seed(1234)
n = 20000 # number of raw samples
d = 10
for _ in range(2):
X = np.random.binomial(1, .5, size=(n, d))
T = np.random.binomial(1, .5, size=(n,))
def true_fn(x):
return -1 + 2 * x[:, 0] + x[:, 1] * x[:, 2]
y = true_fn(X) * T + X[:, 0] + (1 * X[:, 0] + 1) * np.random.normal(0, 1, size=(n,))
XT = np.hstack([T.reshape(-1, 1), X])
X1, X2, y1, y2, X1_sum, X2_sum, y1_sum, y2_sum, n1_sum, n2_sum, var1_sum, var2_sum = _summarize(XT, y)
# We concatenate the two copies data
X_sum = np.vstack([np.array(X1_sum)[:, 1:], np.array(X2_sum)[:, 1:]])
T_sum = np.concatenate((np.array(X1_sum)[:, 0], np.array(X2_sum)[:, 0]))
y_sum = np.concatenate((y1_sum, y2_sum)) # outcome
n_sum = np.concatenate((n1_sum, n2_sum)) # number of summarized points
var_sum = np.concatenate((var1_sum, var2_sum)) # variance of the summarized points
for summarized, min_samples_leaf in [(False, 20), (True, 1)]:
est = CausalForestDML(model_y=GradientBoostingRegressor(n_estimators=30, min_samples_leaf=30),
model_t=GradientBoostingClassifier(n_estimators=30, min_samples_leaf=30),
discrete_treatment=True,
n_crossfit_splits=2,
n_estimators=1000,
max_samples=.4,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=0.001,
verbose=0, min_var_fraction_leaf=.1,
fit_intercept=False,
random_state=12345)
if summarized:
est.fit(y_sum, T_sum, X=X_sum[:, :4], W=X_sum[:, 4:],
sample_weight=n_sum)
else:
est.fit(y, T, X=X[:, :4], W=X[:, 4:])
X_test = np.array(list(itertools.product([0, 1], repeat=4)))
point = est.effect(X_test)
truth = true_fn(X_test)
lb, ub = est.effect_interval(X_test, alpha=.01)
np.testing.assert_allclose(point, truth, rtol=0, atol=.3)
np.testing.assert_array_less(lb - .01, truth)
np.testing.assert_array_less(truth, ub + .01)
est = CausalForestDML(model_y=GradientBoostingRegressor(n_estimators=50, min_samples_leaf=100),
model_t=GradientBoostingRegressor(n_estimators=50, min_samples_leaf=100),
discrete_treatment=False,
n_crossfit_splits=2,
n_estimators=1000,
max_samples=.4,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=0.001,
verbose=0, min_var_fraction_leaf=.1,
fit_intercept=False,
random_state=12345)
if summarized:
est.fit(y_sum, T_sum, X=X_sum[:, :4], W=X_sum[:, 4:],
sample_weight=n_sum)
else:
est.fit(y, T, X=X[:, :4], W=X[:, 4:])
X_test = np.array(list(itertools.product([0, 1], repeat=4)))
point = est.effect(X_test)
truth = true_fn(X_test)
lb, ub = est.effect_interval(X_test, alpha=.01)
np.testing.assert_allclose(point, truth, rtol=0, atol=.3)
np.testing.assert_array_less(lb - .01, truth)
np.testing.assert_array_less(truth, ub + .01)
def test_can_use_vectors(self):
"""Test that we can pass vectors for T and Y (not only 2-dimensional arrays)."""
dmls = [
LinearDML(LinearRegression(), LinearRegression(), fit_cate_intercept=False),
SparseLinearDML(LinearRegression(), LinearRegression(), fit_cate_intercept=False)
]
for dml in dmls:
dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))
self.assertAlmostEqual(dml.coef_.reshape(())[()], 1)
score = dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))
self.assertAlmostEqual(score, 0)
def test_can_use_sample_weights(self):
"""Test that we can pass sample weights to an estimator."""
dmls = [
LinearDML(LinearRegression(), 'auto', fit_cate_intercept=False),
SparseLinearDML(LinearRegression(), 'auto', fit_cate_intercept=False)
]
for dml in dmls:
dml.fit(np.array([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]),
X=np.ones((12, 1)), sample_weight=np.ones((12, )))
self.assertAlmostEqual(dml.coef_.reshape(())[()], 1)
def test_discrete_treatments(self):
"""Test that we can use discrete treatments"""
dmls = [
LinearDML(LinearRegression(), LogisticRegression(C=1000),
fit_cate_intercept=False, discrete_treatment=True),
SparseLinearDML(LinearRegression(), LogisticRegression(C=1000),
fit_cate_intercept=False, discrete_treatment=True)
]
for dml in dmls:
# create a simple artificial setup where effect of moving from treatment
# 1 -> 2 is 2,
# 1 -> 3 is 1, and
# 2 -> 3 is -1 (necessarily, by composing the previous two effects)
# Using an uneven number of examples from different classes,
# and having the treatments in non-lexicographic order,
            # should rule out some basic issues.
dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array([3, 2, 1, 2, 3, 1, 1, 1]), X=np.ones((8, 1)))
np.testing.assert_almost_equal(
dml.effect(
np.ones((9, 1)),
T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
),
[0, 2, 1, -2, 0, -1, -1, 1, 0],
decimal=2)
dml.score(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array([3, 2, 1, 2, 3, 1, 1, 1]), np.ones((8, 1)))
def test_can_custom_splitter(self):
# test that we can fit with a KFold instance
dml = LinearDML(LinearRegression(), LogisticRegression(C=1000),
discrete_treatment=True, n_splits=KFold())
dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))
dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))
# test that we can fit with a train/test iterable
dml = LinearDML(LinearRegression(), LogisticRegression(C=1000),
discrete_treatment=True, n_splits=[([0, 1, 2], [3, 4, 5])])
dml.fit(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), X=np.ones((6, 1)))
dml.score(np.array([1, 2, 3, 1, 2, 3]), np.array([1, 2, 3, 1, 2, 3]), np.ones((6, 1)))
def test_can_use_featurizer(self):
"Test that we can use a featurizer, and that fit is only called during training"
# predetermined splits ensure that all features are seen in each split
splits = ([0, 2, 3, 6, 8, 11, 13, 15, 16],
[1, 4, 5, 7, 9, 10, 12, 14, 17])
dml = LinearDML(LinearRegression(), LinearRegression(),
fit_cate_intercept=False, featurizer=OneHotEncoder(sparse=False),
n_splits=[splits, splits[::-1]])
T = np.tile([1, 2, 3], 6)
Y = np.array([1, 2, 3, 1, 2, 3])
Y = np.concatenate([Y, 0 * Y, -Y])
X = np.repeat([[7, 8, 9]], 6, axis=1).T
dml.fit(Y, T, X=X)
# because there is one fewer unique element in the test set, fit_transform would return the wrong number of fts
X_test = np.array([[7, 8]]).T
np.testing.assert_equal(dml.effect(X_test)[::-1], dml.effect(X_test[::-1]))
eff_int = np.array(dml.effect_interval(X_test))
eff_int_rev = np.array(dml.effect_interval(X_test[::-1]))
np.testing.assert_equal(eff_int[:, ::-1], eff_int_rev)
eff_int = np.array(dml.const_marginal_effect_interval(X_test))
eff_int_rev = np.array(dml.const_marginal_effect_interval(X_test[::-1]))
np.testing.assert_equal(eff_int[:, ::-1], eff_int_rev)
def test_can_use_statsmodel_inference(self):
"""Test that we can use statsmodels to generate confidence intervals"""
dml = LinearDML(LinearRegression(), LogisticRegression(C=1000),
discrete_treatment=True)
dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]), np.array(
[3, 2, 1, 2, 3, 1, 1, 1]), X=np.ones((8, 1)))
interval = dml.effect_interval(np.ones((9, 1)),
T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]),
alpha=0.05)
point = dml.effect(np.ones((9, 1)),
T0=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
T1=np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]))
assert len(interval) == 2
lo, hi = interval
assert lo.shape == hi.shape == point.shape
assert (lo <= point).all()
assert (point <= hi).all()
assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width
interval = dml.const_marginal_effect_interval(np.ones((9, 1)), alpha=0.05)
point = dml.const_marginal_effect(np.ones((9, 1)))
assert len(interval) == 2
lo, hi = interval
assert lo.shape == hi.shape == point.shape
assert (lo <= point).all()
assert (point <= hi).all()
assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width
interval = dml.coef__interval(alpha=0.05)
point = dml.coef_
assert len(interval) == 2
lo, hi = interval
assert lo.shape == hi.shape == point.shape
assert (lo <= point).all()
assert (point <= hi).all()
assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width
interval = dml.intercept__interval(alpha=0.05)
point = dml.intercept_
assert len(interval) == 2
lo, hi = interval
assert (lo <= point).all()
assert (point <= hi).all()
assert (lo < hi).any() # for at least some of the examples, the CI should have nonzero width
def test_ignores_final_intercept(self):
"""Test that final model intercepts are ignored (with a warning)"""
class InterceptModel:
def fit(self, Y, X):
pass
def predict(self, X):
return X + 1
def prediction_stderr(self, X):
return np.zeros(X.shape[0])
# (incorrectly) use a final model with an intercept
dml = DML(LinearRegression(), LinearRegression(),
model_final=InterceptModel())
# Because final model is fixed, actual values of T and Y don't matter
t = np.random.normal(size=100)
y = np.random.normal(size=100)
with self.assertWarns(Warning): # we should warn whenever there's an intercept
dml.fit(y, t)
assert dml.const_marginal_effect() == 1 # coefficient on X in InterceptModel is 1
assert dml.const_marginal_effect_inference().point_estimate == 1
assert dml.const_marginal_effect_inference().conf_int() == (1, 1)
assert dml.const_marginal_effect_interval() == (1, 1)
assert dml.effect() == 1
assert dml.effect_inference().point_estimate == 1
assert dml.effect_inference().conf_int() == (1, 1)
assert dml.effect_interval() == (1, 1)
assert dml.marginal_effect(1) == 1 # coefficient on X in InterceptModel is 1
assert dml.marginal_effect_inference(1).point_estimate == 1
assert dml.marginal_effect_inference(1).conf_int() == (1, 1)
assert dml.marginal_effect_interval(1) == (1, 1)
def test_sparse(self):
for _ in range(5):
# Ensure reproducibility
np.random.seed(1234)
n_p = np.random.randint(2, 5) # 2 to 4 products
d_w = np.random.randint(0, 5) # random number of covariates
min_n = np.ceil(2 + d_w * (1 + (d_w + 1) / n_p)) # minimum number of rows per product
n_r = np.random.randint(min_n, min_n + 3)
with self.subTest(n_p=n_p, d_w=d_w, n_r=n_r):
TestDML._test_sparse(n_p, d_w, n_r)
def test_linear_sparse(self):
"""SparseDML test with a sparse DGP"""
# Sparse DGP
np.random.seed(123)
n_x = 50
n_nonzero = 5
n_w = 5
n = 1000
# Treatment effect coef
a = np.zeros(n_x)
nonzero_idx = np.random.choice(n_x, size=n_nonzero, replace=False)
a[nonzero_idx] = 1
# Other coefs
b = np.zeros(n_x + n_w)
g = np.zeros(n_x + n_w)
b_nonzero = np.random.choice(n_x + n_w, size=n_nonzero, replace=False)
g_nonzero = np.random.choice(n_x + n_w, size=n_nonzero, replace=False)
b[b_nonzero] = 1
g[g_nonzero] = 1
# Features and controls
x = np.random.normal(size=(n, n_x))
w = np.random.normal(size=(n, n_w))
xw = np.hstack([x, w])
err_T = np.random.normal(size=n)
T = xw @ b + err_T
err_Y = np.random.normal(size=n, scale=0.5)
Y = T * (x @ a) + xw @ g + err_Y
# Test sparse estimator
# --> test coef_, intercept_
sparse_dml = SparseLinearDML(fit_cate_intercept=False)
sparse_dml.fit(Y, T, X=x, W=w)
np.testing.assert_allclose(a, sparse_dml.coef_, atol=2e-1)
with pytest.raises(AttributeError):
sparse_dml.intercept_
# --> test treatment effects
# Restrict x_test to vectors of norm < 1
x_test = np.random.uniform(size=(10, n_x))
true_eff = (x_test @ a)
eff = sparse_dml.effect(x_test, T0=0, T1=1)
np.testing.assert_allclose(true_eff, eff, atol=0.5)
# --> check inference
y_lower, y_upper = sparse_dml.effect_interval(x_test, T0=0, T1=1)
in_CI = ((y_lower < true_eff) & (true_eff < y_upper))
# Check that a majority of true effects lie in the 5-95% CI
self.assertTrue(in_CI.mean() > 0.8)
@staticmethod
def _generate_recoverable_errors(a_X, X, a_W=None, W=None, featurizer=None):
"""Return error vectors e_t and e_y such that OLS can recover the true coefficients from both stages."""
if W is None:
W = np.empty((shape(X)[0], 0))
if a_W is None:
a_W = np.zeros((shape(W)[1],))
# to correctly recover coefficients for T via OLS, we need e_t to be orthogonal to [W;X]
WX = hstack([W, X])
e_t = rand_sol(WX.T, np.zeros((shape(WX)[1],)))
# to correctly recover coefficients for Y via OLS, we need ([X; W]⊗[1; ϕ(X); W])⁺ e_y =
# -([X; W]⊗[1; ϕ(X); W])⁺ ((ϕ(X)⊗e_t)a_X+(W⊗e_t)a_W)
# then, to correctly recover a in the third stage, we additionally need (ϕ(X)⊗e_t)ᵀ e_y = 0
ϕ = featurizer.fit_transform(X) if featurizer is not None else X
v_X = cross_product(ϕ, e_t)
v_W = cross_product(W, e_t)
M = np.linalg.pinv(cross_product(WX, hstack([np.ones((shape(WX)[0], 1)), ϕ, W])))
e_y = rand_sol(vstack([M, v_X.T]), vstack([-M @ (v_X @ a_X + v_W @ a_W), np.zeros((shape(v_X)[1],))]))
return e_t, e_y
# sparse test case: heterogeneous effect by product
@staticmethod
def _test_sparse(n_p, d_w, n_r):
# need at least as many rows in e_y as there are distinct columns
# in [X;X⊗W;W⊗W;X⊗e_t] to find a solution for e_t
assert n_p * n_r >= 2 * n_p + n_p * d_w + d_w * (d_w + 1) / 2
a = np.random.normal(size=(n_p,)) # one effect per product
n = n_p * n_r * 100
p = np.tile(range(n_p), n_r * 100) # product id
b = np.random.normal(size=(d_w + n_p,))
g = np.random.normal(size=(d_w + n_p,))
x = np.empty((2 * n, n_p)) # product dummies
w = np.empty((2 * n, d_w))
y = np.empty(2 * n)
t = np.empty(2 * n)
for fold in range(0, 2):
x_f = OneHotEncoder().fit_transform(np.reshape(p, (-1, 1))).toarray()
w_f = np.random.normal(size=(n, d_w))
xw_f = hstack([x_f, w_f])
e_t_f, e_y_f = TestDML._generate_recoverable_errors(a, x_f, W=w_f)
t_f = xw_f @ b + e_t_f
y_f = t_f * np.choose(p, a) + xw_f @ g + e_y_f
x[fold * n:(fold + 1) * n, :] = x_f
w[fold * n:(fold + 1) * n, :] = w_f
y[fold * n:(fold + 1) * n] = y_f
t[fold * n:(fold + 1) * n] = t_f
dml = SparseLinearDML(LinearRegression(fit_intercept=False), LinearRegression(
fit_intercept=False), fit_cate_intercept=False)
dml.fit(y, t, X=x, W=w)
np.testing.assert_allclose(a, dml.coef_.reshape(-1), atol=1e-1)
eff = reshape(t * np.choose(np.tile(p, 2), a), (-1,))
np.testing.assert_allclose(eff, dml.effect(x, T0=0, T1=t), atol=1e-1)
def test_nuisance_scores(self):
X = np.random.choice(np.arange(5), size=(100, 3))
y = np.random.normal(size=(100,))
T = T0 = T1 = np.random.choice(np.arange(3), size=(100, 2))
W = np.random.normal(size=(100, 2))
for n_splits in [1, 2, 3]:
est = LinearDML(n_splits=n_splits)
est.fit(y, T, X=X, W=W)
assert len(est.nuisance_scores_t) == len(est.nuisance_scores_y) == n_splits
def test_categories(self):
dmls = [LinearDML, SparseLinearDML]
for ctor in dmls:
dml1 = ctor(LinearRegression(), LogisticRegression(C=1000),
fit_cate_intercept=False, discrete_treatment=True, random_state=123)
dml2 = ctor(LinearRegression(), LogisticRegression(C=1000),
fit_cate_intercept=False, discrete_treatment=True, categories=['c', 'b', 'a'],
random_state=123)
# create a simple artificial setup where effect of moving from treatment
# a -> b is 2,
# a -> c is 1, and
# b -> c is -1 (necessarily, by composing the previous two effects)
# Using an uneven number of examples from different classes,
# and having the treatments in non-lexicographic order,
# should rule out some basic issues.
# Note that explicitly specifying the dtype as object is necessary until
# there's a fix for https://github.com/scikit-learn/scikit-learn/issues/15616
for dml in [dml1, dml2]:
dml.fit(np.array([2, 3, 1, 3, 2, 1, 1, 1]),
np.array(['c', 'b', 'a', 'b', 'c', 'a', 'a', 'a'], dtype='object'), X=np.ones((8, 1)))
# estimated effects should be identical when treatment is explicitly given
np.testing.assert_almost_equal(
dml1.effect(
np.ones((9, 1)),
T0=np.array(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], dtype='object'),
T1=np.array(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
),
dml2.effect(
np.ones((9, 1)),
T0=np.array(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], dtype='object'),
T1=np.array(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'], dtype='object')
),
decimal=4)
            # but const_marginal_effect should be reordered based on the explicit categories
cme1 = dml1.const_marginal_effect(np.ones((1, 1))).reshape(-1)
cme2 = dml2.const_marginal_effect(np.ones((1, 1))).reshape(-1)
self.assertAlmostEqual(cme1[1], -cme2[1], places=3) # 1->3 in original ordering; 3->1 in new ordering
# 1-> 2 in original ordering; combination of 3->1 and 3->2
self.assertAlmostEqual(cme1[0], -cme2[1] + cme2[0], places=3)
def test_groups(self):
groups = [1, 2, 3, 4, 5, 6] * 10
t = groups
y = groups
est = LinearDML()
with pytest.raises(Exception): # can't pass groups without a compatible n_split
est.fit(y, t, groups=groups)
# test outer grouping
est = LinearDML(LinearRegression(), LinearRegression(), n_splits=GroupKFold(2))
est.fit(y, t, groups=groups)
# test nested grouping
class NestedModel(LassoCV):
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
precompute='auto', max_iter=1000, tol=1e-4, normalize=False,
copy_X=True, cv=None, verbose=False, n_jobs=None,
positive=False, random_state=None, selection='cyclic'):
super().__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
def fit(self, X, y):
# ensure that the grouping has worked correctly and we get all 10 copies of the items in
# whichever groups we saw
(yvals, cts) = np.unique(y, return_counts=True)
for (yval, ct) in zip(yvals, cts):
if ct != 10:
raise Exception("Grouping failed; received {0} copies of {1} instead of 10".format(ct, yval))
return super().fit(X, y)
# test nested grouping
est = LinearDML(NestedModel(cv=2), NestedModel(cv=2), n_splits=GroupKFold(2))
est.fit(y, t, groups=groups)
# by default, we use 5 split cross-validation for our T and Y models
# but we don't have enough groups here to split both the outer and inner samples with grouping
# TODO: does this imply we should change some defaults to make this more likely to succeed?
est = LinearDML(n_splits=GroupKFold(2))
with pytest.raises(Exception):
est.fit(y, t, groups=groups)
|
the-stack_106_23648 | import logging, importlib, sys, os
log = logging.getLogger('genthemall.utils')
def load_class(clazz):
"""
util method for dynamic load class.
"""
dotIdx = clazz.rindex('.')
mod = importlib.import_module(clazz[0:dotIdx])
return getattr(mod, clazz[dotIdx+1:])
def load_function(fn):
"""
util method for dynamic load function.
"""
return load_class(fn)
def load_command(cmdName):
"""
util method for dynamic load genthemall command.
"""
return load_class('genthemall.commands.Command%s%s' % \
(cmdName[0].upper(), cmdName[1:]))
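# Illustrative sketch (not part of the original module): the loaders above are thin
# wrappers around importlib, so objects can be resolved from dotted paths or CLI
# command names at runtime. The command name 'generate' below is hypothetical and
# assumes a matching class exists in genthemall.commands.
#
#   CommandClass = load_command('generate')   # -> genthemall.commands.CommandGenerate
#   fn = load_function('os.path.join')        # any importable dotted path works
#   fn('a', 'b')                              # -> 'a/b'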
def transform_config(config, _type):
"""
Transform config to some type config.
"""
try:
confFn = load_function('genthemall.conf.%s' % _type)
confFn(config)
except AttributeError:
log.error('config type [%s] not found.' % (_type))
sys.exit(1)
def copyfile(source, dest, buffer_size=1024*1024):
"""
Copy a file from source to dest. source and dest
can either be strings or any object with a read or
write method, like StringIO for example.
"""
if not hasattr(source, 'read'):
source = open(source, 'rb')
if not hasattr(dest, 'write'):
dest = open(dest, 'wb')
while 1:
copy_buffer = source.read(buffer_size)
if copy_buffer:
dest.write(copy_buffer)
else:
break
source.close()
dest.close()
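# Example sketch (added for illustration, not in the original source): because
# copyfile() only relies on .read()/.write(), in-memory buffers can be used for
# either end instead of file paths.
#
#   import io
#   src = io.BytesIO(b"hello")
#   dst = io.BytesIO()
#   copyfile(src, dst)   # copies the bytes, then closes both objects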
def copyfiles(src, dest, ignore=None):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for f in files:
if f not in ignored:
copyfiles(os.path.join(src, f),
os.path.join(dest, f),
ignore)
else:
copyfile(src, dest)
|
the-stack_106_23650 | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from mininet.util import dumpNodeConnections
from subprocess import call
import os
import time
import subprocess
path_home = os.getenv("HOME")  # Capture the path of the HOME folder
def myNetwork(i):
net = Mininet( topo=None,
build=False,
host=CPULimitedHost,
link=TCLink,
ipBase='10.0.0.0/8')
info( '*** Adding controller\n' )
c0=net.addController(name='c0',
controller=RemoteController,
ip='127.0.0.1',
protocol='tcp',
port=6633)
info( '*** Add switches\n')
s3 = net.addSwitch('s3', cls=OVSKernelSwitch)
s5 = net.addSwitch('s5', cls=OVSKernelSwitch)
s4 = net.addSwitch('s4', cls=OVSKernelSwitch)
s1 = net.addSwitch('s1', cls=OVSKernelSwitch)
s2 = net.addSwitch('s2', cls=OVSKernelSwitch)
info( '*** Add hosts\n')
h1 = net.addHost('h1', cls=Host, ip='10.0.0.1')
h2 = net.addHost('h2', cls=Host, ip='10.0.0.2')
h3 = net.addHost('h3', cls=Host, ip='10.0.0.3')
h4 = net.addHost('h4', cls=Host, ip='10.0.0.4')
h5 = net.addHost('h5', cls=Host, ip='10.0.0.5')
h6 = net.addHost('h6', cls=Host, ip='10.0.0.6')
h7 = net.addHost('h7', cls=Host, ip='10.0.0.7')
srv1 = net.addHost('srv1', cls=Host, ip='10.0.0.8')
srv2 = net.addHost('srv2', cls=Host, ip='10.0.0.9')
info( '*** Add links\n')
s1h1 = {'bw':25}
net.addLink(s1, h1, cls=TCLink , **s1h1)
s1h2 = {'bw':25}
net.addLink(s1, h2, cls=TCLink , **s1h2)
s1h3 = {'bw':25}
net.addLink(s1, h3, cls=TCLink , **s1h3)
s4h4 = {'bw':25}
net.addLink(s4, h4, cls=TCLink , **s4h4)
s4h5 = {'bw':25}
net.addLink(s4, h5, cls=TCLink , **s4h5)
s5h6 = {'bw':25}
net.addLink(s5, h6, cls=TCLink , **s5h6)
s5h7 = {'bw':25}
net.addLink(s5, h7, cls=TCLink , **s5h7)
    s3srv1 = {'bw':25}
    net.addLink(s3, srv1, cls=TCLink , **s3srv1)
    s3srv2 = {'bw':25}
    net.addLink(s3, srv2, cls=TCLink , **s3srv2)
s1s2 = {'bw':25}
net.addLink(s1, s2, cls=TCLink , **s1s2)
s3s2 = {'bw':25}
net.addLink(s3, s2, cls=TCLink , **s3s2)
s4s2 = {'bw':25}
net.addLink(s4, s2, cls=TCLink , **s4s2)
s5s2 = {'bw':25}
net.addLink(s5, s2, cls=TCLink , **s5s2)
info( '*** Starting network\n')
net.build()
info( '*** Starting controllers\n')
for controller in net.controllers:
controller.start()
info( '*** Starting switches\n')
net.get('s3').start([c0])
net.get('s5').start([c0])
net.get('s4').start([c0])
net.get('s1').start([c0])
net.get('s2').start([c0])
info( '*** Setting routes\n')
h1.cmd('route add default dev h1-eth1')
h2.cmd('route add default dev h2-eth1')
h3.cmd('route add default dev h3-eth1')
h4.cmd('route add default dev h4-eth1')
h5.cmd('route add default dev h5-eth1')
h6.cmd('route add default dev h6-eth1')
h7.cmd('route add default dev h7-eth1')
srv1.cmd('route add default dev srv1-eth1')
srv2.cmd('route add default dev srv2-eth1')
info( '*** Post configure switches and hosts\n')
dumpNodeConnections(net.hosts)
    # Install the QoS queues
os.system('python /home/bruno/ryu/Bruno/plotagem.py &')
os.system('python /home/bruno/ryu/Bruno/admin.py &')
os.system('python /home/bruno/ryu/Bruno/Resultados/dados_ovs.py '+str(i+1)+' 2 &') # 2 = 2 Iperf
    net.pingAll()  # Ping all hosts
    srv1.cmd('python /home/bruno/ryu/Bruno/EnviaPacoteUDP_Server.py &')  # Send a packet to install the QoS rules
h2.cmd('iperf -s -u &')
h3.cmd('iperf -s -u &')
    print('Iperf 1 started!!!')
    srv2.cmd('iperf -c 10.0.0.2 -u -t 500 -i 1 -b 20m &')
    print('Iperf 2 started!!!')
srv2.cmd('iperf -c 10.0.0.3 -u -t 500 -i 1 -b 20m &')
time.sleep(29)
    print('Starting server!!')
    srv1.cmd('python /home/bruno/ryu/Bruno/server_semQoS.py &')
    time.sleep(1)
    print('Starting client!!')
h1.cmd('python /home/bruno/ryu/Bruno/client_semQoS.py &')
time.sleep(1)
    print('Round: '+str(i+1))
    for r in range(32,215):
        if r%10 == 0:
            print('Time: '+str(r)+' Round: '+str(i+1))
time.sleep(1)
#CLI(net)
    info('*** Done... waiting 30 seconds!!!')
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
for i in range(0,30):
myNetwork(i)
time.sleep(30)
|
the-stack_106_23651 | from functools import cache, partial, reduce
from math import prod
import pdb
import aoc
from toolz import keyfilter
from tadhg_utils import lcompact, lfilter, lmap, splitstrip, splitstriplines
INPUT, TEST = aoc.get_inputs(__file__)
TA1 = 15
TA2 = 1134
A1 = 530
A2 = 1019494
def process_one(data):
return sum(map(lambda x: x[1][1], get_lows(data)))
def get_lows(data):
grid = []
gridd = {}
for y, row in enumerate(data):
for x, char in enumerate(row):
grid.append(aoc.Point(x=x, y=y))
gridd[aoc.Point(x=x, y=y)] = (char, None)
for pt in gridd:
above = gridd.get(aoc.Point(x=pt.x, y=pt.y - 1))
below = gridd.get(aoc.Point(x=pt.x, y=pt.y + 1))
left = gridd.get(aoc.Point(x=pt.x - 1, y=pt.y))
right = gridd.get(aoc.Point(x=pt.x + 1, y=pt.y))
adjs = lcompact([above, below, left, right])
curr = int(gridd[pt][0])
low = True
for adj in adjs:
if curr >= int(adj[0]):
low = False
if low:
gridd[pt] = (gridd[pt][0], curr + 1)
lows = lmap(
lambda x: x, lfilter(lambda x: x[1][1] is not None, gridd.items())
)
return lows
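# Note (added for clarity): each entry returned by get_lows is
# (point, (height_char, height + 1)), so the second element of the pair is the
# puzzle's "risk level"; process_one sums exactly those x[1][1] values.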
def explore(gridd, start, future, basin):
dirs = [
aoc.Point(x=start.x, y=start.y - 1),
aoc.Point(x=start.x, y=start.y + 1),
aoc.Point(x=start.x - 1, y=start.y),
aoc.Point(x=start.x + 1, y=start.y),
]
near = keyfilter(lambda x: x in dirs and x not in basin, gridd)
for pt, data in near.items():
if data and int(data[0]) not in (None, 9):
if pt not in future:
future.append(pt)
if pt not in basin:
basin.append(pt)
for pt in future:
basin = explore(gridd, pt, [], basin)
return basin
def process_two(data):
grid = {}
for y, row in enumerate(data):
for x, char in enumerate(row):
grid[aoc.Point(x=x, y=y)] = (char, None)
lows = get_lows(data)
basins = lmap(lambda x: explore(grid, x[0], [], [x[0]]), lows)
sbasins = sorted(basins, key=len)
threebasins = sbasins[-3:]
return prod(map(len, threebasins))
def cli_main() -> None:
input_funcs = [splitstriplines]
data = aoc.load_and_process_input(INPUT, input_funcs)
aoc.run_tests(TEST, TA1, TA2, A1, input_funcs, process_one, process_two)
result_one = process_one(data)
print(result_one)
result_two = process_two(data)
aoc.finish(result_one, A1, result_two, A2)
if __name__ == "__main__":
cli_main()
"""
--- Day 9: Smoke Basin ---
These caves seem to be lava tubes. Parts are even still volcanically active;
small hydrothermal vents release smoke into the caves that slowly settles like
rain.
If you can model how the smoke flows through the caves, you might be able to
avoid it and be that much safer. The submarine generates a heightmap of the
floor of the nearby caves for you (your puzzle input).
Smoke flows to the lowest point of the area it's in. For example, consider the
following heightmap:
2199943210
3987894921
9856789892
8767896789
9899965678
Each number corresponds to the height of a particular location, where 9 is the
highest and 0 is the lowest a location can be.
Your first goal is to find the low points - the locations that are lower than
any of its adjacent locations. Most locations have four adjacent locations (up,
down, left, and right); locations on the edge or corner of the map have three
or two adjacent locations, respectively. (Diagonal locations do not count as
adjacent.)
In the above example, there are four low points, all highlighted: two are in
the first row (a 1 and a 0), one is in the third row (a 5), and one is in the
bottom row (also a 5). All other locations on the heightmap have some lower
adjacent location, and so are not low points.
The risk level of a low point is 1 plus its height. In the above example, the
risk levels of the low points are 2, 1, 6, and 6. The sum of the risk levels of
all low points in the heightmap is therefore 15.
Find all of the low points on your heightmap. What is the sum of the risk
levels of all low points on your heightmap?
Your puzzle answer was 530.
--- Part Two ---
Next, you need to find the largest basins so you know what areas are most
important to avoid.
A basin is all locations that eventually flow downward to a single low point.
Therefore, every low point has a basin, although some basins are very small.
Locations of height 9 do not count as being in any basin, and all other
locations will always be part of exactly one basin.
The size of a basin is the number of locations within the basin, including the
low point. The example above has four basins.
The top-left basin, size 3:
2199943210
3987894921
9856789892
8767896789
9899965678
The top-right basin, size 9:
2199943210
3987894921
9856789892
8767896789
9899965678
The middle basin, size 14:
2199943210
3987894921
9856789892
8767896789
9899965678
The bottom-right basin, size 9:
2199943210
3987894921
9856789892
8767896789
9899965678
Find the three largest basins and multiply their sizes together. In the above
example, this is 9 * 14 * 9 = 1134.
What do you get if you multiply together the sizes of the three largest basins?
Your puzzle answer was 1019494.
"""
|
the-stack_106_23652 | from __future__ import unicode_literals
import json
from urllib.parse import urlencode
import re
import sure # noqa
import moto.server as server
"""
Test the different server responses
"""
def test_cloudformation_server_get():
backend = server.create_backend_app("cloudformation")
stack_name = "test stack"
test_client = backend.test_client()
template_body = {"Resources": {}}
create_stack_resp = test_client.action_data(
"CreateStack", StackName=stack_name, TemplateBody=json.dumps(template_body)
)
create_stack_resp.should.match(
r"<CreateStackResponse>.*<CreateStackResult>.*<StackId>.*</StackId>.*</CreateStackResult>.*</CreateStackResponse>",
re.DOTALL,
)
stack_id_from_create_response = re.search(
"<StackId>(.*)</StackId>", create_stack_resp
).groups()[0]
list_stacks_resp = test_client.action_data("ListStacks")
stack_id_from_list_response = re.search(
"<StackId>(.*)</StackId>", list_stacks_resp
).groups()[0]
stack_id_from_create_response.should.equal(stack_id_from_list_response)
|
the-stack_106_23658 | INCLUDE_VALUE_IN_NAME = ["display"]
class StyleMap():
map = {}
def __init__(self, map):
self.map = map
pass
def removeById(self, id):
del self.map[id]
def getElementIds(self):
return set(self.map.keys())
def renameId(self, old_id, new_id):
if old_id in self.map:
self.map[new_id] = self.map[old_id]
del self.map[old_id]
def all_style_names(self):
"""Return all style names in this style map as a set
Include style values for certain styles"""
all_styles = set()
for _elementid, styles in self.map.items():
for style_name, style_value in styles.items():
if style_name in INCLUDE_VALUE_IN_NAME:
all_styles.add(f"{style_name}:{style_value}")
else:
all_styles.add(style_name)
return all_styles
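    # Illustration (hypothetical element id and styles): given
    #   StyleMap({"hero": {"display": "flex", "color": "red"}})
    # all_style_names() returns {"display:flex", "color"}, because "display" is
    # listed in INCLUDE_VALUE_IN_NAME and so keeps its value in the name.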
def toJS(self):
"""
Create a string that will make style changes in javascript
Example Output:
var abeofmwlekrifj = document.getElementById("abeofmwlekrifj");
if (abeofmwlekrifj) {
abeofmwlekrifj.style["min-width"] = "200px";
abeofmwlekrifj.style["margin-left"] = "10em";
}
var zomelfjeiwle = document.getElementById("zomelfjeiwle");
if (zomelfjeiwle) {
zomelfjeiwle.style["background-color"] = "blue";
}
"""
ret_string = ""
for (elementId, styles) in self.map.items():
elementStyles = list(styles.items())
elementStyles.sort() # Sort alphabetical order by style name (to enforce the same order every time)
if elementStyles:
ret_string += f'var {elementId} = document.getElementById("{elementId}");\n'
ret_string += 'if (' + elementId + ') {\n'
for (style_name, style_value) in elementStyles:
ret_string += f' {elementId}.style["{style_name}"] = "{style_value}";\n'
ret_string += '}\n'
return ret_string |
the-stack_106_23659 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
import time
from six import string_types
from six.moves.urllib.request import urlopen
from .errors import RetryError
from .structures import LazyFunction
from .subprocess import run_command
from .utils import file_exists
class CheckEndpoints(LazyFunction):
def __init__(self, endpoints, timeout=1, attempts=60, wait=1):
self.endpoints = [endpoints] if isinstance(endpoints, string_types) else endpoints
self.timeout = timeout
self.attempts = attempts
self.wait = wait
def __call__(self):
last_endpoint = ''
last_error = ''
for _ in range(self.attempts):
for endpoint in self.endpoints:
last_endpoint = endpoint
try:
request = urlopen(endpoint, timeout=self.timeout)
except Exception as e:
last_error = str(e)
break
else:
status_code = request.getcode()
if 400 <= status_code < 600:
last_error = 'status {}'.format(status_code)
break
else:
break
time.sleep(self.wait)
else:
raise RetryError(
'Endpoint: {}\n'
'Error: {}'.format(
last_endpoint,
last_error
)
)
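# Usage sketch (illustrative only; the endpoint URL is a placeholder): a
# LazyFunction subclass is invoked by calling the instance, so environment setup
# code can retry until a service answers with a non-error status code.
#
#   check = CheckEndpoints('http://localhost:8080/health', attempts=30, wait=2)
#   check()   # raises RetryError if the endpoint never becomes healthy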
class CheckCommandOutput(LazyFunction):
def __init__(self, command, patterns, matches=1, stdout=True, stderr=True, attempts=60, wait=1):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.attempts = attempts
self.wait = wait
if not (self.stdout or self.stderr):
raise ValueError('Must capture stdout, stderr, or both.')
if isinstance(patterns, string_types):
patterns = [patterns]
self.patterns = [
re.compile(pattern, re.M) if isinstance(pattern, string_types) else pattern
for pattern in patterns
]
if matches == 'all':
self.matches = len(patterns)
else:
self.matches = matches
def __call__(self):
log_output = ''
exit_code = 0
for _ in range(self.attempts):
result = run_command(self.command, capture=True)
exit_code = result.code
if self.stdout and self.stderr:
log_output = result.stdout + result.stderr
elif self.stdout:
log_output = result.stdout
else:
log_output = result.stderr
matches = 0
for pattern in self.patterns:
if pattern.search(log_output):
matches += 1
if matches >= self.matches:
return matches
time.sleep(self.wait)
else:
raise RetryError(
'Command: {}\n'
'Exit code: {}\n'
'Captured Output: {}'.format(
self.command,
exit_code,
log_output
)
)
class CheckDockerLogs(CheckCommandOutput):
def __init__(self, identifier, patterns, matches=1, stdout=True, stderr=True, attempts=60, wait=1):
if file_exists(identifier):
command = ['docker-compose', '-f', identifier, 'logs']
else:
command = ['docker', 'logs', identifier]
super(CheckDockerLogs, self).__init__(
command, patterns, matches=matches, stdout=stdout, stderr=stderr, attempts=attempts, wait=wait
)
self.identifier = identifier
|
the-stack_106_23660 | from ...plugin import hook
from .trezor import TrezorPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(TrezorPlugin):
handler = CmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
def create_handler(self, window):
return self.handler
|
the-stack_106_23662 | # -*- coding: utf-8 -*-
"""
Provides support to compose user-defined parse types.
Cardinality
------------
It is often useful to constrain how often a data type occurs.
This is also called the cardinality of a data type (in a context).
The supported cardinality are:
* 0..1 zero_or_one, optional<T>: T or None
* 0..N zero_or_more, list_of<T>
* 1..N one_or_more, list_of<T> (many)
.. doctest:: cardinality
>>> from parse_type import TypeBuilder
>>> from parse import Parser
>>> def parse_number(text):
... return int(text)
>>> parse_number.pattern = r"\d+"
>>> parse_many_numbers = TypeBuilder.with_many(parse_number)
>>> more_types = { "Numbers": parse_many_numbers }
>>> parser = Parser("List: {numbers:Numbers}", more_types)
>>> parser.parse("List: 1, 2, 3")
<Result () {'numbers': [1, 2, 3]}>
Enumeration Type (Name-to-Value Mappings)
-----------------------------------------
An Enumeration data type allows to select one of several enum values by using
its name. The converter function returns the selected enum value.
.. doctest:: make_enum
>>> parse_enum_yesno = TypeBuilder.make_enum({"yes": True, "no": False})
>>> more_types = { "YesNo": parse_enum_yesno }
>>> parser = Parser("Answer: {answer:YesNo}", more_types)
>>> parser.parse("Answer: yes")
<Result () {'answer': True}>
Choice (Name Enumerations)
-----------------------------
A Choice data type allows to select one of several strings.
.. doctest:: make_choice
>>> parse_choice_yesno = TypeBuilder.make_choice(["yes", "no"])
>>> more_types = { "ChoiceYesNo": parse_choice_yesno }
>>> parser = Parser("Answer: {answer:ChoiceYesNo}", more_types)
>>> parser.parse("Answer: yes")
<Result () {'answer': 'yes'}>
"""
from __future__ import absolute_import
from .cardinality import \
Cardinality, TypeBuilder as CardinalityTypeBuilder
from parse_type.cardinality import pattern_group_count
import enum
import inspect
import re
__all__ = ["TypeBuilder", "build_type_dict", "parse_anything"]
class TypeBuilder(CardinalityTypeBuilder):
"""
Provides a utility class to build type-converters (parse_types) for
the :mod:`parse` module.
"""
default_strict = True
default_re_opts = (re.IGNORECASE | re.DOTALL)
@classmethod
def make_list(cls, item_converter=None, listsep=','):
"""
Create a type converter for a list of items (many := 1..*).
The parser accepts anything and the converter needs to fail on errors.
:param item_converter: Type converter for an item.
:param listsep: List separator to use (as string).
:return: Type converter function object for the list.
"""
if not item_converter:
item_converter = parse_anything
return cls.with_cardinality(Cardinality.many, item_converter,
pattern=cls.anything_pattern, listsep=listsep)
@staticmethod
def make_enum(enum_mappings):
"""
Creates a type converter for an enumeration or text-to-value mapping.
:param enum_mappings: Defines enumeration names and values.
:return: Type converter function object for the enum/mapping.
"""
if (inspect.isclass(enum_mappings) and
issubclass(enum_mappings, enum.Enum)):
enum_class = enum_mappings
enum_mappings = enum_class.__members__
def convert_enum(text):
if text not in convert_enum.mappings:
text = text.lower() # REQUIRED-BY: parse re.IGNORECASE
return convert_enum.mappings[text] #< text.lower() ???
convert_enum.pattern = r"|".join(enum_mappings.keys())
convert_enum.mappings = enum_mappings
return convert_enum
@staticmethod
def _normalize_choices(choices, transform):
assert transform is None or callable(transform)
if transform:
choices = [transform(value) for value in choices]
else:
choices = list(choices)
return choices
@classmethod
def make_choice(cls, choices, transform=None, strict=None):
"""
Creates a type-converter function to select one from a list of strings.
The type-converter function returns the selected choice_text.
The :param:`transform()` function is applied in the type converter.
It can be used to enforce the case (because parser uses re.IGNORECASE).
:param choices: List of strings as choice.
:param transform: Optional, initial transform function for parsed text.
:return: Type converter function object for this choices.
"""
# -- NOTE: Parser uses re.IGNORECASE flag
# => transform may enforce case.
choices = cls._normalize_choices(choices, transform)
if strict is None:
strict = cls.default_strict
def convert_choice(text):
if transform:
text = transform(text)
if strict and not (text in convert_choice.choices):
values = ", ".join(convert_choice.choices)
raise ValueError("%s not in: %s" % (text, values))
return text
convert_choice.pattern = r"|".join(choices)
convert_choice.choices = choices
return convert_choice
@classmethod
def make_choice2(cls, choices, transform=None, strict=None):
"""
Creates a type converter to select one item from a list of strings.
The type converter function returns a tuple (index, choice_text).
:param choices: List of strings as choice.
:param transform: Optional, initial transform function for parsed text.
:return: Type converter function object for this choices.
"""
choices = cls._normalize_choices(choices, transform)
if strict is None:
strict = cls.default_strict
def convert_choice2(text):
if transform:
text = transform(text)
if strict and not (text in convert_choice2.choices):
values = ", ".join(convert_choice2.choices)
raise ValueError("%s not in: %s" % (text, values))
index = convert_choice2.choices.index(text)
return index, text
convert_choice2.pattern = r"|".join(choices)
convert_choice2.choices = choices
return convert_choice2
@classmethod
def make_variant(cls, converters, re_opts=None, compiled=False, strict=True):
"""
Creates a type converter for a number of type converter alternatives.
The first matching type converter is used.
REQUIRES: type_converter.pattern attribute
:param converters: List of type converters as alternatives.
:param re_opts: Regular expression options zu use (=default_re_opts).
:param compiled: Use compiled regexp matcher, if true (=False).
:param strict: Enable assertion checks.
:return: Type converter function object.
.. note::
Works only with named fields in :class:`parse.Parser`.
Parser needs group_index delta for unnamed/fixed fields.
This is not supported for user-defined types.
Otherwise, you need to use :class:`parse_type.parse.Parser`
(patched version of the :mod:`parse` module).
"""
# -- NOTE: Uses double-dispatch with regex pattern rematch because
# match is not passed through to primary type converter.
assert converters, "REQUIRE: Non-empty list."
if len(converters) == 1:
return converters[0]
if re_opts is None:
re_opts = cls.default_re_opts
pattern = r")|(".join([tc.pattern for tc in converters])
pattern = r"("+ pattern + ")"
group_count = len(converters)
for converter in converters:
group_count += pattern_group_count(converter.pattern)
if compiled:
convert_variant = cls.__create_convert_variant_compiled(converters,
re_opts, strict)
else:
convert_variant = cls.__create_convert_variant(re_opts, strict)
convert_variant.pattern = pattern
convert_variant.converters = tuple(converters)
convert_variant.group_count = group_count
return convert_variant
@staticmethod
def __create_convert_variant(re_opts, strict):
# -- USE: Regular expression pattern (compiled on use).
def convert_variant(text, m=None):
for converter in convert_variant.converters:
if re.match(converter.pattern, text, re_opts):
return converter(text)
# -- pragma: no cover
assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text
return None
return convert_variant
@staticmethod
def __create_convert_variant_compiled(converters, re_opts, strict):
# -- USE: Compiled regular expression matcher.
for converter in converters:
matcher = getattr(converter, "matcher", None)
if not matcher:
converter.matcher = re.compile(converter.pattern, re_opts)
def convert_variant(text, m=None):
for converter in convert_variant.converters:
if converter.matcher.match(text):
return converter(text)
# -- pragma: no cover
assert not strict, "OOPS-VARIANT-MISMATCH: %s" % text
return None
return convert_variant
def build_type_dict(converters):
"""
Builds type dictionary for user-defined type converters,
used by :mod:`parse` module.
This requires that each type converter has a "name" attribute.
:param converters: List of type converters (parse_types)
:return: Type converter dictionary
"""
more_types = {}
for converter in converters:
assert callable(converter)
more_types[converter.name] = converter
return more_types
# -----------------------------------------------------------------------------
# COMMON TYPE CONVERTERS
# -----------------------------------------------------------------------------
def parse_anything(text, match=None, match_start=0):
"""
Provides a generic type converter that accepts anything and returns
the text (unchanged).
:param text: Text to convert (as string).
:return: Same text (as string).
"""
return text
parse_anything.pattern = TypeBuilder.anything_pattern
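# Illustrative example (not part of the upstream module): make_variant() builds a
# converter that tries each alternative's pattern in order, which is handy for a
# field that may be either a number or a word.
#
#   >>> def parse_word(text):
#   ...     return text
#   >>> parse_word.pattern = r"\w+"
#   >>> def parse_number(text):
#   ...     return int(text)
#   >>> parse_number.pattern = r"\d+"
#   >>> parse_number_or_word = TypeBuilder.make_variant([parse_number, parse_word])
#   >>> parse_number_or_word("42"), parse_number_or_word("abc")
#   (42, 'abc')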
# -----------------------------------------------------------------------------
# Copyright (c) 2012-2013 by Jens Engel (https://github/jenisys/parse_type)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
the-stack_106_23663 | from dataclasses import dataclass, field
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from braceexpand import braceexpand
from datasets import Dataset, load_dataset
from .text import TextNormalizer
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
def __post_init__(self):
self.multi_hosts = jax.process_count() > 1
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
# for list of files, split training data shards by host
if (
isinstance(self.train_file, list)
and self.multi_hosts
and self.shard_by_host
):
self.train_file = self.train_file[
jax.process_index() :: jax.process_count()
]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
if self.streaming:
# we need to shuffle early in streaming mode
if hasattr(self, "train_dataset"):
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
else:
# prepare rng for later shuffling
if self.seed_dataset is None:
self.seed_dataset = np.random.get_state()[1][0]
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
# normalize text
if normalize_text:
text_normalizer = TextNormalizer()
partial_normalize_function = partial(
normalize_function,
text_column=self.text_column,
text_normalizer=text_normalizer,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
                            remove_columns=getattr(self, ds).column_names,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
def dataloader(self, split, batch_size, epoch=None):
def _dataloader_datasets_non_streaming(
dataset: Dataset,
rng: jax.random.PRNGKey = None,
):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if rng is set.
"""
steps_per_epoch = len(dataset) // batch_size
if rng is not None:
batch_idx = jax.random.permutation(rng, len(dataset))
else:
batch_idx = jnp.arange(len(dataset))
batch_idx = batch_idx[
: steps_per_epoch * batch_size
] # Skip incomplete batch.
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
for idx in batch_idx:
batch = dataset[idx]
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
def _dataloader_datasets_streaming(
dataset: Dataset,
epoch: int,
):
keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
batch = {k: [] for k in keys}
first_loop = True # stop after one loop in some cases
while (self.multi_hosts and split == "train") or first_loop:
# in multi-host, we run forever (no epoch) as hosts need to stop
# at the same time and training data may not be split equally
# For validation data we put the entire set on each host as we could lose
# too many samples on pods
if epoch is not None:
assert split == "train"
# reshuffle training data at each epoch
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k, v in item.items():
batch[k].append(v)
if len(batch[keys[0]]) == batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(ds, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, input_rng)
@property
def length(self):
len_train_dataset, len_eval_dataset = None, None
if self.streaming:
# we don't know the length, let's just assume max_samples if defined
if self.max_train_samples is not None:
len_train_dataset = self.max_train_samples
if self.max_eval_samples is not None:
len_eval_dataset = self.max_eval_samples
else:
len_train_dataset = (
len(self.train_dataset) if hasattr(self, "train_dataset") else None
)
len_eval_dataset = (
len(self.eval_dataset) if hasattr(self, "eval_dataset") else None
)
return len_train_dataset, len_eval_dataset
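# Rough usage sketch (illustrative; the repo id, tokenizer and model below are
# placeholders, not values used by this module):
#
#   ds = Dataset(dataset_repo_or_path="user/encoded-captions", streaming=True,
#                do_train=True, text_column="caption", encoding_column="encoding")
#   ds.preprocess(tokenizer=tokenizer,
#                 decoder_start_token_id=model.config.decoder_start_token_id,
#                 normalize_text=False, max_length=64)
#   for batch in ds.dataloader("train", batch_size=8, epoch=0):
#       ...  # batch holds input_ids, attention_mask, labels, decoder_input_ids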
def shift_tokens_right(input_ids: np.array, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
return shifted_input_ids
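# Worked example (for illustration): with decoder_start_token_id = 0,
#   input_ids = [[5, 6, 7]]  ->  shifted = [[0, 5, 6]]
# i.e. the start token id is prepended and the last token is dropped.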
def normalize_function(example, text_column, text_normalizer):
example[text_column] = text_normalizer(example[text_column])
return example
def preprocess_function(
examples,
tokenizer,
text_column,
encoding_column,
max_length,
decoder_start_token_id,
):
inputs = examples[text_column]
# Setting padding="max_length" as we need fixed length inputs for jitted functions
model_inputs = tokenizer(
inputs,
max_length=max_length,
padding="max_length",
truncation=True,
return_tensors="np",
)
# set up targets
# Note: labels correspond to our target indices
# decoder input ids are the same but shifted to the right with bos at the beginning (and without last token)
labels = examples[encoding_column]
labels = np.asarray(labels)
# We need the labels, in addition to the decoder_input_ids, for the compute_loss function
model_inputs["labels"] = labels
# In our case, this prepends the bos token and removes the last one
decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id)
model_inputs["decoder_input_ids"] = decoder_input_ids
return model_inputs
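# Minimal wiring sketch (illustrative only; the tokenizer, column names and
# max_length below are assumptions, not values taken from this module):
#
#   from functools import partial
#   partial_preprocess_function = partial(
#       preprocess_function,
#       tokenizer=tokenizer,
#       text_column="caption",
#       encoding_column="encoding",
#       max_length=64,
#       decoder_start_token_id=decoder_start_token_id,
#   )
#   dataset = dataset.map(partial_preprocess_function, batched=True)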
|
the-stack_106_23665 | import argparse
import gym
from dqn.trainer import Trainer
from dqn.tester import Tester
from envs.env_wrappers import wrap_dqn
def main():
parser = argparse.ArgumentParser(description='Deep Q Network')
    # Environment parameters
parser.add_argument('--env_name', default='PongNoFrameskip-v4', help='Environment name')
parser.add_argument('--width', type=int, default=84, help='Width of resized frame')
parser.add_argument('--height', type=int, default=84, help='Height of resized frame')
    # DQN algorithm parameters
parser.add_argument('--tmax', type=int, default=2000000, help='Number of action selections to finish learning.')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of training cases over which each SGD update is computed.')
parser.add_argument('--mem_size', type=int, default=10000,
help='SGD updates are sampled from this number of most recent frames.')
parser.add_argument('--history_len', type=int, default=4,
help='Number of most recent frames experienced '
'by the agent that are given as input to the Q-Network.')
parser.add_argument('--update_freq', type=int, default=1000,
help='Frequency (measured in the number of action selections) '
'with which the target network is updated.')
parser.add_argument('--discount_fact', type=float, default=0.99,
help='Discount factor gamma used in the Q-Learning update.')
parser.add_argument('--action_repeat', type=int, default=4,
help='Repeat each action selected by the agent this many times.')
parser.add_argument('--learn_freq', type=int, default=4,
help='Number of actions selected by the agent between successive SGD updates.')
parser.add_argument('--learn_rate', type=float, default=1e-4, help='Learning rate used by Adam.')
parser.add_argument('--fin_expl', type=float, default=0.01, help='Final value of ε in ε-greedy exploration.')
parser.add_argument('--expl_frac', type=float, default=0.1,
help='Fraction of entire training period over which the value of ε is annealed.')
parser.add_argument('--replay_st_size', type=int, default=10000,
help='Uniform random policy is run for this number of frames before learning starts '
'and the resulting experience is used to populate the replay memory.')
parser.add_argument('--no_op_max', type=int, default=30,
help='Maximum number of "do nothing" actions to be performed '
'by the agent at the start of an episode.')
    # Training-time settings
parser.add_argument('--test', action='store_true', help='Whether to test')
parser.set_defaults(test=False)
parser.add_argument('--render', action='store_true', help='Whether to render')
parser.set_defaults(render=False)
parser.add_argument('--save_network_freq', type=int, default=100000,
help='Frequency (measured in the number of action selections) '
'with which the Q-Network is saved.')
parser.add_argument('--save_network_path', default='saved_networks', help='Path to save Q-Network.')
parser.add_argument('--save_summary_path', default='summary', help='Path to save summary.')
parser.add_argument('--save_option_name', default='', help='Option saving name')
args = parser.parse_args()
env = gym.make(args.env_name)
env = wrap_dqn(env, args.history_len, args.action_repeat, args.no_op_max)
    # Run training or testing
if args.test:
tester = Tester(env, args)
tester.test()
else:
trainer = Trainer(env, args)
trainer.learn()
if __name__ == '__main__':
main()
|
the-stack_106_23666 | from reportlab.lib.pagesizes import letter, landscape
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_CENTER
from reportlab.lib import colors
from reportlab.graphics.charts.piecharts import Pie
from reportlab.graphics.shapes import Drawing  # used for the pie chart and bar graph canvases below
from reportlab.graphics.charts.barcharts import VerticalBarChart
from reportlab.graphics.charts.textlabels import Label
import energyusage.convert as convert
import energyusage.evaluate as evaluate
import locate
import math
year = "2016"
styles = getSampleStyleSheet()
TitleStyle = ParagraphStyle(name='Normal', fontSize=16, alignment= TA_CENTER, fontName="Times-Bold")
SubtitleStyle = ParagraphStyle(name='Normal',fontSize=12, alignment= TA_CENTER, fontName="Times-Roman")
# MonospacedSubtitleStyle = ParagraphStyle(name='Normal',fontSize=12, alignment= TA_CENTER, fontName="Courier")
HeaderStyle = ParagraphStyle(name='Normal',fontSize=16)
SubheaderStyle = ParagraphStyle(name='Normal', fontName="Times-Roman")
DescriptorStyle = ParagraphStyle(name='Normal',fontSize=14, alignment= TA_CENTER)
BodyTextStyle = styles["BodyText"]
def bold(text):
return "<b>"+text+"</b>"
def title(text, Elements, style=TitleStyle, klass=Paragraph, sep=0.3):
""" Creates title of report """
t = klass(bold(text), style)
Elements.append(t)
def subtitle(text, Elements, style=SubtitleStyle, klass=Paragraph, sep=0.1, spaceBefore=True, spaceAfter = True):
""" Creates descriptor text for a (sub)section; sp adds space before text """
s = Spacer(0, 1.5*sep*inch)
if spaceBefore:
Elements.append(s)
d = klass(text, style)
Elements.append(d)
if spaceAfter:
Elements.append(s)
def readings_and_mix_table(reading_data, mix_data, breakdown, state_emission, location, Elements):
'''
Creates 2 tables that are then embedded as the columns of 1 bigger table
'''
    no_cols = 1
col_size = 4.5
readings_table = Table(reading_data, no_cols*[col_size/2*inch], 5*[0.25*inch] + [0.3*inch], hAlign="LEFT")
readings_table.setStyle(TableStyle([('FONT', (0,0), (-1,-1), "Times-Roman"),
('FONT', (0,0), (-1,0), "Times-Bold"),
('FONTSIZE', (0,0), (-1,-1), 12),
('FONTSIZE', (0,0), (-1,0), 13),
('ALIGN', (0,0), (0,-1), "RIGHT"),
('VALIGN', (-1,-1), (-1,-1), "TOP")]))
d = Drawing(100, 100)
pc = Pie()
data = []
if state_emission:
data = ["Coal", "Oil", "Natural Gas", "Low Carbon"]
else:
data = ["Coal", "Petroleum", "Natural Gas", "Low Carbon"]
for i in range(4):
data[i] += ": " + str(round(breakdown[i], 1)) + "%"
pc.x = 45
pc.y = 0
pc.width = 55
pc.height = 55
pc.data = breakdown[:4]
pc.slices[0].fillColor = colors.Color(202.0/255, 0.0/255, 32.0/255)
pc.slices[1].fillColor = colors.Color(244.0/255, 165.0/255, 130.0/255)
pc.slices[2].fillColor = colors.Color(5.0/255, 113.0/255, 176.0/255)
pc.slices[3].fillColor = colors.Color(146.0/255, 197.0/255, 222.0/255)
pc.labels = data
pc.slices.strokeWidth=0.5
pc.sideLabels = True
d.add(pc)
mix_data = [['Energy Mix Data'], [d], ['Location: ' + location]]
mix_table = Table(mix_data, no_cols*[col_size/2*inch], [.25*inch, 1*inch, .3*inch], hAlign="RIGHT")
mix_table.setStyle(TableStyle([('FONT', (0,0), (-1,-1), "Times-Roman"),
('FONT', (0,0), (0,0), "Times-Bold"),
('FONTSIZE', (0,0), (0,0), 13),
('FONTSIZE', (-1,-1), (-1,-1), 12),
('ALIGN', (0,0), (0,0), "LEFT")]))
table_data = [(readings_table, mix_table)]
t = Table(table_data, [4.25*inch, 3*inch], hAlign='CENTER')
t.setStyle(TableStyle([('VALIGN', (-1,-1), (-1,-1), "TOP")]))
Elements.append(t)
def kwh_and_emissions_table(data, Elements):
s = Spacer(9*inch, .2*inch)
Elements.append(s)
no_rows = 1
no_cols = 2
col_size = 2
t = Table(data, [2.75*inch, 2.15*inch],[.25*inch, .29*inch], hAlign="CENTER")
t.setStyle([('FONT',(0,0),(-1,-1),"Times-Roman"),
('FONT',(0,0),(0,-1),"Times-Bold"),
('FONTSIZE', (0,0), (-1,-1), 12),
('ALIGN', (0,0), (0,-1), "RIGHT"),
('ALIGN',(1,1),(1,-1), "LEFT"),
('BOX', (0,0), (-1,-1), 1, colors.black),
('VALIGN', (0,0), (-1,-1), "TOP")])
Elements.append(t)
def equivs_and_emission_equivs(equivs_data, emissions_data, Elements):
'''
Creates a table with 2 columns, each with their own embedded table
The embedded tables contain 2 vertically-stacked tables, one for the header
and the other one for the actual data in order to have better alignment
The first row of the 2nd vertically-stacked table is smaller than the rest in
order to remove the extra space and make these tables look cohesive with the
energy usage readings and energy mix tables
Setup:
* Table(data[array of arrays, one for each row], [column widths], [row heights])
* Spacer(width, height)
'''
s = Spacer(9*inch, .2*inch)
Elements.append(s)
no_rows = 1
no_cols = 1
col_size = 4.5
equivs_header_data = [["Assumed Carbon Equivalencies"]]
# Table(data)
equivs_header_table = Table(equivs_header_data, [3*inch], [.25*inch])
equivs_header_table.setStyle(TableStyle([('FONT',(0,0),(0,-1),"Times-Bold"),
('FONTSIZE', (0,0), (-1,-1), 13)]))
equivs_data_table = Table(equivs_data, [1*inch, 2*inch], [0.17*inch, 0.25*inch, 0.25*inch, 0.25*inch],hAlign="LEFT")
equivs_data_table.setStyle(TableStyle([('FONT', (0,0), (-1,-1), "Times-Roman"),
('FONTSIZE', (0,0), (-1,-1), 12),
('ALIGN', (0,0), (0,-1), "RIGHT"),
('VALIGN', (-1,-1), (-1,-1), "TOP")]))
t1_data = [[equivs_header_table],[equivs_data_table]]
t1 = Table(t1_data, [3*inch])
emission_equiv_para = Paragraph('<font face="times" size=13><strong>CO<sub rise = -10 size = 8>2</sub>' +
' Emissions Equivalents</strong></font>', style = styles["Normal"])
emissions_header_data = [[emission_equiv_para]]
emissions_header_table = Table(emissions_header_data, [3*inch], [.25*inch])
emissions_header_table.setStyle(TableStyle([('FONT',(0,0),(0,-1),"Times-Bold"),
('FONTSIZE', (0,0), (-1,-1), 13)]))
emissions_data_table = Table(emissions_data, [2.1*inch, 1.5*inch], [0.17*inch, 0.25*inch, 0.25*inch],hAlign="LEFT")
emissions_data_table.setStyle(TableStyle([('FONT', (0,0), (-1,-1), "Times-Roman"),
('FONTSIZE', (0,0), (-1,-1), 12),
('ALIGN', (0,0), (0,-1), "RIGHT"),
('VALIGN', (-1,-1), (-1,-1), "TOP")]))
t2_data = [[emissions_header_table],[emissions_data_table]]
t2 = Table(t2_data, [3*inch])
table_data = [(t1, t2)]
t = Table(table_data, [4.25*inch, 3*inch], hAlign='CENTER')
t.setStyle(TableStyle([('VALIGN', (-1,-1), (-1,-1), "TOP")]))
Elements.append(t)
def gen_bar_graphs(comparison_values, location, emission):
bc = VerticalBarChart()
labels = []
data = []
comparison_values.append([location, emission])
comparison_values.sort(key = lambda x: x[1])
for pair in comparison_values:
labels.append(pair[0])
data.append(pair[1])
data = [data]
location_index = labels.index(location)
bc.x = -150
bc.y = -110
bc.height = 100
bc.width = 150
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = data[0][-1] + data[0][-1] * .1
distance = abs(int(math.log10(abs(data[0][-1])))) + 1 # distance of 1 significant figure to decimal point
bc.valueAxis.valueStep = float(format(data[0][-1], '.1g')) / 3
bc.valueAxis.labelTextFormat = '%0.' + str(distance) + 'g'
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = labels
for i in range(len(labels)):
bc.bars[(0, i)].fillColor = colors.Color(166.0/255, 189.0/255, 219.0/255)
bc.bars[(0, location_index)].fillColor = colors.Color(28.0/255, 144.0/255, 153.0/255)
return bc
def comparison_graphs(comparison_values, location, emission, default_emissions, default_location, Elements):
s = Spacer(9*inch, .2*inch)
Elements.append(s)
drawing = Drawing(0, 0)
if not default_location:
bc = gen_bar_graphs(comparison_values, location, emission)
bc.y = -120
bc.height = 125
bc.width = 300
drawing.add(bc)
else:
bc1 = gen_bar_graphs(default_emissions[:3], location, emission)
bc2 = gen_bar_graphs(default_emissions[3:6], location, emission)
bc3 = gen_bar_graphs(default_emissions[6:], location, emission)
offset = -257
bc1.x = -10 + offset
bc2.x = 190 + offset
bc3.x = 390 + offset
drawing.add(bc1)
drawing.add(bc2)
drawing.add(bc3)
label_offset = offset + 80
label1, label2, label3 = Label(), Label(), Label()
label1.setText("Global (excluding Europe and US)")
label1.x, label1.y = -17 + label_offset, -160
label1.fontName = "Times-Bold"
label2.setText("Europe")
label2.x, label2.y = 185 + label_offset, -160
label2.fontName = "Times-Bold"
label3.setText("United States")
label3.x, label3.y = 385 + label_offset, -160
label3.fontName = "Times-Bold"
drawing.add(label1)
drawing.add(label2)
drawing.add(label3)
if_elsewhere_para = Paragraph('<font face="times" size=12>Kilograms of CO<sub rise = -10 size' +
' = 8>2 </sub> emissions for the function if the computation had been performed elsewhere</font>', style = styles["Normal"])
graph_data = [['Emission Comparison'], [if_elsewhere_para], [drawing]]
graph_table = Table(graph_data, [6.5*inch], [.25*inch, .25*inch, .25*inch], hAlign="CENTER")
graph_table.setStyle(TableStyle([('FONT', (0,0), (0,0), "Times-Bold"),
('FONT', (0,1),(0,1),"Times-Roman"),
('FONTSIZE', (0,0), (0,0), 13),
('FONTSIZE', (0,1), (0,1), 12),
('ALIGN', (0,0), (-1,-1), "CENTER")]))
Elements.append(graph_table)
def report_header(kwh, emission, Elements):
effective_emission = Paragraph('<font face="times" size=12>{:.2e} kg CO<sub rise = -10 size = 8>2 </sub></font>'.format(emission), style = styles["Normal"])
# Total kWhs used and effective emissions
kwh_and_emissions_data = [["Total kilowatt hours used:", "{:.2e} kWh".format(kwh)],
["Effective emissions:", effective_emission]]
kwh_and_emissions_table(kwh_and_emissions_data, Elements)
def report_equivalents(emission, state_emission, Elements):
# Equivalencies and CO2 emission equivalents
per_house = Paragraph('<font face="times" size=12>% of CO<sub rise = -10 size = 8>2</sub> per US house/day:</font>'.format(emission), style = styles["Normal"])
emissions_data = [
['Miles driven:', "{:.2e} miles".format(convert.carbon_to_miles(emission))],
['Min. of 32-in. LCD TV:', "{:.2e} minutes".format(convert.carbon_to_tv(emission))],
[per_house, \
"{:.2e}%".format(convert.carbon_to_home(emission))]]
coal_para = Paragraph('<font face="times" size=12>996 kg CO<sub rise = -10 size = 8>2 </sub>/MWh</font>', style = styles["Normal"])
oil_para = Paragraph('<font face="times" size=12>817 kg CO<sub rise = -10 size = 8>2 </sub>/MWh</font>', style = styles["Normal"])
gas_para = Paragraph('<font face="times" size=12>744 kg CO<sub rise = -10 size = 8>2 </sub>/MWh</font>', style = styles["Normal"])
low_para = Paragraph('<font face="times" size=12>0 kg CO<sub rise = -10 size = 8>2 </sub>/MWh</font>', style = styles["Normal"])
if state_emission:
equivs_data = [['Coal:', coal_para],
['Oil:', oil_para],
['Natural gas:', gas_para],
['Low carbon:', low_para]]
else:
equivs_data = [['Coal:', coal_para],
['Petroleum:', oil_para],
['Natural gas:', gas_para],
['Low carbon:', low_para]]
equivs_and_emission_equivs(equivs_data, emissions_data, Elements)
# utils.log("Assumed Carbon Equivalencies")
# utils.log("Emissions", emission)
def generate(location, watt_averages, breakdown, kwh_and_emissions, func_info, \
comparison_values, default_emissions, default_location):
# TODO: remove state_emission and just use location
""" Generates the entire pdf report
Parameters:
location (str): user's location, locations=["Romania", "Brazil"]
watt_averages (list): list of baseline, total, process wattage, process duration
breakdown (list): [% coal, % oil/petroleum, % natural gas, % low carbon]
kwh_and_emissions (list): [kwh used, emission in kg CO2, state emission > 0 for US states]
func_info (list): [user func name, user func args (0 or more)]
"""
Elements = []
kwh, emission, state_emission = kwh_and_emissions
baseline_average, process_average, difference_average, process_duration = watt_averages
# Initializing document
doc = SimpleDocTemplate("energy-usage-report.pdf",pagesize=landscape(letter), topMargin=.3*inch)
title("Energy Usage Report", Elements)
# Handling header with function name and arguments
func_name, *func_args = func_info
info_text = " for the function " + func_name
if len(func_args) > 0:
if len(func_args) == 1:
info_text += " with the input " + str(func_args[0]) + "."
else:
info_text += " with the inputs "
            for arg in func_args:
                info_text += str(arg) + ","
            info_text = info_text[:-1] + "."
else:
info_text += "."
subtitle("Energy usage and carbon emissions" + info_text, Elements, spaceBefore=True)
# Energy Usage Readings and Energy Mix Data
readings_data = [['Energy Usage Readings', ''],
['Average baseline wattage:', "{:.2f} watts".format(baseline_average)],
['Average total wattage:', "{:.2f} watts".format(process_average)],
['Average process wattage:', "{:.2f} watts".format(difference_average)],
['Process duration:', process_duration],
['','']] #hack for the alignment
if state_emission:
coal, oil, natural_gas, low_carbon = breakdown
mix_data = [['Energy Mix Data', ''],
['Coal', "{:.2f}%".format(coal)],
['Oil', "{:.2f}%".format(oil)],
['Natural gas', "{:.2f}%".format(natural_gas)],
['Low carbon', "{:.2f}%".format(low_carbon)]]
else:
coal, petroleum, natural_gas, low_carbon = breakdown
mix_data = [['Energy Mix Data', ''],
['Coal', "{:.2f}%".format(coal)],
['Petroleum', "{:.2f}%".format(petroleum)],
['Natural gas', "{:.2f}%".format(natural_gas)],
['Low carbon', "{:.2f}%".format(low_carbon)]]
readings_and_mix_table(readings_data, mix_data, breakdown, state_emission, location, Elements)
report_header(kwh, emission, Elements)
report_equivalents(emission, state_emission, Elements)
comparison_graphs(comparison_values, location, emission, default_emissions, default_location, Elements)
doc.build(Elements)
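# Illustrative call (every value below is a made-up placeholder shaped to match
# the docstring above, not real measurement data):
#
#   generate(
#       location="Pennsylvania",
#       watt_averages=[2.1, 9.5, 7.4, "0:02:16"],
#       breakdown=[25.0, 1.0, 30.0, 44.0],
#       kwh_and_emissions=[2.1e-4, 1.0e-4, 0.5],
#       func_info=["my_func", 100],
#       comparison_values=[["Brazil", 5e-5], ["Romania", 3e-4]],
#       default_emissions=[],
#       default_location=False,
#   )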
|
the-stack_106_23668 | #!/usr/bin/env python
"""Setup script for ConferenceScheduler."""
import setuptools
from conference_scheduler import __project__, __version__
import os
if os.path.exists('README.rst'):
README = open('README.rst').read()
else:
README = "" # a placeholder, readme is generated on release
CHANGES = open('CHANGES.md').read()
setuptools.setup(
name=__project__,
version=__version__,
description="ConferenceScheduler is a Python 3 package template.",
url='https://github.com/DanLindeman/conference-scheduler',
author='Dan Lindeman',
author_email='[email protected]',
packages=setuptools.find_packages(),
entry_points={'console_scripts': []},
long_description=(README + '\n' + CHANGES),
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
],
install_requires=open('requirements.txt').readlines(),
)
|
the-stack_106_23670 | # -*- test-case-name: foolscap.test.test_banana -*-
from __future__ import print_function
from twisted.python import log
from twisted.internet.defer import Deferred
from foolscap.tokens import Violation
from foolscap.slicer import BaseSlicer, BaseUnslicer
from foolscap.constraint import OpenerConstraint, Any, IConstraint
from foolscap.util import AsyncAND
class ListSlicer(BaseSlicer):
opentype = ("list",)
trackReferences = True
slices = list
def sliceBody(self, streamable, banana):
for i in self.obj:
yield i
class ListUnslicer(BaseUnslicer):
opentype = ("list",)
maxLength = None
itemConstraint = None
debug = False
def setConstraint(self, constraint):
if isinstance(constraint, Any):
return
assert isinstance(constraint, ListConstraint)
self.maxLength = constraint.maxLength
self.itemConstraint = constraint.constraint
def start(self, count):
#self.opener = foo # could replace it if we wanted to
self.list = []
self.count = count
if self.debug:
log.msg("%s[%d].start with %s" % (self, self.count, self.list))
self.protocol.setObject(count, self.list)
self._ready_deferreds = []
def checkToken(self, typebyte, size):
        if self.maxLength is not None and len(self.list) >= self.maxLength:
# list is full, no more tokens accepted
# this is hit if the max+1 item is a primitive type
raise Violation("the list is full")
if self.itemConstraint:
self.itemConstraint.checkToken(typebyte, size)
def doOpen(self, opentype):
# decide whether the given object type is acceptable here. Raise a
# Violation exception if not, otherwise give it to our opener (which
# will normally be the RootUnslicer). Apply a constraint to the new
# unslicer.
        if self.maxLength is not None and len(self.list) >= self.maxLength:
# this is hit if the max+1 item is a non-primitive type
raise Violation("the list is full")
if self.itemConstraint:
self.itemConstraint.checkOpentype(opentype)
unslicer = self.open(opentype)
if unslicer:
if self.itemConstraint:
unslicer.setConstraint(self.itemConstraint)
return unslicer
def update(self, obj, index):
# obj has already passed typechecking
if self.debug:
log.msg("%s[%d].update: [%d]=%s" % (self, self.count, index, obj))
assert isinstance(index, int)
self.list[index] = obj
return obj
def receiveChild(self, obj, ready_deferred=None):
if ready_deferred:
self._ready_deferreds.append(ready_deferred)
if self.debug:
log.msg("%s[%d].receiveChild(%s)" % (self, self.count, obj))
# obj could be a primitive type, a Deferred, or a complex type like
# those returned from an InstanceUnslicer. However, the individual
# object has already been through the schema validation process. The
# only remaining question is whether the larger schema will accept
# it.
        if self.maxLength is not None and len(self.list) >= self.maxLength:
# this is redundant
# (if it were a non-primitive one, it would be caught in doOpen)
# (if it were a primitive one, it would be caught in checkToken)
raise Violation("the list is full")
if isinstance(obj, Deferred):
if self.debug:
log.msg(" adding my update[%d] to %s" % (len(self.list), obj))
obj.addCallback(self.update, len(self.list))
obj.addErrback(self.printErr)
placeholder = "list placeholder for arg[%d], rd=%s" % \
(len(self.list), ready_deferred)
self.list.append(placeholder)
else:
self.list.append(obj)
def printErr(self, why):
print("ERR!")
print(why.getBriefTraceback())
log.err(why)
def receiveClose(self):
ready_deferred = None
if self._ready_deferreds:
ready_deferred = AsyncAND(self._ready_deferreds)
return self.list, ready_deferred
def describe(self):
return "[%d]" % len(self.list)
class ListConstraint(OpenerConstraint):
"""The object must be a list of objects, with a given maximum length. To
accept lists of any length, use maxLength=None. All member objects must
obey the given constraint."""
opentypes = [("list",)]
name = "ListConstraint"
def __init__(self, constraint, maxLength=None, minLength=0):
self.constraint = IConstraint(constraint)
self.maxLength = maxLength
self.minLength = minLength
def checkObject(self, obj, inbound):
if not isinstance(obj, list):
raise Violation("not a list")
if self.maxLength is not None and len(obj) > self.maxLength:
raise Violation("list too long")
if len(obj) < self.minLength:
raise Violation("list too short")
for o in obj:
self.constraint.checkObject(o, inbound)
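# Usage sketch (assumes foolscap's IntegerConstraint is available; names and
# values are illustrative only):
#
#   from foolscap.constraint import IntegerConstraint
#   constraint = ListConstraint(IntegerConstraint(), maxLength=3)
#   constraint.checkObject([1, 2, 3], inbound=True)     # accepted
#   constraint.checkObject([1, 2, 3, 4], inbound=True)  # raises Violation("list too long")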
|
the-stack_106_23671 | from types import SimpleNamespace
from syncx import manage
from syncx import tag
from syncx.manager import Manager
from syncx.manager import ManagerInterface
from syncx.serializer import JsonSerializer
from syncx.serializer import YamlSerializer
def test_get_serializer():
assert Manager.get_serializer('foo') is YamlSerializer
assert Manager.get_serializer('foo.yml') is YamlSerializer
assert Manager.get_serializer('foo.yaml') is YamlSerializer
assert Manager.get_serializer('foo.json') is JsonSerializer
def test_interface():
my_data = {'value': 'initial'}
my_data = tag(my_data)
manager_interface = manage(my_data)
assert type(manager_interface) == ManagerInterface
manager_interface_2 = manage(my_data)
assert manager_interface.history == manager_interface_2.history
def test_start_sync__defaults(get_test_data_file, tmp_path):
expected_contents = get_test_data_file('dump.yaml')
my_data = {'a': ['b', {'c': 0, 'd': 1}], 'e': {1}}
manager = Manager()
already_wrapped = tag(my_data)
wrapped = manager.start_sync(already_wrapped, str(tmp_path / 'test.yaml'))
assert wrapped == already_wrapped
assert (tmp_path / 'test.yaml').read_text() == expected_contents
def test_start_sync__file_exists(path_to_test_data):
initial_data = tag({})
name = str(path_to_test_data / 'dump.yaml')
wrapped = initial_data._manager.start_sync(initial_data, name)
assert wrapped == {'a': ['b', {'c': 0, 'd': 1}], 'e': {1}}
def test_start_sync__file_exists__custom_type(path_to_test_data):
initial_data = tag(SimpleNamespace)
name = str(path_to_test_data / 'dump.yaml')
wrapped = initial_data._manager.start_sync(initial_data, name)
assert wrapped.a == ['b', {'c': 0, 'd': 1}]
assert wrapped.e == {1}
|
the-stack_106_23672 | from warnings import warn
from .Abi import dec_uint
from .JsonRpc import JsonRpc
from .JsonRpc import JsonRpcError
class BlockDataDict(object):
'''
Represents the parsed return of the Block data.
'''
integer_fields = ["nonce", "number", "difficulty", "totalDifficulty", "size", "gasLimit", "gasUsed", "timestamp"]
def __init__(self, data):
for key in data.keys():
if key in self.integer_fields:
setattr(self,key,dec_uint(data[key]))
else:
setattr(self,key,data[key])
def __repr__(self):
return 'BlockLogDict(%s)' % str(dict(self))
def __iter__(self):
for k in self.__dict__.keys():
if k == "integer_fields":
continue
yield k, self.__dict__[k]
def __getitem__(self,key):
return getattr(self,key)
class TransactionDict(object):
'''
    Represents the parsed return of the transaction data.
'''
integer_fields = ["blockNumber", "gas", "gasPrice", "value", "nonce"]
def __init__(self, data):
for key in data.keys():
if key in self.integer_fields:
setattr(self,key,dec_uint(data[key]))
else:
setattr(self,key,data[key])
def __repr__(self):
return 'TransactionDict(%s)' % str(dict(self))
def __iter__(self):
for k in self.__dict__.keys():
if k == "integer_fields":
continue
yield k, self.__dict__[k]
def __getitem__(self,key):
return getattr(self,key)
class CommittedTransaction(object):
def __init__(self, transactionHash, jsonrpc_provider):
self.transactionHash = transactionHash
self.jsonrpc_provider = jsonrpc_provider
self.receipt_returned = None
def __str__(self):
return 'CommittedTransaction(%s)' % self.transactionHash
def receipt(self):
uint_keys = ['blockNumber', 'cumulativeGasUsed', 'gasUsed', 'status', 'transactionIndex']
if self.receipt_returned != None:
return self.receipt_returned
response = self.jsonrpc_provider.eth_getTransactionReceipt(self.transactionHash)
if 'result' in response:
if response['result'] == None:
return None
receipt = response['result']
for key in uint_keys:
receipt[key] = dec_uint(receipt[key])
self.receipt_returned = receipt
return receipt
else:
raise JsonRpcError(str(response))
class NetworkUtil(object):
'''
A class to contain all network attributes and contract's methods/functions
'''
def __init__(self,provider=None,basicauth=()):
if provider is not None:
self.jsonrpc_provider = provider
if basicauth != ():
self.jsonrpc_provider.auth = basicauth
self.__chainId = None
def getTransactionByHash(self, txHash):
if isinstance(self.__jsonrpc_provider, JsonRpc):
if isinstance(txHash, str):
if not txHash.startswith('0x'):
txHash = '0x' + txHash
response = self.jsonrpc_provider.eth_getTransactionByHash(txHash)
else:
raise TypeError('getTransactionByHash(): txHash must be a hexstring')
if 'result' in response:
return TransactionDict(response['result'])
else:
            raise TypeError('getTransactionByHash(): unable to find a valid JsonRpc provider')
def getBlockByNumber(self, blockNumber, withTx=False):
if isinstance(self.__jsonrpc_provider, JsonRpc):
if isinstance(blockNumber, str):
if blockNumber.startswith('0x') or blockNumber in ["earliest","latest","pending"]:
response = self.jsonrpc_provider.eth_getBlockByNumber(blockNumber, withTx)
else:
raise TypeError('getBlockByNumber(): blockNumber must be a hexstring or an integer')
elif isinstance(blockNumber, int):
response = self.jsonrpc_provider.eth_getBlockByNumber(hex(blockNumber), withTx)
else:
raise TypeError('getBlockByNumber(): blockNumber must be a hexstring or an integer')
if 'result' in response:
return BlockDataDict(response['result'])
@property
def chainId(self):
if self.__chainId != None:
return self.__chainId
else:
try:
response = self.jsonrpc_provider.eth_chainId()
if 'result' in response:
self.__chainId = response['result']
else:
                    warn('jsonrpc_provider: does not support the eth_chainId() method -> ' + str(response))
self.__chainId = None
except Exception as e:
                warn('jsonrpc_provider: raised an exception -> ' + str(e))
self.__chainId = None
return self.__chainId
@property
def blockNumber(self):
if isinstance(self.__jsonrpc_provider, JsonRpc):
response = self.jsonrpc_provider.eth_blockNumber()
if 'result' in response:
return dec_uint(response['result'])
else:
raise JsonRpcError(str(response))
else:
return None
@blockNumber.setter
def blockNumber(self, blockNumber):
        raise AttributeError('Only the network can set a blockNumber')
@property
def jsonrpc_provider(self):
return self.__jsonrpc_provider
@jsonrpc_provider.setter
def jsonrpc_provider(self, jsonrpc_provider):
if isinstance(jsonrpc_provider, JsonRpc):
self.__jsonrpc_provider = jsonrpc_provider
else:
self.__jsonrpc_provider = JsonRpc(jsonrpc_provider)
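# Minimal usage sketch (the endpoint URL and transaction hash are placeholders):
#
#   net = NetworkUtil(provider="http://127.0.0.1:8545")
#   print(net.blockNumber)                        # latest block height as an int
#   block = net.getBlockByNumber("latest")        # -> BlockDataDict
#   tx = net.getTransactionByHash("0x<txhash>")   # -> TransactionDict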
|
the-stack_106_23674 | import numpy as np
from core.variable import Variable
from core.constraint import Constraint
from core.agent_state import AgentState
class Agent:
def __init__(self, name, variables=[], constraints=[], seed=1234):
self.name = name
self.variables = variables.copy()
# the set of constraints involving variables of this agent in their scope
self.constraints = constraints.copy()
# the set of constraints controlled by this agent
self.controlled_constraints = constraints.copy()
self.prng = np.random.RandomState(seed)
self.neighbors = []
self.state = AgentState(name, self, seed)
def addNeighbor(self, neighbor, con):
if neighbor not in self.neighbors:
self.neighbors.append(neighbor)
self.state.addNeighborsVariables(neighbor)
            # Initialize controlled constraints: this agent keeps control of constraint f only if it
            # controls a variable x in f's scope and x has the smallest (sorted) name among all variables in that scope
if con in self.controlled_constraints:
v_agt = [v.name for v in self.variables]
v_con = sorted([v.name for v in con.scope])
if v_con[0] not in v_agt:
self.controlled_constraints.remove(con)
def setRandomAssignment(self):
'''Initializes values of all its variables to random values'''
for v in self.variables:
v.setRandomAssignment()
def setStateAssignment(self):
'''Sets the agent variable assignment equal to those in its state'''
for var in self.variables:
var.setAssignment(self.state.variables_assignments[var.name])
def __str__(self):
return 'agent: ' + str(self.name) \
+ '\tN='+ str([a.name for a in self.neighbors]) \
+ '\t controls:' \
+ str([var.name for var in self.variables]) \
+ ' constraints: ' + str([con.name for con in self.constraints]) \
+ ' controlled: ' + str([con.name for con in self.controlled_constraints]) |
the-stack_106_23675 | import sys
import scipy.io as sio
import glob
## OBJ file
#v -0.3925 -0.8111 2.0260
s = int(sys.argv[1])
with open('scenes', 'r') as fin:
scene_id = fin.readlines()[s].strip()
mats = glob.glob('scannet/%s/*.mat' % scene_id)
for mat_f in mats:
obj = mat_f.replace('.mat', '.obj')
mat = sio.loadmat(mat_f)
#print(mat.keys())
with open(obj, 'w') as fout:
fout.write('# OBJ file\n')
v = mat['vertex']
assert v.shape[0] == 3
for i in range(v.shape[1]):
fout.write('v %.4f %.4f %.4f\n' % (v[0, i], v[1, i], v[2, i]))
|
the-stack_106_23677 | # search_helpers.py
from typing import List
from pybraries.helpers import sess, extract
from pybraries.make_request import make_request
def search_api(action, *args, **kwargs):
"""
    Build and send a search request to the libraries.io API.
Args:
action (str): function action name
*args (str): positional arguments
**kwargs (str): keyword arguments
Returns:
        (list): list of dicts returned by libraries.io, paginated
            according to `page` and `per_page`. Entries are dicts or
            lists of dicts.
"""
kind = "get"
url_end_list = handle_path_params(action, *args, **kwargs)
handle_query_params(action, **kwargs)
url_combined = "/".join(url_end_list)
return make_request(url_combined, kind)
def handle_query_params(action, **kwargs):
if action == "special_project_search":
try:
sess.params["q"] = kwargs["keywords"]
        except KeyError:
            print("A string of keywords must be passed as a keyword argument")
if "platforms" in kwargs:
sess.params["platforms"] = kwargs["platforms"]
if "licenses" in kwargs:
sess.params["licenses"] = kwargs["licenses"]
if "languages" in kwargs:
sess.params["languages"] = kwargs["languages"]
elif "project" in kwargs:
sess.params["q"] = kwargs["project"]
if "filters" in kwargs:
extract(*list(kwargs["filters"].keys())).of(kwargs["filters"]).then(
sess.params.__setitem__
)
if "sort" in kwargs:
sess.params["sort"] = kwargs["sort"]
if "page" in kwargs:
sess.params["page"] = kwargs["page"]
if "per_page" in kwargs:
sess.params["per_page"] = kwargs["per_page"]
def handle_path_params(action, *args, **kwargs):
def from_kwargs(*keys):
return extract(*keys).of(kwargs).then([].append)
url_end_list: List[str] = ["https://libraries.io/api"] # start of list to build url
if action == "special_project_search":
url_end_list.append("search?")
elif action == "platforms":
url_end_list.append("platforms")
elif action.startswith("project"):
action = action[7:] # remove action prefix
url_end_list += [*from_kwargs("platforms", "project"), *args]
if action.startswith("_"):
action = action[1:] # remove remaining underscore from operation name
if action == "dependencies":
            version = kwargs.pop("version", None) or "latest"  # defaults to latest
url_end_list.append(version)
url_end_list.append(action)
elif action.startswith("repository"):
action = action[len("repository") :]
url_end_list += [*from_kwargs("host", "owner", "repo"), *args]
if action.startswith("_"):
url_end_list.append(action[1:])
elif "user" in action:
url_end_list += [*from_kwargs("host", "user"), *args]
if action == "user_repositories":
url_end_list.append("repositories")
if action == "user_projects":
url_end_list.append("projects")
if action == "user_projects_contributions":
url_end_list.append("project-contributions")
if action == "user_repositories_contributions":
url_end_list.append("repository-contributions")
if action == "user_dependencies":
url_end_list.append("dependencies")
return url_end_list
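# Illustrative call (keyword values are examples only; see handle_query_params
# above for which parameters each action understands):
#
#   results = search_api(
#       "special_project_search",
#       keywords="http client",
#       platforms="Pypi",
#       sort="stars",
#       page=1,
#       per_page=30,
#   )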
|
the-stack_106_23683 | # coding: utf-8
import pprint
import re
import six
class ListResourceTypesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'resource_type_code': 'str'
}
attribute_map = {
'x_language': 'X-Language',
'resource_type_code': 'resource_type_code'
}
def __init__(self, x_language='zh_cn', resource_type_code=None):
"""ListResourceTypesRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._resource_type_code = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if resource_type_code is not None:
self.resource_type_code = resource_type_code
@property
def x_language(self):
"""Gets the x_language of this ListResourceTypesRequest.
:return: The x_language of this ListResourceTypesRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListResourceTypesRequest.
:param x_language: The x_language of this ListResourceTypesRequest.
:type: str
"""
self._x_language = x_language
@property
def resource_type_code(self):
"""Gets the resource_type_code of this ListResourceTypesRequest.
:return: The resource_type_code of this ListResourceTypesRequest.
:rtype: str
"""
return self._resource_type_code
@resource_type_code.setter
def resource_type_code(self, resource_type_code):
"""Sets the resource_type_code of this ListResourceTypesRequest.
:param resource_type_code: The resource_type_code of this ListResourceTypesRequest.
:type: str
"""
self._resource_type_code = resource_type_code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListResourceTypesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_23684 | import sys, os
import subprocess
#from utilix.rundb import pymongo_collection
from utilix import xent_collection
import re
import time
import datetime
_date = sys.argv[1]
single_calibration = int(sys.argv[2])
date = datetime.datetime(int(_date[:4]), int(_date[4:6]), int(_date[6:]))
runs = sys.argv[3:11]
print('Date: ', date)
print('Runs: ', runs)
print('Single calibration: ', single_calibration)
if len(runs) != 0:
if len(runs) != 8:
check = False
raise SystemExit('Not enough runs!')
compare = True
else:
compare = False
print('Compare? ', compare)
#coll = pymongo_collection()
coll = xent_collection()
check = False
runs_ = [ ]
for i in [0,1,2,3]:
query = {'mode': 'tpc_pmtgain', #'tpc_commissioning_pmtgain',
'comments': {'$elemMatch': {'comment': re.compile(f'.*SPE_calibration_step{i}.*')}},
'tags.name': {'$not': {'$in': ['bad', 'messy', 'abandon']}},
'start': {'$gte': date, '$lt': date + datetime.timedelta(days=1)}
}
cursor = list(coll.find(query, {'number': 1}))
if len(cursor) == 0:
        print('No SPE calibration runs found for this date; check set to False')
        check = False
        break
else:
check = True
for run in cursor:
runs_.append('0'+str(run['number']))
for i in [0,1,2,3]:
query = {'mode': 'tpc_pmtgain', #'tpc_commissioning_pmtgain',
'comments': {'$elemMatch': {'comment': re.compile(f'.*Gain_calibration_step{i}.*')}},
'tags.name': {'$not': {'$in': ['bad', 'messy', 'abandon']}},
'start': {'$gte': date, '$lt': date + datetime.timedelta(days=1)}
}
cursor = list(coll.find(query, {'number': 1}))
if len(cursor) == 0:
        print('No gain calibration runs found for this date; check set to False')
        check = False
        break
else:
check = True
for run in cursor:
runs_.append('0'+str(run['number']))
print('Runs_: ', runs_)
if compare and (single_calibration == 1):
print('Let\'s compare: ')
if (runs == runs_):
check = True
else:
check = False
print('Check: ', check)
else:
    print('I will not compare: this is an HV scan, there is more than one calibration per day, or this is a cron job.')
if check: sys.exit(1)
else: sys.exit(0)
|
the-stack_106_23685 | from typing import Callable, Optional
from testutils.trees import TreeNode, build_tree
def preorder_string(node: Optional[TreeNode]) -> str:
"""Returns a uniquely identifyable preorder string of the tree"""
if not node:
return ''
return f"# {node.val} {preorder_string(node.left)} {preorder_string(node.right)}"
class Solution:
def isSubtree(self, tree: Optional[TreeNode], subtree: Optional[TreeNode]) -> bool:
return preorder_string(subtree) in preorder_string(tree)
tests = [
(
([3, 4, 5, 1, 2, None, None, None, None, 0], [4, 1, 2],),
False,
),
(
([3, 4, 5, 1, 2], [4, 1, 2],),
True,
),
]
def validator(
isSubtree: Callable[[Optional[TreeNode], Optional[TreeNode]], bool],
inputs: tuple[list[Optional[int]], list[Optional[int]]],
expected: bool,
) -> None:
tree_vals, subtree_vals = inputs
tree = build_tree(tree_vals)
subtree = build_tree(subtree_vals)
output = isSubtree(tree, subtree)
assert output == expected, (output, expected)
|
the-stack_106_23686 | import configparser
import re
import sys
from loguru import logger as logger
class Config:
def __init__(self, config_file: str):
self.config_file = config_file
def check_setting(self, category: str, option: str) -> str:
config = configparser.ConfigParser()
config.read(self.config_file)
selected = config[category][option]
selected = selected.replace('"', '')
selected = selected.replace("'", '')
return selected
# Remove special characters and return remainders
@staticmethod
def remove_special_chars(file_name: str) -> str:
name_list = re.findall(u"[a-zA-Z0-9_+ .]", file_name)
name_list = ''.join(name_list)
if len(name_list) > 20:
logger.error("Invalid file name detected.")
sys.exit("Invalid file name processed.")
return name_list.strip('.')
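# Illustrative usage (the config path, section, option and file name below are
# placeholders):
#
#   cfg = Config("settings.ini")
#   download_dir = cfg.check_setting("paths", "download_dir")
#   Config.remove_special_chars("my file?.mkv")   # -> "my file.mkv"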
|
the-stack_106_23691 | import copy
import logging
from typing import List, Tuple
import torch
import wandb
from torch.utils.data import DataLoader, TensorDataset
from torchvision.utils import make_grid
import torchvision.transforms as tfs
from FID.FIDScorer import FIDScorer
from fedml_api.standalone.fedgdkd.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.fedgdkd.client import FedGDKDClient
from fedml_api.standalone.fedgdkd.model_trainer import FedGDKDModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
class FedGDKDAPI(HeterogeneousModelBaseTrainerAPI):
def __init__(self, dataset, device, args, generator, client_models: List[Tuple[torch.nn.Module, int]]):
"""
Args:
dataset: Dataset presplit into data loaders
device: Device to run training on
args: Additional args
client_models: List of client models and their frequency participating (assuming a stateful algorithm for simplicity)
"""
super().__init__(dataset, device, args)
self.mean = torch.Tensor([0.5])
self.std = torch.Tensor([0.5])
self.generator = ACGANModelTrainer(generator, None)
self.generator_model = self.generator.generator
# For logging GAN progress
self.fixed_labels = self.generator_model.generate_balanced_labels(
self.generator_model.num_classes * 8,
device='cpu')
self.fixed_noise = self.generator_model.generate_noise_vector(self.generator_model.num_classes * 8,
device='cpu')
self._setup_clients(self.train_data_local_num_dict, self.train_data_local_dict, self.test_data_local_dict,
client_models)
self._plot_client_training_data_distribution()
# Generate dataset that can be used to calculate FID score
self.FID_source_set = self._generate_train_subset(num_samples=10000)
self.FIDScorer = FIDScorer()
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
client_models):
logging.info("############setup_clients (START)#############")
c_idx = 0
for local_model, freq in client_models:
for i in range(freq):
model_trainer = FedGDKDModelTrainer(
copy.deepcopy(self.generator.model),
copy.deepcopy(local_model)
)
c = FedGDKDClient(c_idx, train_data_local_dict[c_idx], test_data_local_dict[c_idx],
train_data_local_num_dict[c_idx], self.test_global, self.args, self.device,
model_trainer)
c_idx += 1
self.client_list.append(c)
logging.info("############setup_clients (END)#############")
def train(self):
w_global = self.generator.get_model_params()
DISTILLATION_DATASET_SIZE = self.args.distillation_dataset_size
distillation_dataset = None
teacher_logits = None
prev_client_subset = None
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
client_subset = self._client_sampling(round_idx)
# ---------------
# GAN Training
# ---------------
logging.info('########## Gan Training ########')
w_locals = []
client: FedGDKDClient
for client in client_subset:
# Perform knowledge distillation (model drift correction) on current participating clients
if prev_client_subset is not None and client.client_idx not in prev_client_subset:
logging.info(f"######## KD for new client {client.client_idx} ########")
assert distillation_dataset is not None and teacher_logits is not None, 'Need both to perform KD'
# Calculate teacher logits as mean of logits belonging to other clients
client.classifier_knowledge_distillation(teacher_logits, distillation_dataset)
# Perform local training as usual
w_local = client.train(copy.deepcopy(w_global), round_idx)
w_locals.append(w_local)
# update global weights
w_global = self._aggregate(w_locals)
# self.generator.set_model_params(g_global)
# self.discriminator.set_model_params(d_global)
self.generator.set_model_params(w_global)
# ---------------
# Distillation Phase
# ---------------
logging.info('########## Distillation ########')
# Creating distillation dataset here to save memory but same as if sending noise vector to clients
distillation_dataset = self.generate_fake_dataset(DISTILLATION_DATASET_SIZE)
local_logits = []
logging.info("########## Acquiring distillation logits... #########")
for client in client_subset:
logits = client.get_distillation_logits(copy.deepcopy(w_global), distillation_dataset)
local_logits.append(logits)
logging.info(f"Client {client.client_idx} complete")
# Calculate average soft labels
logging.info(f"######## Knowledge distillation stage ########")
for idx, client in enumerate(client_subset):
# Calculate teacher logits for client
logging.info(f"##### Client {client.client_idx} #####")
teacher_logits = torch.mean(torch.stack(local_logits[:idx] + local_logits[idx + 1:]), dim=0)
teacher_logits = DataLoader(TensorDataset(teacher_logits), batch_size=self.args.batch_size)
client.classifier_knowledge_distillation(teacher_logits, distillation_dataset)
# For next round
teacher_logits = torch.mean(torch.stack(local_logits), dim=0)
teacher_logits = DataLoader(TensorDataset(teacher_logits), batch_size=self.args.batch_size)
prev_client_subset = {c.client_idx for c in client_subset}
if round_idx % 1 == 0:
logging.info("########## Logging generator images... #########")
self.log_gan_images(caption=f'Generator Output, communication round: {round_idx}', round_idx=round_idx)
logging.info("########## Logging generator images... Complete #########")
logging.info("########## Calculating FID Score... #########")
fake = distillation_dataset
if DISTILLATION_DATASET_SIZE != 10000:
fake = self.generate_fake_dataset(DISTILLATION_DATASET_SIZE)
fid_score = self.FIDScorer.calculate_fid(images_real=self.FID_source_set,
images_fake=fake, device=self.device)
if DISTILLATION_DATASET_SIZE != 10000:
del fake
logging.info(f'FID Score: {fid_score}')
wandb.log({'Gen/FID Score Distillation Set': fid_score, 'Round': round_idx})
logging.info("########## Calculating FID Score... Complete #########")
# test results
# at last round
if round_idx == self.args.comm_round - 1:
self._local_test_on_all_clients(round_idx)
# per {frequency_of_the_test} round
elif round_idx % self.args.frequency_of_the_test == 0:
if self.args.dataset.startswith("stackoverflow"):
self._local_test_on_validation_set(round_idx)
else:
self._local_test_on_all_clients(round_idx)
def _aggregate(self, w_locals):
w = 1 / len(w_locals)
averaged_params = w_locals[0]
for k in averaged_params.keys():
for i, local_model_params in enumerate(w_locals):
if i == 0:
averaged_params[k] = local_model_params[k] * w
else:
averaged_params[k] += local_model_params[k] * w
return averaged_params
def log_gan_images(self, caption, round_idx):
images = make_grid(
self.denorm(
self.generator_model(self.fixed_noise.to(self.device), self.fixed_labels.to(self.device))),
nrow=8,
padding=2,
normalize=False,
range=None,
scale_each=False, pad_value=0)
images = wandb.Image(images, caption=caption)
wandb.log({f"Generator Outputs": images, 'Round': round_idx})
def denorm(self, x, channels=None, w=None, h=None, resize=False, device='cpu'):
unnormalize = tfs.Normalize((-self.mean / self.std).tolist(), (1.0 / self.std).tolist()).to(device)
x = unnormalize(x)
if resize:
if channels is None or w is None or h is None:
print('Number of channels, width and height must be provided for resize.')
x = x.view(x.size(0), channels, w, h)
return x
def generate_fake_dataset(self, size):
# Creating distillation dataset here to save memory but same as if sending noise vector to clients
noise_vector = self.generator_model.generate_noise_vector(size, device=self.device)
labels = self.generator_model.generate_balanced_labels(size, device=self.device)
noise_labels = TensorDataset(noise_vector, labels)
noise_labels_loader = DataLoader(noise_labels, batch_size=self.args.batch_size)
synth_data = self.generator.generate_distillation_dataset(noise_labels_loader, device=self.device)
del noise_labels_loader
return DataLoader(TensorDataset(synth_data, labels), batch_size=self.args.batch_size)
|
the-stack_106_23692 | import logging
import spotipy
from interaction_manager.utils import config_helper
logger = logging.getLogger("Spotify Config")
default_spotify_settings = config_helper.get_spotify_settings()
scope = "user-library-read, playlist-read-private, app-remote-control, streaming, " \
"user-read-playback-state, user-modify-playback-state"
username = default_spotify_settings["username"]
client_id = default_spotify_settings["client_id"]
client_secret = default_spotify_settings["client_secret"]
redirect_uri = default_spotify_settings["redirect_uri"]
def connect():
logger.info("Connecting...")
try:
client_credentials_manager = spotipy.SpotifyOAuth(client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
scope=scope,
username=username)
spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
playlists = spotify.user_playlists(username, limit=5)
if playlists is None or len(playlists["items"]) == 0:
logger.warning("*** Couldn't find any playlist! Please try with another username.")
else:
for playlist in playlists["items"]:
# if playlist['owner']['id'] == self.username:
logger.info("Playlist '{}' has {} tracks".format(playlist["name"], playlist["tracks"]["total"]))
logger.info("Successfully connected to spotify.")
except Exception as e:
logger.error("Error while connecting to spotify! {}".format(e))
connect()
|
the-stack_106_23693 | from __future__ import absolute_import, division, print_function
# XXX this is intended to be a simple template for debugging queueing system
# support issues, not a full regression test.
class target(object):
def __init__(self, x):
self.x = x
def __call__(self):
import math
results = []
        for n in range(self.x):
nn = math.sqrt(n**3)
print(nn)
results.append(nn)
def exercise():
from libtbx.queuing_system_utils import generic as queuing
t = target(1000000)
job = queuing.qsub(
target=t,
platform="sge")
job.start()
assert (isinstance(job.jobid, int))
while job.is_alive():
pass
print("done")
if (__name__ == "__main__"):
exercise()
|
the-stack_106_23695 | from tqdm import tqdm
import torch
class Trainer:
def __init__(self, cfg, model, criterion, optimizer, loader):
self.cfg = cfg
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.train_loader = loader['train']
self.val_loader = loader['val']
def train(self):
best_val = -1
for epoch in range(self.cfg.epochs):
self.model.train()
pbar = tqdm(self.train_loader, total=len(self.train_loader), desc='Training')
for idx, data in enumerate(pbar):
if self.cfg.mode == 'ecfp':
tokens, segments, input_mask, length, label, ecfp = data
elif self.cfg.mode == 'smiles':
tokens, segments, input_mask, length, label = data
else:
raise NotImplementedError
tokens, segments = tokens.to(self.cfg.device), segments.to(self.cfg.device)
input_mask, label = input_mask.to(self.cfg.device), label.to(self.cfg.device)
if self.cfg.mode == 'smiles':
pred = self.model(tokens, segments, input_mask)
elif self.cfg.mode == 'ecfp':
ecfp = ecfp.to(self.cfg.device)
pred = self.model(tokens, segments, input_mask, ecfp)
else:
raise NotImplementedError
loss = self.criterion(pred, label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if epoch % self.cfg.val_interval == 0 or epoch == (self.cfg.epochs - 1):
val_loss, val_acc = self.evaluate(self.val_loader, 'Validation')
if val_acc > best_val:
torch.save(self.model.state_dict(), self.cfg.save_path + 'best_acc.pt')
best_val = val_acc
print('Best Checkpoint Saved')
print('Train Finished')
def evaluate(self, loader, name):
self.model.eval()
pbar = tqdm(loader, total=len(loader), desc=name)
total_loss, total_acc, n_data = 0, 0, 0
for idx, data in enumerate(pbar):
if self.cfg.mode == 'ecfp':
tokens, segments, input_mask, length, label, ecfp = data
elif self.cfg.mode == 'smiles':
tokens, segments, input_mask, length, label = data
else:
raise NotImplementedError
tokens, segments = tokens.to(self.cfg.device), segments.to(self.cfg.device)
input_mask, label = input_mask.to(self.cfg.device), label.to(self.cfg.device)
with torch.no_grad():
if self.cfg.mode == 'smiles':
pred = self.model(tokens, segments, input_mask)
elif self.cfg.mode == 'ecfp':
ecfp = ecfp.to(self.cfg.device)
pred = self.model(tokens, segments, input_mask, ecfp)
else:
raise NotImplementedError
loss = self.criterion(pred, label)
total_loss += loss.item()
total_acc += torch.eq(torch.argmax(pred, dim=1), label).to(torch.int32).sum().item()
n_data += tokens.size(0)
avg_loss = total_loss / n_data
avg_acc = total_acc / n_data
print(f'{name} Finished - Average loss: {avg_loss}, Average Acc: {avg_acc}')
return avg_loss, avg_acc
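# Illustrative wiring (a sketch; cfg fields are inferred from the code above and
# the model/optimizer/loader names are placeholders):
#
#   trainer = Trainer(
#       cfg,   # needs epochs, mode ('smiles' or 'ecfp'), device, val_interval, save_path
#       model,
#       torch.nn.CrossEntropyLoss(),
#       torch.optim.Adam(model.parameters()),
#       {'train': train_loader, 'val': val_loader},
#   )
#   trainer.train()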
|
the-stack_106_23697 | import base64
import os
import sys
import threading
from collections import defaultdict
from functools import partial
from io import BytesIO
from mimetypes import guess_extension
from tempfile import mkstemp
from typing import Any, Union
import numpy as np
import six
from PIL import Image
from ...debugging.log import LoggerRoot
from ..frameworks import _patched_call, WeightsFileHandler, _Empty
from ..import_bind import PostImportHookPatching
from ...config import running_remotely
from ...model import InputModel, OutputModel, Framework
try:
from google.protobuf.json_format import MessageToDict # noqa
except ImportError:
MessageToDict = None
try:
from PIL import GifImagePlugin # noqa
except ImportError:
pass
class TensorflowBinding(object):
@classmethod
def update_current_task(cls, task, patch_reporting=True, patch_model_io=True):
if not task:
IsTensorboardInit.clear_tensorboard_used()
EventTrainsWriter.update_current_task(task)
if patch_reporting:
PatchSummaryToEventTransformer.update_current_task(task)
PatchTensorFlowEager.update_current_task(task)
if patch_model_io:
PatchKerasModelIO.update_current_task(task)
PatchTensorflowModelIO.update_current_task(task)
PatchTensorflow2ModelIO.update_current_task(task)
class IsTensorboardInit(object):
_tensorboard_initialized = False
@classmethod
def tensorboard_used(cls):
return cls._tensorboard_initialized
@classmethod
def set_tensorboard_used(cls):
cls._tensorboard_initialized = True
@classmethod
def clear_tensorboard_used(cls):
cls._tensorboard_initialized = False
@staticmethod
def _patched_tb__init__(original_init, self, *args, **kwargs):
IsTensorboardInit._tensorboard_initialized = True
return original_init(self, *args, **kwargs)
# noinspection PyProtectedMember
class WeightsGradientHistHelper(object):
def __init__(self, logger, report_freq=100, histogram_update_freq_multiplier=10, histogram_granularity=50):
self._logger = logger
self.report_freq = report_freq
self._histogram_granularity = histogram_granularity
self._histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._hist_report_cache = {}
self._hist_x_granularity = 50
@staticmethod
def _sample_histograms(_hist_iters, _histogram_granularity):
# re-sample history based on distribution of samples across time (steps)
ratio = ((_hist_iters[-1] - _hist_iters[_histogram_granularity]) /
(_hist_iters[_histogram_granularity - 1] - _hist_iters[0])) if \
_hist_iters.size > _histogram_granularity else 0.
cur_idx_below = np.arange(0, min(_hist_iters.size, _histogram_granularity - 1))
np.random.shuffle(cur_idx_below)
cur_idx_below = cur_idx_below[:int(_histogram_granularity * (1.0 - ratio / (1 + ratio)) + 0.5)]
if ratio > 0.0:
cur_idx_above = np.arange(_histogram_granularity - 1, _hist_iters.size)
np.random.shuffle(cur_idx_above)
cur_idx_above = cur_idx_above[:int(_histogram_granularity * ratio / (1 + ratio))]
else:
cur_idx_above = np.array([])
        _cur_idx = np.unique(np.sort(np.concatenate((cur_idx_below, cur_idx_above)).astype(int)))
return _cur_idx
def add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
if isinstance(hist_data, dict):
pass
elif isinstance(hist_data, np.ndarray) and len(hist_data.shape) == 2 and np.atleast_2d(hist_data).shape[1] == 3:
# prepare the dictionary, assume numpy
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
# notice hist_data[:, 1] is the right side limit, for backwards compatibility we take the left side
hist_data = {'bucketLimit': hist_data[:, 0].tolist(), 'bucket': hist_data[:, 2].tolist()}
else:
# assume we have to do the histogram on the data
hist_data = np.histogram(hist_data, bins=32)
hist_data = {'bucketLimit': hist_data[1].tolist(), 'bucket': hist_data[0].tolist()}
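            # descriptive note: np.histogram returns (counts, bin_edges), so 'bucket' holds the 32
            # counts and 'bucketLimit' holds the corresponding bin edges, matching the dict layout above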
self._add_histogram(title=title, series=series, step=step, hist_data=hist_data)
def _add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
# generate forward matrix of the histograms
# Y-axis (rows) is iteration (from 0 to current Step)
# X-axis averaged bins (conformed sample 'bucketLimit')
# Z-axis actual value (interpolated 'bucket')
step = EventTrainsWriter._fix_step_counter(title, series, step)
# get histograms from cache
hist_list, hist_iters, minmax = self._hist_report_cache.get((title, series), ([], np.array([]), None))
        # resample data so we are always constrained in the number of histograms we keep
if hist_iters.size >= self._histogram_granularity ** 2:
idx = self._sample_histograms(hist_iters, self._histogram_granularity)
hist_iters = hist_iters[idx]
hist_list = [hist_list[i] for i in idx]
        # check if the current sample is not already here (this actually happens sometimes)
if step in hist_iters:
return None
# add current sample, if not already here
hist_iters = np.append(hist_iters, step)
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
hist = np.array(list(zip(hist_data['bucketLimit'], hist_data['bucket'])), dtype=np.float32)
hist = hist[~np.isinf(hist[:, 0]), :]
hist_list.append(hist)
# keep track of min/max values of histograms (for later re-binning)
if minmax is None:
minmax = hist[:, 0].min(), hist[:, 0].max()
else:
# noinspection PyUnresolvedReferences
minmax = min(minmax[0], hist[:, 0].min()), max(minmax[1], hist[:, 0].max())
# update the cache
self._hist_report_cache[(title, series)] = hist_list, hist_iters, minmax
# only report histogram every specific interval, but do report the first few, so you know there are histograms
if hist_iters.size < 1 or (hist_iters.size >= self._histogram_update_freq_multiplier and
hist_iters.size % self._histogram_update_freq_multiplier != 0):
return None
# resample histograms on a unified bin axis +- epsilon
_epsilon = abs((minmax[1] - minmax[0])/float(self._hist_x_granularity))
if _epsilon == 0:
_epsilon = 0.01
_minmax = minmax[0] - _epsilon, minmax[1] + _epsilon
prev_xedge = np.arange(start=_minmax[0],
step=(_minmax[1] - _minmax[0]) / float(self._hist_x_granularity - 2), stop=_minmax[1])
# uniformly select histograms and the last one
cur_idx = self._sample_histograms(hist_iters, self._histogram_granularity)
report_hist = np.zeros(shape=(len(cur_idx), prev_xedge.size), dtype=np.float32)
for i, n in enumerate(cur_idx):
h = hist_list[n]
report_hist[i, :] = np.interp(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)
yedges = hist_iters[cur_idx]
xedges = prev_xedge
        # if there is only a single line, add another zero line so the scatter plot can be drawn
if report_hist.shape[0] < 2:
report_hist = np.vstack((np.zeros_like(report_hist), report_hist))
# create 3d line (scatter) of histograms
skipx = max(1, int(xedges.size / 10))
skipy = max(1, int(yedges.size / 10))
xlabels = ['%.2f' % v if i % skipx == 0 else '' for i, v in enumerate(xedges[:-1])]
ylabels = [str(int(v)) if i % skipy == 0 else '' for i, v in enumerate(yedges)]
self._logger.report_surface(
title=title,
series=series,
iteration=0,
xaxis=' ',
yaxis='iteration',
xlabels=xlabels,
ylabels=ylabels,
matrix=report_hist,
camera=(-0.1, +1.3, 1.4))
# noinspection PyMethodMayBeStatic,PyProtectedMember,SpellCheckingInspection
class EventTrainsWriter(object):
"""
    TF SummaryWriter implementation that converts TensorBoard summaries into
    ClearML events and reports the events (metrics) for a ClearML task (logger).
"""
__main_task = None
_add_lock = threading.RLock()
_series_name_lookup = {}
# store all the created tensorboard writers in the system
    # this allows us to ask whether a certain title/series already exists on some EventWriter
# and if it does, then we add to the series name the last token from the logdir
# (so we can differentiate between the two)
# key, value: key=hash(title, graph), value=EventTrainsWriter._id
_title_series_writers_lookup = {}
_event_writers_id_to_logdir = {}
# Protect against step (iteration) reuse, for example,
# steps counter inside an epoch, but wrapping around when epoch ends
# i.e. step = 0..100 then epoch ends and again step = 0..100
# We store the first report per title/series combination, and if wraparound occurs
# we synthetically continue to increase the step/iteration based on the previous epoch counter
# example: _title_series_wraparound_counter[('title', 'series')] =
# {'first_step':None, 'last_step':None, 'adjust_counter':0,}
_title_series_wraparound_counter = {}
@property
def variants(self):
return self._variants
def prepare_report(self):
return self.variants.copy()
def tag_splitter(self, tag, num_split_parts, split_char='/', join_char='_', default_title='variant',
logdir_header='series', auto_reduce_num_split=False, force_add_prefix=None):
"""
        Split a tf.summary tag into a title (graph name) and a series (variant).
        The title is the leading part of the split tag, the series is the trailing part.
        :param str tag:
        :param int num_split_parts:
        :param str split_char: a character to split the tag on
        :param str join_char: a character to join the splits with
        :param str default_title: title to use in case no title can be inferred automatically
        :param str logdir_header: if 'series_last' then series='header: series', if 'series' then series='series :header',
            if 'title_last' then title='header title', if 'title' then title='title header'
        :param bool auto_reduce_num_split: if True and the tag splits into fewer parts than requested,
            the requested number of split parts is adjusted accordingly.
        :param str force_add_prefix: always add this prefix to the series name
        :return: (str, str) title and series
"""
splitted_tag = tag.split(split_char)
if auto_reduce_num_split and num_split_parts > len(splitted_tag) - 1:
num_split_parts = max(1, len(splitted_tag) - 1)
series = join_char.join(splitted_tag[-num_split_parts:])
title = join_char.join(splitted_tag[:-num_split_parts]) or default_title
if force_add_prefix:
series = str(force_add_prefix)+series
# check if we already decided that we need to change the title/series
graph_id = hash((title, series))
if graph_id in self._graph_name_lookup:
return self._graph_name_lookup[graph_id]
# check if someone other than us used this combination
with self._add_lock:
event_writer_id = self._title_series_writers_lookup.get(graph_id, None)
if not event_writer_id:
# put us there
self._title_series_writers_lookup[graph_id] = self._id
elif event_writer_id != self._id:
# if there is someone else, change our series name and store us
org_series = series
org_title = title
other_logdir = self._event_writers_id_to_logdir[event_writer_id]
split_logddir = self._logdir.split('/')
unique_logdir = set(split_logddir) - set(other_logdir.split('/'))
header = '/'.join(s for s in split_logddir if s in unique_logdir)
if logdir_header == 'series_last':
series = header + ': ' + series
elif logdir_header == 'series':
series = series + ' :' + header
elif logdir_header == 'title':
title = title + ' ' + header
else: # logdir_header == 'title_last':
title = header + ' ' + title
graph_id = hash((title, series))
# check if for some reason the new series is already occupied
new_event_writer_id = self._title_series_writers_lookup.get(graph_id)
if new_event_writer_id is not None and new_event_writer_id != self._id:
# well that's about it, nothing else we could do
if logdir_header == 'series_last':
series = str(self._logdir) + ': ' + org_series
elif logdir_header == 'series':
series = org_series + ' :' + str(self._logdir)
elif logdir_header == 'title':
title = org_title + ' ' + str(self._logdir)
else: # logdir_header == 'title_last':
title = str(self._logdir) + ' ' + org_title
graph_id = hash((title, series))
self._title_series_writers_lookup[graph_id] = self._id
# store for next time
self._graph_name_lookup[graph_id] = (title, series)
return title, series
def __init__(self, logger, logdir=None, report_freq=100, image_report_freq=None,
histogram_update_freq_multiplier=10, histogram_granularity=50, max_keep_images=None):
"""
Create a compatible ClearML backend to the TensorFlow SummaryToEventTransformer
Everything will be serialized directly to the ClearML backend, instead of to the standard TF FileWriter
:param logger: The task.logger to use for sending the metrics (def: task.get_logger())
:param report_freq: How often to update the statistics values
        :param image_report_freq: How often to upload images (step % image_report_freq == 0)
:param histogram_update_freq_multiplier: How often to upload histogram
(step//update_freq) % histogram_update_freq_multiplier == 0
:param histogram_granularity: How many histograms (lines) to display in the 3d histogram plot
:param max_keep_images: Maximum number of images to save before starting to reuse files (per title/metric pair)
"""
# We are the events_writer, so that's what we'll pass
IsTensorboardInit.set_tensorboard_used()
self._logdir = logdir or ('unknown %d' % len(self._event_writers_id_to_logdir))
# conform directory structure to unix
if os.path.sep == '\\':
self._logdir = self._logdir.replace('\\', '/')
self._id = hash(self._logdir)
self._event_writers_id_to_logdir[self._id] = self._logdir
self.max_keep_images = max_keep_images
self.report_freq = report_freq
self.image_report_freq = image_report_freq if image_report_freq else report_freq
self.histogram_granularity = histogram_granularity
self.histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._logger = logger
self._visualization_mode = 'RGB' # 'BGR'
self._variants = defaultdict(lambda: ())
self._scalar_report_cache = {}
self._hist_report_cache = {}
self._hist_x_granularity = 50
self._max_step = 0
self._graph_name_lookup = {}
self._generic_tensor_type_name_lookup = {}
self._grad_helper = WeightsGradientHistHelper(
logger=logger,
report_freq=report_freq,
histogram_update_freq_multiplier=histogram_update_freq_multiplier,
histogram_granularity=histogram_granularity
)
def _decode_image(self, img_str, width=None, height=None, color_channels=None):
# noinspection PyBroadException
try:
if isinstance(img_str, bytes):
imdata = img_str
else:
imdata = base64.b64decode(img_str)
output = BytesIO(imdata)
im = Image.open(output)
            # if this is a GIF, store it as is
if getattr(im, 'is_animated', None):
output.close()
fd, temp_file = mkstemp(
suffix=guess_extension(im.get_format_mimetype()) if hasattr(im, 'get_format_mimetype')
else ".{}".format(str(im.format).lower())
)
os.write(fd, imdata)
os.close(fd)
return temp_file
image = np.asarray(im)
output.close()
if height is not None and height > 0 and width is not None and width > 0:
# noinspection PyArgumentList
val = image.reshape(height, width, -1).astype(np.uint8)
else:
val = image.astype(np.uint8)
if val.ndim == 3 and val.shape[2] == 3:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val
elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1):
val = np.tile(np.atleast_3d(val), (1, 1, 3))
elif val.ndim == 3 and val.shape[2] == 4:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val[:, :, [0, 1, 2]]
except KeyboardInterrupt:
raise
except Exception as e:
logger = LoggerRoot.get_base_logger(TensorflowBinding)
logger.warning('Failed decoding debug image [%s, %s, %s]' % (width, height, color_channels))
logger.warning('Error: %s' % e)
val = None
return val
def _add_image_numpy(self, tag, step, img_data_np, max_keep_images=None):
# type: (str, int, Union[None, np.ndarray, str], int) -> ()
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if img_data_np is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Images', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
# check if this is a local temp file
if isinstance(img_data_np, str):
self._logger.report_image(
title=title,
series=series,
iteration=step,
local_path=img_data_np,
delete_after_upload=True,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
return
if img_data_np.dtype != np.uint8:
# assume scale 0-1
img_data_np = (img_data_np * 255).astype(np.uint8)
        # if this is a batch of images (4-d tensor), pack it into one big tiled image
if img_data_np.ndim == 4:
dims = img_data_np.shape
stack_dim = int(np.sqrt(dims[0]))
# noinspection PyArgumentList
res = img_data_np.reshape(stack_dim, stack_dim, *dims[1:]).transpose((0, 2, 1, 3, 4))
tile_size_h = res.shape[0] * res.shape[1]
tile_size_w = res.shape[2] * res.shape[3]
img_data_np = res.reshape(tile_size_h, tile_size_w, -1)
self._logger.report_image(
title=title,
series=series,
iteration=step,
image=img_data_np,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
def _add_image(self, tag, step, img_data):
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
width = img_data.get('width')
height = img_data.get('height')
colorspace = img_data.get('colorspace')
img_str = img_data['encodedImageString']
matrix = self._decode_image(img_str, width=width, height=height, color_channels=colorspace)
if matrix is None:
return
return self._add_image_numpy(tag=tag, step=step, img_data_np=matrix)
def _add_scalar(self, tag, step, scalar_data):
default_title = tag if not self._logger._get_tensorboard_auto_group_scalars() else 'Scalars'
series_per_graph = self._logger._get_tensorboard_single_series_per_graph()
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag, num_split_parts=1, default_title=default_title,
logdir_header='title' if series_per_graph else 'series_last',
force_add_prefix=self._logger._get_tensorboard_series_prefix()
)
step = self._fix_step_counter(title, series, step)
tag = self._get_add_scalars_event_tag(default_title)
possible_title = tag if series_per_graph else None
possible_tag = None if series_per_graph else tag
title = title + possible_title if possible_title else title
series = possible_tag or series
# update scalar cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
        # if the scalar arrived as a string, it's probably a NaN
if isinstance(scalar_data, six.string_types):
# noinspection PyBroadException
try:
scalar_data = float(scalar_data)
except Exception:
scalar_data = float('nan')
# nan outputs nan
self._scalar_report_cache[(title, series)] = \
(num + 1,
(value + scalar_data) if scalar_data == scalar_data else scalar_data)
        # only report scalars every specific interval
if step % self.report_freq != 0:
return None
# calculate mean and zero cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
scalar_data = value / num
self._scalar_report_cache[(title, series)] = (0, 0)
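        # descriptive note: the reported value is the mean of all scalars accumulated for this
        # title/series since the last report (the cache was reset right above)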
self._logger.report_scalar(
title=title,
series=series,
iteration=step,
value=scalar_data,
)
def _add_histogram(self, tag, step, hist_data):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=1, default_title='Histograms',
logdir_header='series',
force_add_prefix=self._logger._get_tensorboard_series_prefix())
self._grad_helper.add_histogram(
title=title,
series=series,
step=step,
hist_data=hist_data
)
def _add_plot(self, tag, step, values, vdict):
# noinspection PyBroadException
try:
if values.get('floatVal'):
plot_values = np.array(values.get('floatVal'), dtype=np.float32)
else:
plot_values = np.frombuffer(base64.b64decode(values['tensorContent'].encode('utf-8')),
dtype=np.float32)
plot_values = plot_values.reshape((int(values['tensorShape']['dim'][0]['size']),
int(values['tensorShape']['dim'][1]['size'])))
if 'metadata' in vdict:
if tag not in self._series_name_lookup:
self._series_name_lookup[tag] = [(tag, vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
else:
                    # this should not happen, maybe it's another run, let's increase the value
self._series_name_lookup[tag] += [(tag + '_%d' % (len(self._series_name_lookup[tag]) + 1),
vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
tag, series, plugin_name = self._series_name_lookup.get(tag, [(tag, tag, '')])[-1]
if 'pr_curve' in plugin_name:
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
num_thresholds = plot_values.shape[1]
width = 1.0 / num_thresholds
thresholds = np.arange(0.0, 1.0, width, dtype=plot_values.dtype)
data_points = ['Threshold ', 'TP ', 'FP ', 'TN ', 'FN ', 'Precision ', ' Recall']
series = [{'name': series, 'data': np.vstack((plot_values[-1], plot_values[-2])).T,
'labels': [''.join(data_points) + '<br> {:.3f} '.format(thresholds[j]) +
' '.join(['%-3.2f' % v for v in plot_values[:, j]]) for j in
range(num_thresholds)]}]
reverse_xaxis = False
else:
reverse_xaxis = False
series = [{'name': series, 'data': plot_values}]
self._logger.report_line_plot(title=tag, series=series, xaxis='', yaxis='',
iteration=step, reverse_xaxis=reverse_xaxis)
except Exception:
pass
def _add_audio(self, tag, step, values, audio_data=None):
        # only report audio every specific interval
if step % self.image_report_freq != 0:
return None
if values:
audio_str = values['encodedAudioString']
audio_data = base64.b64decode(audio_str)
if audio_data is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Audio', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
stream = BytesIO(audio_data)
if values:
file_extension = guess_extension(values['contentType']) or \
'.{}'.format(values['contentType'].split('/')[-1])
else:
# assume wav as default
file_extension = '.wav'
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=stream,
file_extension=file_extension,
max_history=self.max_keep_images,
)
def _add_text(self, tag, step, tensor_bytes):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Text', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
text = tensor_bytes.decode('utf-8', errors='replace')
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=six.StringIO(text),
file_extension='.txt',
max_history=self.max_keep_images,
)
@staticmethod
def _fix_step_counter(title, series, step):
key = (title, series)
if key not in EventTrainsWriter._title_series_wraparound_counter:
EventTrainsWriter._title_series_wraparound_counter[key] = {'first_step': step, 'last_step': step,
'adjust_counter': 0}
return step
wraparound_counter = EventTrainsWriter._title_series_wraparound_counter[key]
        # we decide on a wraparound if the current step dropped below 90% of the previous step
        # notice: since the counter is an int and we want to avoid rounding errors, we double check in the if
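        # worked example (illustrative): epoch 1 reports steps 0..100 and epoch 2 restarts at step 0;
        # adjust_counter then becomes 101, so the wrapped step 0 is reported as iteration 101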
if step < wraparound_counter['last_step'] and step < 0.9 * wraparound_counter['last_step']:
# adjust step base line
wraparound_counter['adjust_counter'] += wraparound_counter['last_step'] + (1 if step <= 0 else step)
# return adjusted step
wraparound_counter['last_step'] = step
return step + wraparound_counter['adjust_counter']
def add_event(self, event, step=None, walltime=None, **_):
supported_metrics = {
'simpleValue', 'image', 'histo', 'tensor', 'audio'
}
def get_data(value_dict, metric_search_order):
data = None
metric_type = 'Unsupported'
for variant in metric_search_order:
data = value_dict.get(variant)
if data is not None:
metric_type = variant
break
return metric_type, data
# Support multiple threads accessing this instance (i.e. let TF/Keras do what they need)
with self._add_lock:
# TODO: add report frequency threshold (i.e. if we are sending too much data, increase the report_freq)
# we should measure reports per second and throttle back the reporting details accordingly
msg_dict = MessageToDict(event)
summary = msg_dict.get('summary')
if summary is None:
msg_dict.pop('step', None)
msg_dict.pop('wallTime', None)
keys_list = [key for key in msg_dict.keys() if len(key) > 0]
keys_list = ', '.join(keys_list)
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'event summary not found, message type unsupported: %s' % keys_list)
return
value_dicts = summary.get('value')
# noinspection PyUnusedLocal
walltime = walltime or msg_dict.get('step')
step = step or msg_dict.get('step')
if step is None:
# when we start a new epoch there is no step in the msg_dict,
# we have to extract it manually
if hasattr(event, 'step'):
step = int(event.step)
else:
step = 0
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Received event without step, assuming step = {}'.format(step))
else:
step = int(step)
self._max_step = max(self._max_step, step)
if value_dicts is None:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Summary arrived without 'value'")
return
for vdict in value_dicts:
tag = vdict.pop('tag', None)
if tag is None:
# we should not get here
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'No tag for \'value\' existing keys %s' % ', '.join(vdict.keys()))
continue
metric, values = get_data(vdict, supported_metrics)
if metric == 'simpleValue':
self._add_scalar(tag=tag, step=step, scalar_data=values)
elif metric == 'histo':
self._add_histogram(tag=tag, step=step, hist_data=values)
elif metric == 'image':
self._add_image(tag=tag, step=step, img_data=values)
elif metric == 'audio':
self._add_audio(tag, step, values)
elif metric == 'tensor' and values.get('dtype') == 'DT_STRING':
# generic tensor
tensor_bytes = base64.b64decode('\n'.join(values['stringVal']))
plugin_type = self._generic_tensor_type_name_lookup.get(tag) or \
vdict.get('metadata', {}).get('pluginData', {}).get('pluginName', '').lower()
if plugin_type == 'audio':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_audio(tag, step, None, tensor_bytes)
elif plugin_type == 'text':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_text(tag, step, tensor_bytes)
else:
# we do not support it
pass
elif metric == 'tensor' and values.get('dtype') == 'DT_FLOAT':
self._add_plot(tag, step, values, vdict)
else:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Event unsupported. tag = %s, vdict keys [%s]' % (tag, ', '.join(vdict.keys())))
continue
def get_logdir(self):
""" Returns a temporary directory name for compatibility with FileWriter. This directory is not actually used.
:return: '.'
"""
return '.'
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._logger.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self._logger.flush()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
pass
def _get_add_scalars_event_tag(self, title_prefix):
"""
:param str title_prefix: the table title prefix that was added to the series.
:return: str same as tensorboard use
"""
# HACK - this is tensorboard Summary util function, original path:
# ~/torch/utils/tensorboard/summary.py
def _clean_tag(name):
import re as _re
# noinspection RegExpRedundantEscape
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Summary name %s is illegal; using %s instead.' % (name, new_name))
name = new_name
return name
main_path = self._logdir
# noinspection PyBroadException
try:
main_path = _clean_tag(main_path)
origin_tag = main_path.rpartition("/")[2].replace(title_prefix, "", 1)
if title_prefix and origin_tag[0] == "_": # add_scalars tag
origin_tag = origin_tag[1:] # Remove the first "_" that was added by the main_tag in tensorboard
else:
return ""
except Exception:
origin_tag = ""
return origin_tag
@classmethod
def update_current_task(cls, task):
if cls.__main_task != task:
with cls._add_lock:
cls._series_name_lookup = {}
cls._title_series_writers_lookup = {}
cls._event_writers_id_to_logdir = {}
cls._title_series_wraparound_counter = {}
cls.__main_task = task
# noinspection PyCallingNonCallable
class ProxyEventsWriter(object):
def __init__(self, events):
IsTensorboardInit.set_tensorboard_used()
self._events = events
def _get_sentinel_event(self):
ret = None
for ev in self._events:
if hasattr(ev, '_get_sentinel_event'):
# noinspection PyProtectedMember
ret = ev._get_sentinel_event()
return ret
def get_logdir(self):
ret = None
for ev in self._events:
if hasattr(ev, 'get_logdir'):
ret = ev.get_logdir()
return ret
def reopen(self):
ret = None
for ev in self._events:
if hasattr(ev, 'reopen'):
ret = ev.reopen()
return ret
def add_event(self, *args, **kwargs):
ret = None
for ev in self._events:
if hasattr(ev, 'add_event'):
ret = ev.add_event(*args, **kwargs)
return ret
def flush(self):
ret = None
for ev in self._events:
if hasattr(ev, 'flush'):
ret = ev.flush()
return ret
def close(self):
ret = None
for ev in self._events:
if hasattr(ev, 'close'):
ret = ev.close()
return ret
# noinspection PyPep8Naming
class PatchSummaryToEventTransformer(object):
__main_task = None
__original_getattribute = None
__original_getattributeX = None
_original_add_event = None
_original_add_eventT = None
_original_add_eventX = None
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def trains_object(self):
if isinstance(self.event_writer, ProxyEventsWriter):
# noinspection PyProtectedMember
trains_writer = [e for e in self.event_writer._events if isinstance(e, EventTrainsWriter)]
return trains_writer[0] if trains_writer else None
elif isinstance(self.event_writer, EventTrainsWriter):
return self.event_writer
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchSummaryToEventTransformer.defaults_dict.update(kwargs)
PatchSummaryToEventTransformer.__main_task = task
# make sure we patched the SummaryToEventTransformer
PatchSummaryToEventTransformer._patch_summary_to_event_transformer()
PostImportHookPatching.add_on_import('tensorflow',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('torch',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('tensorboardX',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
@staticmethod
def _patch_summary_to_event_transformer():
if 'tensorflow' in sys.modules:
try:
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer # noqa
# only patch once
if PatchSummaryToEventTransformer.__original_getattribute is None:
PatchSummaryToEventTransformer.__original_getattribute = SummaryToEventTransformer.__getattribute__
SummaryToEventTransformer.__getattribute__ = PatchSummaryToEventTransformer._patched_getattribute
setattr(SummaryToEventTransformer, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'torch' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventT is None:
# noinspection PyUnresolvedReferences
from torch.utils.tensorboard.writer import FileWriter as FileWriterT # noqa
PatchSummaryToEventTransformer._original_add_eventT = FileWriterT.add_event
FileWriterT.add_event = PatchSummaryToEventTransformer._patched_add_eventT
setattr(FileWriterT, 'clearml', None)
except ImportError:
                # torch.utils.tensorboard is not available in this torch version
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'tensorboardX' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer.__original_getattributeX is None:
# noinspection PyUnresolvedReferences
from tensorboardX.writer import SummaryToEventTransformer as SummaryToEventTransformerX # noqa
PatchSummaryToEventTransformer.__original_getattributeX = \
SummaryToEventTransformerX.__getattribute__
SummaryToEventTransformerX.__getattribute__ = PatchSummaryToEventTransformer._patched_getattributeX
setattr(SummaryToEventTransformerX, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except ImportError:
                # this is a different version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if PatchSummaryToEventTransformer.__original_getattributeX is None:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventX is None:
from tensorboardX.writer import FileWriter as FileWriterX # noqa
PatchSummaryToEventTransformer._original_add_eventX = FileWriterX.add_event
FileWriterX.add_event = PatchSummaryToEventTransformer._patched_add_eventX
setattr(FileWriterX, 'clearml', None)
except ImportError:
                    # this is a different version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _patched_add_eventT(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
if not self.clearml: # noqa
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
@staticmethod
def _patched_add_eventX(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
if not self.clearml:
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattribute
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattributeX(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattributeX
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattribute_(self, attr, get_base):
        # no main task, zero chance we have a ClearML event logger
if PatchSummaryToEventTransformer.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'event_writer' not in __dict__ or \
isinstance(__dict__['event_writer'], (ProxyEventsWriter, EventTrainsWriter)):
return get_base(self, attr)
# patch the events writer field, and add a double Event Logger (ClearML and original)
base_eventwriter = __dict__['event_writer']
# noinspection PyBroadException
try:
logdir = base_eventwriter.get_logdir()
except Exception:
logdir = None
defaults_dict = __dict__.get('_trains_defaults') or PatchSummaryToEventTransformer.defaults_dict
trains_event = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **defaults_dict)
# order is important, the return value of ProxyEventsWriter is the last object in the list
__dict__['event_writer'] = ProxyEventsWriter([trains_event, base_eventwriter])
return get_base(self, attr)
class _ModelAdapter(object):
""" Model adapter which extends the save and save_weights methods of a Keras Model instance """
_model = None # type: Any
_output_model = None # type: OutputModel
def __init__(self, model, output_model):
super(_ModelAdapter, self).__init__()
super(_ModelAdapter, self).__setattr__('_model', model)
super(_ModelAdapter, self).__setattr__('_output_model', output_model)
super(_ModelAdapter, self).__setattr__('_logger', LoggerRoot.get_base_logger(TensorflowBinding))
def __getattr__(self, attr):
return getattr(self._model, attr)
def __setattr__(self, key, value):
return setattr(self._model, key, value)
def save(self, filepath, overwrite=True, include_optimizer=True):
self._model.save(filepath=filepath, overwrite=overwrite, include_optimizer=include_optimizer)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
def save_weights(self, filepath, overwrite=True):
self._model.save_weights(filepath=filepath, overwrite=overwrite)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
class PatchModelCheckPointCallback(object):
__main_task = None
__original_getattribute = None
defaults_dict = dict(
config_text=None,
config_dict=None,
label_enumeration=None,
name=None,
comment=None)
@staticmethod
def trains_object(self):
if isinstance(self.model, _ModelAdapter):
# noinspection PyProtectedMember
return self.model._output_model
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchModelCheckPointCallback.defaults_dict.update(kwargs)
PatchModelCheckPointCallback.__main_task = task
        # make sure we patched the ModelCheckpoint callback
PatchModelCheckPointCallback._patch_model_checkpoint()
PostImportHookPatching.add_on_import('keras', PatchModelCheckPointCallback._patch_model_checkpoint)
PostImportHookPatching.add_on_import('tensorflow', PatchModelCheckPointCallback._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
is_keras = 'keras' in sys.modules
is_tf_keras = 'tensorflow' in sys.modules
callbacks = None
if is_keras:
try:
import keras.callbacks as callbacks # noqa
except ImportError:
is_keras = False
if not is_keras and is_tf_keras:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
import tensorflow.python.keras.callbacks as callbacks # noqa
except ImportError:
is_tf_keras = False
callbacks = None
# we have nothing, quit
if not is_keras and not is_tf_keras:
return
try:
# only patch once
if PatchModelCheckPointCallback.__original_getattribute is None and callbacks is not None:
PatchModelCheckPointCallback.__original_getattribute = callbacks.ModelCheckpoint.__getattribute__
callbacks.ModelCheckpoint.__getattribute__ = PatchModelCheckPointCallback._patched_getattribute
setattr(callbacks.ModelCheckpoint, 'clearml',
property(PatchModelCheckPointCallback.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchModelCheckPointCallback.__original_getattribute
        # no main task, zero chance we have a ClearML event logger
if PatchModelCheckPointCallback.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'model' not in __dict__ or \
isinstance(__dict__['model'], _ModelAdapter):
return get_base(self, attr)
        # patch the model field, wrapping the model with a _ModelAdapter (ClearML and original)
base_model = __dict__['model']
defaults_dict = __dict__.get('_trains_defaults') or PatchModelCheckPointCallback.defaults_dict
output_model = OutputModel(
PatchModelCheckPointCallback.__main_task,
config_text=defaults_dict.get('config_text'),
config_dict=defaults_dict.get('config_dict'),
name=defaults_dict.get('name'),
comment=defaults_dict.get('comment'),
label_enumeration=defaults_dict.get('label_enumeration') or
PatchModelCheckPointCallback.__main_task.get_labels_enumeration(),
framework=Framework.keras,
)
output_model.set_upload_destination(
PatchModelCheckPointCallback.__main_task.get_output_destination(raise_on_error=False))
trains_model = _ModelAdapter(base_model, output_model)
        # replace the original model with the ClearML model adapter
__dict__['model'] = trains_model
return get_base(self, attr)
# noinspection PyProtectedMember,PyUnresolvedReferences
class PatchTensorFlowEager(object):
__main_task = None
__original_fn_scalar = None
__original_fn_hist = None
__original_fn_image = None
__trains_event_writer = {}
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def update_current_task(task, **kwargs):
if task != PatchTensorFlowEager.__main_task:
PatchTensorFlowEager.__trains_event_writer = {}
PatchTensorFlowEager.defaults_dict.update(kwargs)
PatchTensorFlowEager.__main_task = task
        # make sure we patched the summary ops
PatchTensorFlowEager._patch_summary_ops()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorFlowEager._patch_summary_ops)
@staticmethod
def _patch_summary_ops():
if PatchTensorFlowEager.__original_fn_scalar is not None:
return
if 'tensorflow' in sys.modules:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.ops import gen_summary_ops # noqa
PatchTensorFlowEager.__original_fn_scalar = gen_summary_ops.write_scalar_summary
gen_summary_ops.write_scalar_summary = PatchTensorFlowEager._write_scalar_summary
PatchTensorFlowEager.__original_fn_image = gen_summary_ops.write_image_summary
gen_summary_ops.write_image_summary = PatchTensorFlowEager._write_image_summary
PatchTensorFlowEager.__original_fn_hist = gen_summary_ops.write_histogram_summary
gen_summary_ops.write_histogram_summary = PatchTensorFlowEager._write_hist_summary
PatchTensorFlowEager.__write_summary = gen_summary_ops.write_summary
gen_summary_ops.write_summary = PatchTensorFlowEager._write_summary
gen_summary_ops.create_summary_file_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_file_writer)
gen_summary_ops.create_summary_db_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_db_writer)
except ImportError:
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _get_event_writer(writer):
if not PatchTensorFlowEager.__main_task:
return None
if not PatchTensorFlowEager.__trains_event_writer.get(id(writer)):
# noinspection PyBroadException
try:
logdir = writer.get_logdir()
except Exception:
                # check if we are in eager mode, let's get the global context logdir
# noinspection PyBroadException
try:
from tensorflow.python.eager import context # noqa
logdir = context.context().summary_writer._init_op_fn.keywords.get('logdir')
except Exception:
# noinspection PyBroadException
try:
from tensorflow.python.ops.summary_ops_v2 import _summary_state # noqa
logdir = _summary_state.writer._init_op_fn.keywords.get('logdir')
except Exception:
logdir = None
# noinspection PyBroadException
try:
if logdir is not None:
logdir = logdir.numpy().decode()
except Exception:
logdir = None
PatchTensorFlowEager.__trains_event_writer[id(writer)] = EventTrainsWriter(
logger=PatchTensorFlowEager.__main_task.get_logger(), logdir=logdir,
**PatchTensorFlowEager.defaults_dict)
return PatchTensorFlowEager.__trains_event_writer[id(writer)]
@staticmethod
def trains_object(self):
if not PatchTensorFlowEager.__trains_event_writer:
return None
return PatchTensorFlowEager.__trains_event_writer.get(
id(self), list(PatchTensorFlowEager.__trains_event_writer.values())[0])
@staticmethod
def _write_summary(writer, step, tensor, tag, summary_metadata, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
# make sure we can get the tensors values
        if event_writer and (isinstance(step, int) or hasattr(step, 'numpy')):
# noinspection PyBroadException
try:
plugin_type = summary_metadata.decode()
                # strip any leading non-alphabetic bytes from the decoded metadata
plugin_type = plugin_type[next(i for i, c in enumerate(plugin_type) if c >= 'A'):]
if plugin_type.startswith('scalars'):
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=tensor.numpy())
elif plugin_type.startswith('images'):
img_data_np = tensor.numpy()
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=img_data_np,
tag=tag, step=step, **kwargs)
elif plugin_type.startswith('histograms'):
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=tensor.numpy()
)
elif plugin_type.startswith('text'):
event_writer._add_text(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
tensor_bytes=tensor.numpy()
)
elif 'audio' in plugin_type:
audio_bytes_list = [a for a in tensor.numpy().flatten() if a]
for i, audio_bytes in enumerate(audio_bytes_list):
event_writer._add_audio(tag=str(tag) + ('/{}'.format(i) if len(audio_bytes_list) > 1 else ''),
step=int(step.numpy()) if not isinstance(step, int) else step,
values=None, audio_data=audio_bytes)
else:
pass # print('unsupported plugin_type', plugin_type)
except Exception:
pass
return PatchTensorFlowEager.__write_summary(writer, step, tensor, tag, summary_metadata, name, **kwargs)
@staticmethod
def _write_scalar_summary(writer, step, tag, value, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
        if event_writer and (isinstance(step, int) or hasattr(step, 'numpy')):
try:
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=value.numpy())
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_scalar(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
scalar_data=a_value.numpy())
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, value, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs)
@staticmethod
def _write_hist_summary(writer, step, tag, values, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
        if event_writer and (isinstance(step, int) or hasattr(step, 'numpy')):
try:
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=values.numpy()
)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_histogram(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
hist_data=a_value.numpy()
)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, values, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs)
@staticmethod
def _write_image_summary(writer, step, tag, tensor, bad_color, max_images, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
        if event_writer and (isinstance(step, int) or hasattr(step, 'numpy')):
try:
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=tensor.numpy(),
tag=tag, step=step, **kwargs)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_tensor, a_bad_color, a_max_images, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
PatchTensorFlowEager._add_image_event_helper(
event_writer, img_data_np=a_tensor.numpy(),
tag=str_tag, step=a_step, **kwargs)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, tensor, bad_color, max_images, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_image(
writer, step, tag, tensor, bad_color, max_images, name, **kwargs)
@staticmethod
def _add_image_event_helper(event_writer, img_data_np, tag, step, **kwargs):
if img_data_np.ndim == 1 and img_data_np.size >= 3 and \
(len(img_data_np[0]) < 10 and len(img_data_np[1]) < 10):
# this is just for making sure these are actually valid numbers
width = int(img_data_np[0].decode()) # noqa: F841
height = int(img_data_np[1].decode()) # noqa: F841
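            # descriptive note: in this TF image-summary layout the tensor is a 1-d array of byte
            # strings - element 0 is the width, element 1 the height, and the remaining elements are
            # the encoded image strings (one per sample), which are passed to _add_image below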
for i in range(2, img_data_np.size):
img_data = {'width': None, 'height': None,
'colorspace': 'RGB', 'encodedImageString': img_data_np[i]}
image_tag = str(tag) + '/sample_{}'.format(i - 2) if img_data_np.size > 3 else str(tag)
event_writer._add_image(tag=image_tag,
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data=img_data)
else:
event_writer._add_image_numpy(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data_np=img_data_np,
max_keep_images=kwargs.get('max_images'))
@staticmethod
def _nothing_op(*_, **__):
"""Convenient else branch for when summaries do not record."""
from tensorflow.python.framework import constant_op
return constant_op.constant(False)
# noinspection PyPep8Naming,SpellCheckingInspection
class PatchKerasModelIO(object):
__main_task = None
__patched_keras = None
__patched_tensorflow = None
@staticmethod
def update_current_task(task, **_):
PatchKerasModelIO.__main_task = task
PatchKerasModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchKerasModelIO._patch_model_checkpoint)
PostImportHookPatching.add_on_import('keras', PatchKerasModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if 'keras' in sys.modules and not PatchKerasModelIO.__patched_keras:
try:
from keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
from keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
from keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
from keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
# check that we are not patching anything twice
if PatchKerasModelIO.__patched_tensorflow:
PatchKerasModelIO.__patched_keras = [
Network if PatchKerasModelIO.__patched_tensorflow[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_tensorflow[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_tensorflow[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_tensorflow[3] != Functional else None,
None,
None,
]
else:
PatchKerasModelIO.__patched_keras = [Network, Sequential, keras_saving, Functional, None, None]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_keras)
if 'tensorflow' in sys.modules and not PatchKerasModelIO.__patched_tensorflow:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras import models as keras_saving_legacy # noqa
except ImportError:
keras_saving_legacy = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.saving import hdf5_format as keras_hdf5 # noqa
except ImportError:
keras_hdf5 = None
if PatchKerasModelIO.__patched_keras:
PatchKerasModelIO.__patched_tensorflow = [
Network if PatchKerasModelIO.__patched_keras[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_keras[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_keras[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_keras[3] != Functional else None,
keras_saving_legacy if PatchKerasModelIO.__patched_keras[4] != keras_saving_legacy else None,
keras_hdf5 if PatchKerasModelIO.__patched_keras[5] != keras_hdf5 else None,
]
else:
PatchKerasModelIO.__patched_tensorflow = [
Network, Sequential, keras_saving, Functional, keras_saving_legacy, keras_hdf5]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_tensorflow)
@staticmethod
def _patch_io_calls(Network, Sequential, keras_saving, Functional, keras_saving_legacy=None, keras_hdf5=None):
try:
if Sequential is not None:
Sequential._updated_config = _patched_call(Sequential._updated_config,
PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Sequential.from_config = classmethod(_patched_call(Sequential.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Sequential.from_config = _patched_call(Sequential.from_config, PatchKerasModelIO._from_config)
if Network is not None:
Network._updated_config = _patched_call(Network._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Network.from_config = classmethod(_patched_call(Network.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Network.from_config = _patched_call(Network.from_config, PatchKerasModelIO._from_config)
Network.save = _patched_call(Network.save, PatchKerasModelIO._save)
Network.save_weights = _patched_call(Network.save_weights, PatchKerasModelIO._save_weights)
Network.load_weights = _patched_call(Network.load_weights, PatchKerasModelIO._load_weights)
elif Functional is not None:
Functional._updated_config = _patched_call(
Functional._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Functional.from_config = classmethod(_patched_call(Functional.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Functional.from_config = _patched_call(Functional.from_config, PatchKerasModelIO._from_config)
Functional.save = _patched_call(Functional.save, PatchKerasModelIO._save)
Functional.save_weights = _patched_call(Functional.save_weights, PatchKerasModelIO._save_weights)
Functional.load_weights = _patched_call(Functional.load_weights, PatchKerasModelIO._load_weights)
if keras_saving is not None:
keras_saving.save_model = _patched_call(keras_saving.save_model, PatchKerasModelIO._save_model)
keras_saving.load_model = _patched_call(keras_saving.load_model, PatchKerasModelIO._load_model)
if keras_saving_legacy is not None:
keras_saving_legacy.save_model = _patched_call(
keras_saving_legacy.save_model, PatchKerasModelIO._save_model)
keras_saving_legacy.load_model = _patched_call(
keras_saving_legacy.load_model, PatchKerasModelIO._load_model)
if keras_hdf5 is not None:
keras_hdf5.save_weights_to_hdf5_group = _patched_call(
keras_hdf5.save_weights_to_hdf5_group, PatchKerasModelIO._save_weights)
keras_hdf5.load_weights_from_hdf5_group = _patched_call(
keras_hdf5.load_weights_from_hdf5_group, PatchKerasModelIO._load_weights)
keras_hdf5.load_weights_from_hdf5_group_by_name = _patched_call(
keras_hdf5.load_weights_from_hdf5_group_by_name, PatchKerasModelIO._load_weights)
if hasattr(keras_hdf5, 'load_model_from_hdf5'):
keras_hdf5.load_model_from_hdf5 = _patched_call(
keras_hdf5.load_model_from_hdf5, PatchKerasModelIO._load_model)
if hasattr(keras_hdf5, 'save_model_to_hdf5'):
keras_hdf5.save_model_to_hdf5 = _patched_call(
keras_hdf5.save_model_to_hdf5, PatchKerasModelIO._save_model)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _updated_config(original_fn, self):
config = original_fn(self)
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return config
try:
# there is no actual file, so we create the OutputModel without one
            # make sure the object has an output model list
if not hasattr(self, 'trains_out_model'):
self.trains_out_model = []
            # use the model name from the config (or the object) if available
model_name_id = config.get('name', getattr(self, 'name', 'unknown'))
if self.trains_out_model:
self.trains_out_model[-1].config_dict = config
else:
# todo: support multiple models for the same task
self.trains_out_model.append(OutputModel(
task=PatchKerasModelIO.__main_task,
config_dict=config,
name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
framework=Framework.keras,
))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return config
@staticmethod
def _from_config(original_fn, *args, **kwargs):
try:
self = original_fn(*args, **kwargs)
except Exception as ex:
if not running_remotely():
raise ex
self = _Empty()
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return self
try:
# check if object already has InputModel
if not hasattr(self, 'trains_in_model'):
self.trains_in_model = None
# get config
config_dict = kwargs['config'] if 'config' in kwargs else args[0]
# check if object already has InputModel
self.trains_in_model = InputModel.empty(
config_dict=config_dict,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
)
# todo: support multiple models for the same task
PatchKerasModelIO.__main_task.connect(self.trains_in_model)
# if we are running remotely we should deserialize the object
# because someone might have changed the configuration
# Hack: disabled
if False and running_remotely():
# reload the model
model_config = self.trains_in_model.config_dict
# verify that this is the same model so we are not deserializing a diff model
if (config_dict and config_dict.get('config') and model_config and model_config.get('config') and
config_dict.get('config').get('name') == model_config.get('config').get('name')) or \
(not config_dict and not model_config):
if 'config' in kwargs:
kwargs['config'] = model_config
else:
args = (model_config,) + args[1:]
model = original_fn(*args, **kwargs)
model.trains_in_model = self.trains_in_model
return model
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return self
@staticmethod
def _load_weights(original_fn, self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return original_fn(self, *args, **kwargs)
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
if 'filepath' in kwargs:
kwargs['filepath'] = filepath
else:
args = (filepath,) + args[1:]
# load model
return original_fn(self, *args, **kwargs)
        # try to load the files; if anything goes wrong, an exception is raised before we register the file
model = original_fn(self, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras, PatchKerasModelIO.__main_task)
return model
@staticmethod
def _save(original_fn, self, *args, **kwargs):
if hasattr(self, 'trains_out_model') and self.trains_out_model:
# noinspection PyProtectedMember
self.trains_out_model[-1]._processed = False
original_fn(self, *args, **kwargs)
# no need to specially call, because the original save uses "save_model" which we overload
# noinspection PyProtectedMember
if not hasattr(self, 'trains_out_model') or not self.trains_out_model or \
not hasattr(self.trains_out_model[-1], '_processed') or not self.trains_out_model[-1]._processed:
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _save_weights(original_fn, self, *args, **kwargs):
original_fn(self, *args, **kwargs)
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _update_outputmodel(self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return
try:
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# this will already generate an output model
# noinspection PyBroadException
try:
config = self._updated_config()
except Exception:
# we failed to convert the network to json, for some reason (most likely internal keras error)
config = {}
if filepath:
WeightsFileHandler.create_output_model(
self, filepath, Framework.keras, PatchKerasModelIO.__main_task,
config_obj=config or None, singlefile=True)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _save_model(original_fn, model, filepath, *args, **kwargs):
original_fn(model, filepath, *args, **kwargs)
if PatchKerasModelIO.__main_task:
PatchKerasModelIO._update_outputmodel(model, filepath)
@staticmethod
def _load_model(original_fn, filepath, *args, **kwargs):
if not PatchKerasModelIO.__main_task:
return original_fn(filepath, *args, **kwargs)
empty = _Empty()
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
model = original_fn(filepath, *args, **kwargs)
else:
model = original_fn(filepath, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras, PatchKerasModelIO.__main_task)
# update the input model object
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflowModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflowModelIO.__main_task = task
PatchTensorflowModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflowModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflowModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflowModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
# noinspection PyUnresolvedReferences
from tensorflow.python.training.saver import Saver # noqa
# noinspection PyBroadException
try:
Saver.save = _patched_call(Saver.save, PatchTensorflowModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
Saver.restore = _patched_call(Saver.restore, PatchTensorflowModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model import save # noqa
# actual import
from tensorflow.python.saved_model import save as saved_model # noqa
except ImportError:
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model.experimental import save # noqa
# actual import
import tensorflow.saved_model.experimental as saved_model # noqa
except ImportError:
saved_model = None
except Exception:
saved_model = None
except Exception:
saved_model = None
if saved_model is not None:
saved_model.save = _patched_call(saved_model.save, PatchTensorflowModelIO._save_model)
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
from tensorflow.saved_model import load # noqa
# noinspection PyUnresolvedReferences
import tensorflow.saved_model as saved_model_load # noqa
saved_model_load.load = _patched_call(saved_model_load.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.saved_model import loader as loader1 # noqa
loader1.load = _patched_call(loader1.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.compat.v1.saved_model import loader as loader2 # noqa
loader2.load = _patched_call(loader2.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
import tensorflow # noqa
from tensorflow.train import Checkpoint # noqa
# noinspection PyBroadException
try:
Checkpoint.save = _patched_call(Checkpoint.save, PatchTensorflowModelIO._ckpt_save)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.restore = _patched_call(Checkpoint.restore, PatchTensorflowModelIO._ckpt_restore)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.write = _patched_call(Checkpoint.write, PatchTensorflowModelIO._ckpt_write)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
@staticmethod
def _save(original_fn, self, sess, save_path, *args, **kwargs):
saved_path = original_fn(self, sess, save_path, *args, **kwargs)
if not saved_path:
return saved_path
# store output Model
return WeightsFileHandler.create_output_model(self, saved_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _save_model(original_fn, obj, export_dir, *args, **kwargs):
original_fn(obj, export_dir, *args, **kwargs)
# store output Model
WeightsFileHandler.create_output_model(obj, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _restore(original_fn, self, sess, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, sess, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
# load model
return original_fn(self, sess, save_path, *args, **kwargs)
# load model, if something is wrong, exception will be raised before we register the input model
model = original_fn(self, sess, save_path, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return model
@staticmethod
def _load(original_fn, sess, tags, export_dir, *args, **saver_kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(sess, tags, export_dir, *args, **saver_kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
export_dir = WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
else:
# try to load model before registering, it might fail
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _ckpt_save(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_write(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
save_path = WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(self, save_path, *args, **kwargs)
else:
# try to load model before registering it, in case it fails.
model = original_fn(self, save_path, *args, **kwargs)
WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflow2ModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflow2ModelIO.__main_task = task
PatchTensorflow2ModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflow2ModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflow2ModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflow2ModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.training.tracking import util # noqa
# noinspection PyBroadException
try:
util.TrackableSaver.save = _patched_call(util.TrackableSaver.save,
PatchTensorflow2ModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
util.TrackableSaver.restore = _patched_call(util.TrackableSaver.restore,
PatchTensorflow2ModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow v2')
@staticmethod
def _save(original_fn, self, file_prefix, *args, **kwargs):
model = original_fn(self, file_prefix, *args, **kwargs)
# store output Model
# noinspection PyBroadException
try:
WeightsFileHandler.create_output_model(self, file_prefix, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
@staticmethod
def _restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflow2ModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
# noinspection PyBroadException
try:
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
# load model
return original_fn(self, save_path, *args, **kwargs)
# load model, if something is wrong, exception will be raised before we register the input model
model = original_fn(self, save_path, *args, **kwargs)
# register/load model weights
# noinspection PyBroadException
try:
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
the-stack_106_23699 | """CSC110 Fall 2020: Climate Change and Natural Disaster Sentiment Analysis
This is the main module that converts dataset of ids to tweet sentiments and then plots the results.
Copyright and Usage Information
==============================
This file is Copyright (c) 2020 Akash Ilangovan, Vijay Sambamurthy, Dharmpreet Atwal, Issam Arabi.
"""
from datetime import datetime, timedelta
from typing import List, Tuple
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import data_management
import gen_ids
import sentiment
import hydration
API_KEY = "" # OMITTED
KEY_SECRET = "" # OMITTED
ACCESS_TOKEN = "" # OMITTED
ACCESS_TOKEN_SECRET = "" # OMITTED
KEYS = (API_KEY, KEY_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
def ids_to_sentiment(directory: str, max_tweets: int, keys: Tuple[str, str, str, str], random: bool,
output_file: str) -> None:
"""
Generates a csv of tweet sentiments from a directory of tweet ids.
"""
if directory[-1] != "/" and directory[-1] != "\\":
directory += "/"
tweet_ids = directory
csv_store = directory
if random:
gen_ids.randomize_csvs(directory, max_tweets)
tweet_ids += 'generated_ids/randomized.txt'
csv_store += 'randomized_data.csv'
else:
gen_ids.gen_regular_csvs(directory, max_tweets)
tweet_ids += 'generated_ids/regularized.txt'
csv_store += 'regularized_data.csv'
print("Tweet Ids Generated.")
hydration.generate_tweets(tweet_ids, csv_store, keys)
print("Tweets Hydrated.")
dat = data_management.read_csv_dict(csv_store)
averages = sentiment.generate_average_sentiments(dat)
data_management.convert_list_to_csv(output_file, averages)
def plot_with_dd_csv(filename: str, disaster_data: str) -> None:
"""
Plot tweet sentiment data points in conjunction with natural disaster data points
"""
data = data_management.read_csv_dict(filename)
disaster_info = data_management.read_csv_dict(disaster_data)
data = data_management.dict_to_list(data)
disaster_info = data_management.dict_to_list(disaster_info)
    disaster_tweets = near_disaster_data(data, disaster_info)
plottable = list_to_plottable(data)
plottable1 = list_to_plottable(disaster_tweets)
legend = ["Non-Disaster Tweets", "Disaster Tweets"]
plot([plottable, plottable1], legend)
print("Average for non-disaster:", average_of_all(data))
print("Average for disaster:", average_of_all(disaster_tweets))
def average_of_all(data: List[Tuple[datetime, float]]) -> float:
"""
Return average sentiment of a list of tweet sentiments
>>> average_of_all([(datetime(2020, 12, 11), 2.0), \
(datetime(2020, 12, 11), 4.0), (datetime(2020, 12, 12), 3.0)])
3.0
"""
sum_value = 0
for value in data:
sum_value += value[1]
return sum_value / len(data)
def near_disaster_data(data: List[Tuple[datetime, float]],
                       info: List[Tuple[datetime]]) -> List[Tuple[datetime, float]]:
    """
    Remove any tweets that are within three days of a natural disaster from data
    and return them in a new list containing only the disaster tweets.
>>> tweets = [(datetime(2020, 12, 6), 2.0), (datetime(2020, 12, 12), 4.0),\
(datetime(2020, 12, 10), 3.0)]
>>> disaster_info = [(datetime(2020, 12, 11),)]
    >>> near_disaster_data(tweets, disaster_info)
[(datetime.datetime(2020, 12, 12, 0, 0), 4.0), (datetime.datetime(2020, 12, 10, 0, 0), 3.0)]
>>> tweets
[(datetime.datetime(2020, 12, 6, 0, 0), 2.0)]
"""
near_disasters = []
for value in data:
for event in info:
span_start = event[0] - timedelta(days=3)
span_end = event[0] + timedelta(days=3)
if span_end >= value[0] >= span_start and value not in near_disasters:
near_disasters.append(value)
for item in near_disasters:
data.remove(item)
return near_disasters
def list_to_plottable(data: List) -> Tuple[List[datetime], List[float]]:
"""
Convert a list to a set of parallel lists
>>> list_to_plottable([(datetime(2020, 12, 6), 2.0), (datetime(2020, 12, 10), 3.0)])
([datetime.datetime(2020, 12, 6, 0, 0), datetime.datetime(2020, 12, 10, 0, 0)], [2.0, 3.0])
"""
list1 = []
list2 = []
for value in data:
list1.append(value[0])
list2.append(value[1])
return (list1, list2)
############################
def plot(data: List[Tuple[List, List]], legend: List[str]) -> None:
"""
    Creates a scatter plot from the given date/value series, labelled using the given legend entries.
"""
colors = ['blue', 'red', 'green']
fig = plt.figure()
ax = fig.add_subplot()
plt.xlabel("Date")
for values in data:
ax.scatter(values[0], values[1], color=colors[data.index(values)])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.ylabel("Average Sentiment Value of Tweets")
fig.legend(legend)
plt.show()
def plot_from_csv(filename: str) -> None:
"""
Directly attempts to plot from a given CSV File.
"""
data = data_management.read_csv_dict(filename)
data = data_management.dict_to_list(data)
plottable = list_to_plottable(data)
plot([plottable], ['All tweets on the day'])
if __name__ == "__main__":
data_file = "data/backup_sentiments.csv"
# This file has already been generated by the output of the line of code below
# calling ids_to_sentiment.
# In order to run it, please fill in the twitter api KEYS at the top. For more reference
# please check twarc documentation at https://github.com/DocNow/twarc.
# Also check twitter TOS and api https://developer.twitter.com/
# Make sure there are txt/csv files within twarc_data/csvs/ for this program to work. Any number
# of csvs would work.
# Un-comment this line if you would like to test this with hydration.
# ids_to_sentiment("twarc_data/", 20000, KEYS, False, data_file)
plot_with_dd_csv(data_file, 'data/Disasters_Data.csv')
import doctest
doctest.testmod()
the-stack_106_23702 | from tello import Tello
import sys
from datetime import datetime
import time
# from PIL import Image
import cv2
import numpy as np
import rospy
import std_msgs.msg
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import camera_info_manager as cim
from sensor_msgs.msg import CameraInfo
import tf
import tf2_ros
import tf2_msgs.msg
import geometry_msgs.msg
class TelloVisualOdometry():
def __init__(self):
# Parameters
self.loop_rate = rospy.Rate(30)
self.drone_speed = int(100) #Speed of the Tello drone in cm/s (10-100)
self.drone_speed = np.clip(self.drone_speed, 10, 100)
# Set waypoints
self.final_goal_reached = False
self.goal_index = 0
self.relative_waypoints = []
        half_box_len = 0.5  # Half the length of the box in meters
# Start with Drone placed at the center.
self.relative_waypoints.append([half_box_len, half_box_len, 0]) # Center to Top Left
self.relative_waypoints.append([-2*half_box_len, 0, 0]) # Top Left to Bottom Left
self.relative_waypoints.append([0, -2*half_box_len, 0]) # Bottom Left to Bottom Right
self.relative_waypoints.append([2*half_box_len, 0, 0]) # Bottom right to Top Right
self.relative_waypoints.append([0, 2*half_box_len, 0]) # Top Right to Top Left
self.relative_waypoints.append([-2*half_box_len, 0, 0]) # Top Left to Bottom Left
self.relative_waypoints.append([0, -2*half_box_len, 0]) # Bottom Left to Bottom Right
self.relative_waypoints.append([2*half_box_len, 0, 0]) # Bottom right to Top Right
# self.relative_waypoints.append([-half_box_len, half_box_len, 0]) # Top Left to Center
# Connect to Tello
self.command_lock = False
self.tello = Tello('', 8889)
self.send_command_until_ack('command')
self.send_command_until_ack('streamon')
def check_battery(self):
while self.command_lock:
rospy.sleep(0.1)
print("Checking battery percentage.")
if not self.command_lock:
self.command_lock = True
max_n_retry = 10
n_retry = 0
battery_percentage = -1
while n_retry < max_n_retry:
response = self.tello.send_command("battery?")
try:
battery_percentage = int(response)
print("Battery at {} percent.".format(battery_percentage))
self.command_lock = False
return battery_percentage
except:
print("Error: Could not convert response = {} to battery precentage.".format(response))
rospy.sleep(0.1)
n_retry = n_retry + 1
self.command_lock = False
if n_retry == max_n_retry:
raise ValueError('Error: Max number of retries reached.')
def send_command_until_ack(self, cmd_str):
if not self.command_lock:
self.command_lock = True
ack = "not ok"
max_n_retry = 10
n_retry = 0
while ack != 'ok' and n_retry < max_n_retry:
ack = self.tello.send_command(cmd_str)
print(ack)
rospy.sleep(0.1)
n_retry = n_retry + 1
self.command_lock = False
if n_retry == max_n_retry:
raise ValueError('Error: Max number of retries reached.')
def start(self):
# Takeoff
self.send_command_until_ack('takeoff')
self.is_drone_in_the_air = True
rospy.sleep(5)
# Set Tello speed.
self.send_command_until_ack('speed ' + str(self.drone_speed))
for i in range(len(self.relative_waypoints)):
# Extract translations (convert from m to cm)
x_cm = int(np.round(self.relative_waypoints[i][0]*100))
y_cm = int(np.round(self.relative_waypoints[i][1]*100))
z_cm = int(np.round(self.relative_waypoints[i][2]*100))
x_cm = np.clip(x_cm, -500, 500)
y_cm = np.clip(y_cm, -500, 500)
z_cm = np.clip(z_cm, -500, 500)
print("Moving to x={}, y={}, z={} relative to the current position. ".format(x_cm, y_cm, z_cm))
self.send_command_until_ack('go {} {} {} {}'.format(x_cm, y_cm, z_cm, self.drone_speed))
d = np.sqrt(x_cm**2 + y_cm**2 + z_cm**2)
time_to_move = d / self.drone_speed
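            # d is in cm and drone_speed is in cm/s, so time_to_move is in seconds
            # (e.g. a 100 cm leg at 100 cm/s takes ~1 s); the extra 5 s slept below
            # is presumably a margin for the drone to settle before the next command.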
rospy.sleep(time_to_move + 5.0)
self.final_goal_reached = True
self.end()
def end(self):
# Stop drone video
self.tello.video_freeze()
if self.final_goal_reached:
print("\n\n\nFinal goal reached!!! Landing drone. =)\n\n\n")
# Land the drone.
self.send_command_until_ack('land')
rospy.sleep(5)
print("Drone has landed.")
# Print final battery percentage.
self.check_battery()
def __del__(self):
print("TelloVisualOdometry object deconstructor called.")
self.end()
# Delete tello object to close socket.
del self.tello
print("tello object deconstructed")
if __name__ == "__main__":
rospy.init_node("Tello_Visual_Odometry_Node", anonymous=False)
tello_node = TelloVisualOdometry()
tello_node.start()
    del tello_node
the-stack_106_23703 | # -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
x = list(map(int, input().split()))
for index, xi in enumerate(x, 1):
if xi == 0:
print(index)
exit()
if __name__ == '__main__':
main()
the-stack_106_23705 | from brownie import interface
from rich.console import Console
from tabulate import tabulate
from helpers.utils import val
from helpers.multicall import Call, as_wei, func
from .StrategyCoreResolver import StrategyCoreResolver
console = Console()
class StrategyDiggRewardsResolver(StrategyCoreResolver):
# ===== Strategies must implement =====
def confirm_rebase(self, before, after, value):
"""
bDIGG values should stay the same.
"""
super().confirm_rebase(before, after, value)
assert after.get("sett.totalSupply") == before.get("sett.totalSupply")
def printHarvestState(self, event, keys):
table = []
console.print("[blue]== Harvest State ==[/blue]")
for key in keys:
table.append([key, val(event[key])])
print(tabulate(table, headers=["account", "value"]))
def confirm_harvest_events(self, before, after, tx):
key = 'HarvestState'
assert key in tx.events
assert len(tx.events[key]) == 1
event = tx.events[key][0]
keys = [
'totalDigg',
'totalShares',
'totalScaledShares',
'diggIncrease',
'sharesIncrease',
'scaledSharesIncrease',
]
for key in keys:
assert key in event
self.printHarvestState(event, keys)
def confirm_harvest(self, before, after, tx):
super().confirm_harvest(before, after, tx)
# No staking position, strategy want should increase irrespective of
# current balance.
# TODO: Add more specific check that the correct reward amount was deposited.
assert (
after.get("strategy.balanceOf") >= before.get("strategy.balanceOf")
)
# PPFS should not decrease
assert after.get("sett.pricePerFullShare") >= before.get(
"sett.pricePerFullShare"
)
def confirm_deposit(self, before, after, params):
"""
Since DIGG is a rebasing token, the amount of shares
transferred per DIGG changes over time so we need to
calculated expected shares using the following equation:
(sharesTransferred * totalSupply) / poolBefore
Note that shares scale values are scaled down to 18 decimal
values (e.g. sharesTransferred, poolBefore).
"""
digg = self.manager.badger.digg.token
sharesTransferred = after.get("sett.shares") - before.get("sett.shares")
sharesTransferredScaled = digg.sharesToScaledShares(sharesTransferred)
totalSupply = before.get("sett.totalSupply") # bDIGG is already at 18 decimal scale
if totalSupply == 0:
expected_shares = sharesTransferredScaled
else:
poolBefore = before.get("sett.shares")
poolBeforeScaled = digg.sharesToScaledShares(poolBefore)
expected_shares = (sharesTransferredScaled * totalSupply) / poolBeforeScaled
params["expected_shares"] = expected_shares
# We need to pass in expected_shares to the core resolver so we call the
# super method down here.
super().confirm_deposit(before, after, params)
def add_balances_snap(self, calls, entities):
calls = super().add_balances_snap(calls, entities)
digg = interface.IERC20(self.manager.strategy.want())
calls = self.add_entity_balances_for_tokens(calls, "digg", digg, entities)
calls = self.add_entity_shares_for_tokens(calls, "digg", digg, entities)
return calls
def get_strategy_destinations(self):
strategy = self.manager.strategy
return {
"diggFaucet": strategy.diggFaucet(),
}
def add_strategy_snap(self, calls, entities=None):
super().add_strategy_snap(calls)
sett = self.manager.sett
strategy = self.manager.strategy
calls.append(
Call(
strategy.diggFaucet(),
[func.diggFaucet.earned],
[["diggFaucet.earned", as_wei]],
)
)
# Sett Shares
calls.append(Call(sett.address, [func.sett.shares], [["sett.shares", as_wei]],))
# Strategy Shares
calls.append(
Call(
strategy.address,
[func.strategy.sharesOf],
[["strategy.sharesOf", as_wei]],
)
)
calls.append(
Call(
strategy.address,
[func.strategy.sharesOfPool],
[["strategy.sharesOfPool", as_wei]],
)
)
calls.append(
Call(
strategy.address,
[func.strategy.sharesOfWant],
[["strategy.sharesOfWant", as_wei]],
)
)
return calls
the-stack_106_23707 | #!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter06/test_tls.py
# Attempt a TLS connection and, if successful, report its properties
import argparse, socket, ssl, sys, textwrap
import ctypes
from pprint import pprint
def open_tls(context, address, server=False):
raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if server:
raw_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
raw_sock.bind(address)
raw_sock.listen(1)
say('Interface where we are listening', address)
raw_client_sock, address = raw_sock.accept()
say('Client has connected from address', address)
return context.wrap_socket(raw_client_sock, server_side=True)
else:
say('Address we want to talk to', address)
raw_sock.connect(address)
return context.wrap_socket(raw_sock)
def describe(ssl_sock, hostname, server=False, debug=False):
cert = ssl_sock.getpeercert()
if cert is None:
say('Peer certificate', 'none')
else:
say('Peer certificate', 'provided')
subject = cert.get('subject', [])
names = [name for names in subject for (key, name) in names
if key == 'commonName']
if 'subjectAltName' in cert:
names.extend(name for (key, name) in cert['subjectAltName']
if key == 'DNS')
say('Name(s) on peer certificate', *names or ['none'])
if (not server) and names:
try:
ssl.match_hostname(cert, hostname)
except ssl.CertificateError as e:
message = str(e)
else:
message = 'Yes'
say('Whether name(s) match the hostname', message)
for category, count in sorted(context.cert_store_stats().items()):
say('Certificates loaded of type {}'.format(category), count)
try:
protocol_version = SSL_get_version(ssl_sock)
except Exception:
if debug:
raise
else:
say('Protocol version negotiated', protocol_version)
cipher, version, bits = ssl_sock.cipher()
compression = ssl_sock.compression()
say('Cipher chosen for this connection', cipher)
say('Cipher defined in TLS version', version)
say('Cipher key has this many bits', bits)
say('Compression algorithm in use', compression or 'none')
return cert
class PySSLSocket(ctypes.Structure):
"""The first few fields of a PySSLSocket (see Python's Modules/_ssl.c)."""
_fields_ = [('ob_refcnt', ctypes.c_ulong), ('ob_type', ctypes.c_void_p),
('Socket', ctypes.c_void_p), ('ssl', ctypes.c_void_p)]
def SSL_get_version(ssl_sock):
"""Reach behind the scenes for a socket's TLS protocol version."""
if sys.version_info >= (3, 5):
return ssl_sock.version()
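    # Fallback for Python < 3.5: load the _ssl extension module (which is linked
    # against OpenSSL) and call SSL_get_version() directly on the connection's
    # underlying SSL* pointer, reached by casting the CPython object address of
    # _sslobj onto the partial PySSLSocket layout declared above.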
lib = ctypes.CDLL(ssl._ssl.__file__)
lib.SSL_get_version.restype = ctypes.c_char_p
address = id(ssl_sock._sslobj)
struct = ctypes.cast(address, ctypes.POINTER(PySSLSocket)).contents
version_bytestring = lib.SSL_get_version(struct.ssl)
return version_bytestring.decode('ascii')
def lookup(prefix, name):
if not name.startswith(prefix):
name = prefix + name
try:
return getattr(ssl, name)
except AttributeError:
matching_names = (s for s in dir(ssl) if s.startswith(prefix))
message = 'Error: {!r} is not one of the available names:\n {}'.format(
name, ' '.join(sorted(matching_names)))
print(fill(message), file=sys.stderr)
sys.exit(2)
def say(title, *words):
print(fill(title.ljust(36, '.') + ' ' + ' '.join(str(w) for w in words)))
def fill(text):
return textwrap.fill(text, subsequent_indent=' ',
break_long_words=False, break_on_hyphens=False)
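# Example invocations (illustrative sketches; the host, port and file names are
# assumptions, not part of the original text):
#   client side:  python3 test_tls.py -a ca.crt example.com 443
#   server side:  python3 test_tls.py -s localcert.pem '' 1060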
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Protect a socket with TLS')
parser.add_argument('host', help='hostname or IP address')
parser.add_argument('port', type=int, help='TCP port number')
parser.add_argument('-a', metavar='cafile', default=None,
help='authority: path to CA certificate PEM file')
parser.add_argument('-c', metavar='certfile', default=None,
help='path to PEM file with client certificate')
parser.add_argument('-C', metavar='ciphers', default='ALL',
help='list of ciphers, formatted per OpenSSL')
parser.add_argument('-p', metavar='PROTOCOL', default='SSLv23',
help='protocol version (default: "SSLv23")')
parser.add_argument('-s', metavar='certfile', default=None,
help='run as server: path to certificate PEM file')
parser.add_argument('-d', action='store_true', default=False,
help='debug mode: do not hide "ctypes" exceptions')
parser.add_argument('-v', action='store_true', default=False,
help='verbose: print out remote certificate')
args = parser.parse_args()
address = (args.host, args.port)
protocol = lookup('PROTOCOL_', args.p)
context = ssl.SSLContext(protocol)
context.set_ciphers(args.C)
context.check_hostname = False
if (args.s is not None) and (args.c is not None):
parser.error('you cannot specify both -c and -s')
elif args.s is not None:
context.verify_mode = ssl.CERT_OPTIONAL
purpose = ssl.Purpose.CLIENT_AUTH
context.load_cert_chain(args.s)
else:
context.verify_mode = ssl.CERT_REQUIRED
purpose = ssl.Purpose.SERVER_AUTH
if args.c is not None:
context.load_cert_chain(args.c)
if args.a is None:
context.load_default_certs(purpose)
else:
context.load_verify_locations(args.a)
print()
ssl_sock = open_tls(context, address, args.s)
cert = describe(ssl_sock, args.host, args.s, args.d)
print()
if args.v:
pprint(cert)
the-stack_106_23708 | # -*- coding: utf-8 -*-
import pandas as pd
from functools import wraps
from ..common import _getJson, _dateRange, _strOrDate, _toDatetime
def timeSeriesInventory(token='', version=''):
'''Get inventory of available time series endpoints
Returns:
result (dict)
'''
return _getJson('time-series/', token, version)
def timeSeriesInventoryDF(token='', version=''):
'''Get inventory of available time series endpoints
Returns:
result (DataFrame)
'''
return pd.io.json.json_normalize(timeSeriesInventory(token=token, version=version))
def timeSeries(id='',
key='',
subkey='',
range=None,
calendar=False,
limit=1,
subattribute='',
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
token='',
version='',
filter=''):
    '''Time series is the most common type of data available, and consists of a
    collection of data points over a period of time, identified by a dataset id,
    key, and optional subkey.
    https://iexcloud.io/docs/api/#time-series
Args:
id (str): ID used to identify a time series dataset.
key (str): Key used to identify data within a dataset. A common example is a symbol such as AAPL.
subkey (str): The optional subkey can used to further refine data for a particular key if available.
range (str): Returns data for a given range. Supported ranges described below.
calendar (bool): Used in conjunction with range to return data in the future.
limit (int): Limits the number of results returned. Defaults to 1.
subattribute (str): Allows you to query time series by any field in the result set. All time series data is stored by ID, then key, then subkey. If you want to query by any other field in the data, you can use subattribute.
For example, news may be stored as /news/{symbol}/{newsId}, and the result data returns the keys id, symbol, date, sector, hasPaywall
By default you can only query by symbol or id. Maybe you want to query all news where the sector is Technology. Your query would be:
/time-series/news?subattribute=source|WSJ
The syntax is subattribute={keyName}|{value}. Both the key name and the value are case sensitive. A pipe symbol is used to represent ‘equal to’.
dateField (str or datetime): All time series data is stored by a single date field, and that field is used for any range or date parameters. You may want to query time series data by a different date in the result set. To change the date field used by range queries, pass the case sensitive field name with this parameter.
For example, corporate buy back data may be stored by announce date, but also contains an end date which you’d rather query by. To query by end date you would use dateField=endDate&range=last-week
from_ (str or datetime): Returns data on or after the given from date. Format YYYY-MM-DD
to_ (str or datetime): Returns data on or before the given to date. Format YYYY-MM-DD
on (str or datetime): Returns data on the given date. Format YYYY-MM-DD
last (int): Returns the latest n number of records in the series
first (int): Returns the first n number of records in the series
token (string); Access token
version (string); API version
filter (string); filters: https://iexcloud.io/docs/api/#filter-results
Returns:
dict: result
Date Ranges:
today Returns data for today
yesterday Returns data for yesterday
ytd Returns data for the current year
last-week Returns data for Sunday-Saturday last week
last-month Returns data for the last month
last-quarter Returns data for the last quarter
d Use the short hand d to return a number of days. Example: 2d returns 2 days.
If calendar=true, data is returned from today forward.
w Use the short hand w to return a number of weeks. Example: 2w returns 2 weeks.
If calendar=true, data is returned from today forward.
m Use the short hand m to return a number of months. Example: 2m returns 2 months.
If calendar=true, data is returned from today forward.
q Use the short hand q to return a number of quarters. Example: 2q returns 2 quarters.
If calendar=true, data is returned from today forward.
y Use the short hand y to return a number of years. Example: 2y returns 2 years.
If calendar=true, data is returned from today forward.
tomorrow Calendar data for tomorrow. Requires calendar=true
this-week Calendar data for Sunday-Saturday this week. Requires calendar=true
this-month Calendar data for current month. Requires calendar=true
this-quarter Calendar data for current quarter. Requires calendar=true
next-week Calendar data for Sunday-Saturday next week. Requires calendar=true
next-month Calendar data for next month. Requires calendar=true
next-quarter Calendar data for next quarter. Requires calendar=true
'''
if not id:
return timeSeriesInventory(token=token, version=version)
base_url = 'time-series/{}'.format(id)
if key:
base_url += '/{}'.format(key)
if subkey:
base_url += '/{}'.format(subkey)
base_url += '?'
if range:
range = _dateRange(range)
base_url += 'range={}&'.format(range)
base_url += 'calendar={}&'.format(str(calendar))
base_url += 'limit={}&'.format(str(limit))
if subattribute:
base_url += 'subattribute={}&'.format(subattribute)
if dateField:
base_url += 'dateField={}&'.format(dateField)
if from_:
base_url += 'from={}&'.format(_strOrDate(from_))
if to_:
base_url += 'to={}&'.format(_strOrDate(to_))
if on:
base_url += 'on={}&'.format(_strOrDate(on))
if last:
base_url += 'last={}&'.format(str(last))
if first:
base_url += 'first={}&'.format(str(first))
return _getJson(base_url, token, version, filter)
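# Illustrative usage (a sketch, not part of the original module; the dataset ids,
# keys and token below are assumptions for demonstration only):
#
#   reports = timeSeries(id='REPORTED_FINANCIALS', key='AAPL', subkey='10-K',
#                        last=4, token='<YOUR_TOKEN>')
#   news_df = timeSeriesDF(id='news', key='AAPL', range='last-week',
#                          token='<YOUR_TOKEN>')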
@wraps(timeSeries)
def timeSeriesDF(*args, **kwargs):
df = pd.io.json.json_normalize(timeSeries(*args, **kwargs))
_toDatetime(df)
return df
@wraps(timeSeries)
def tenQ(symbol, **kwargs):
    kwargs.pop('id', None)
    kwargs.pop('key', None)
    kwargs.pop('subkey', None)
return timeSeries(id='REPORTED_FINANCIALS', key=symbol, subkey='10-Q', **kwargs)
@wraps(timeSeries)
def tenK(symbol, **kwargs):
    kwargs.pop('id', None)
    kwargs.pop('key', None)
    kwargs.pop('subkey', None)
    return timeSeries(id='REPORTED_FINANCIALS', key=symbol, subkey='10-K', **kwargs)
the-stack_106_23710 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for long running jobs and continuous computations."""
import ast
import logging
import re
from core import jobs
from core import jobs_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
from google.appengine.ext import ndb
from mapreduce import input_readers
(base_models, exp_models, stats_models, job_models) = (
models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration,
models.NAMES.statistics, models.NAMES.job]))
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
JOB_FAILED_MESSAGE = 'failed (as expected)'
class MockJobManagerOne(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
return 'output'
class MockJobManagerTwo(jobs.BaseDeferredJobManager):
pass
class MockJobManagerWithParams(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
return additional_job_params['correct']
class MockFailingJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls, additional_job_params):
raise Exception(JOB_FAILED_MESSAGE)
class MockJobManagerWithNoRunMethod(jobs.BaseDeferredJobManager):
pass
class JobManagerUnitTests(test_utils.GenericTestBase):
"""Test basic job manager operations."""
def test_create_new(self):
"""Test the creation of a new job."""
job_id = MockJobManagerOne.create_new()
self.assertTrue(job_id.startswith('MockJobManagerOne'))
self.assertEqual(
MockJobManagerOne.get_status_code(job_id), jobs.STATUS_CODE_NEW)
self.assertIsNone(MockJobManagerOne.get_time_queued_msec(job_id))
self.assertIsNone(MockJobManagerOne.get_time_started_msec(job_id))
self.assertIsNone(MockJobManagerOne.get_time_finished_msec(job_id))
self.assertIsNone(MockJobManagerOne.get_metadata(job_id))
self.assertIsNone(MockJobManagerOne.get_output(job_id))
self.assertIsNone(MockJobManagerOne.get_error(job_id))
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertFalse(MockJobManagerOne.has_finished(job_id))
def test_base_job_manager_enqueue_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseJobManager should implement _real_enqueue().'):
jobs.BaseJobManager._real_enqueue( # pylint: disable=protected-access
'job_id', taskqueue_services.QUEUE_NAME_DEFAULT, None)
def test_failing_jobs(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
logging_swap = self.swap(logging, 'error', _mock_logging_function)
# Mocks GoogleCloudStorageInputReader() to fail a job.
_mock_input_reader = lambda _, __: 1 / 0
input_reader_swap = self.swap(
input_readers, 'GoogleCloudStorageInputReader', _mock_input_reader)
assert_raises_context_manager = self.assertRaises(Exception)
job_id = MockJobManagerOne.create_new()
store_map_reduce_results = jobs.StoreMapReduceResults()
with input_reader_swap, assert_raises_context_manager, logging_swap:
store_map_reduce_results.run(
job_id, 'core.jobs_test.MockJobManagerOne', 'output')
expected_log_message = 'Job %s failed at' % job_id
# The first log message is ignored as it is the traceback.
self.assertEqual(len(observed_log_messages), 2)
self.assertTrue(
observed_log_messages[1].startswith(expected_log_message))
def test_enqueue_job(self):
"""Test the enqueueing of a job."""
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 1)
self.assertEqual(
MockJobManagerOne.get_status_code(job_id), jobs.STATUS_CODE_QUEUED)
self.assertIsNotNone(MockJobManagerOne.get_time_queued_msec(job_id))
self.assertIsNone(MockJobManagerOne.get_output(job_id))
def test_failure_for_job_enqueued_using_wrong_manager(self):
job_id = MockJobManagerOne.create_new()
with self.assertRaisesRegexp(Exception, 'Invalid job type'):
MockJobManagerTwo.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
def test_failure_for_job_with_no_run_method(self):
job_id = MockJobManagerWithNoRunMethod.create_new()
MockJobManagerWithNoRunMethod.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
self.process_and_flush_pending_tasks()
def test_complete_job(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
self.assertEqual(
MockJobManagerOne.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
time_queued_msec = MockJobManagerOne.get_time_queued_msec(job_id)
time_started_msec = MockJobManagerOne.get_time_started_msec(job_id)
time_finished_msec = MockJobManagerOne.get_time_finished_msec(job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = MockJobManagerOne.get_metadata(job_id)
output = MockJobManagerOne.get_output(job_id)
error = MockJobManagerOne.get_error(job_id)
self.assertIsNone(metadata)
self.assertEqual(output, ['output'])
self.assertIsNone(error)
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertTrue(MockJobManagerOne.has_finished(job_id))
def test_deferred_job_with_additional_params(self):
"""Test the enqueueing of a job with additional parameters."""
job_id_1 = MockJobManagerWithParams.create_new()
MockJobManagerWithParams.enqueue(
job_id_1, taskqueue_services.QUEUE_NAME_DEFAULT,
additional_job_params={'random': 3, 'correct': 60})
job_id_2 = MockJobManagerWithParams.create_new()
MockJobManagerWithParams.enqueue(
job_id_2, taskqueue_services.QUEUE_NAME_DEFAULT,
additional_job_params={'random': 20, 'correct': 25})
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
2)
self.process_and_flush_pending_tasks()
self.assertTrue(MockJobManagerWithParams.has_finished(job_id_1))
self.assertEqual(MockJobManagerWithParams.get_output(job_id_1), ['60'])
self.assertTrue(MockJobManagerWithParams.has_finished(job_id_2))
self.assertEqual(MockJobManagerWithParams.get_output(job_id_2), ['25'])
def test_job_failure(self):
job_id = MockFailingJobManager.create_new()
MockFailingJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertEqual(
MockFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
time_queued_msec = MockFailingJobManager.get_time_queued_msec(job_id)
time_started_msec = MockFailingJobManager.get_time_started_msec(
job_id)
time_finished_msec = MockFailingJobManager.get_time_finished_msec(
job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = MockFailingJobManager.get_metadata(job_id)
output = MockFailingJobManager.get_output(job_id)
error = MockFailingJobManager.get_error(job_id)
self.assertIsNone(metadata)
self.assertIsNone(output)
self.assertIn(JOB_FAILED_MESSAGE, error)
self.assertFalse(MockFailingJobManager.is_active(job_id))
self.assertTrue(MockFailingJobManager.has_finished(job_id))
def test_status_code_transitions(self):
"""Test that invalid status code transitions are caught."""
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
MockJobManagerOne.register_start(job_id)
MockJobManagerOne.register_completion(job_id, ['output'])
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.register_completion(job_id, ['output'])
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.register_failure(job_id, 'error')
def test_different_jobs_are_independent(self):
job_id = MockJobManagerOne.create_new()
another_job_id = MockJobManagerTwo.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
MockJobManagerOne.register_start(job_id)
MockJobManagerTwo.enqueue(
another_job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
MockJobManagerOne.get_status_code(job_id), jobs.STATUS_CODE_STARTED)
self.assertEqual(
MockJobManagerTwo.get_status_code(another_job_id),
jobs.STATUS_CODE_QUEUED)
def test_cannot_instantiate_jobs_from_abstract_base_classes(self):
with self.assertRaisesRegexp(
Exception, 'directly create a job using the abstract base'
):
jobs.BaseJobManager.create_new()
def test_cannot_enqueue_same_job_twice(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
def test_can_enqueue_two_instances_of_the_same_job(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
job_id_2 = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(
job_id_2, taskqueue_services.QUEUE_NAME_DEFAULT)
def test_cancel_kills_queued_job(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertTrue(MockJobManagerOne.is_active(job_id))
MockJobManagerOne.cancel(job_id, 'admin_user_id')
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job_id),
jobs.STATUS_CODE_CANCELED)
self.assertIsNone(MockJobManagerOne.get_output(job_id))
self.assertEqual(
MockJobManagerOne.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_kills_started_job(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertTrue(MockJobManagerOne.is_active(job_id))
MockJobManagerOne.register_start(job_id)
# Cancel the job immediately after it has started.
MockJobManagerOne.cancel(job_id, 'admin_user_id')
# The job then finishes.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.register_completion(job_id, ['job_output'])
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job_id),
jobs.STATUS_CODE_CANCELED)
# Note that no results are recorded for this job.
self.assertIsNone(MockJobManagerOne.get_output(job_id))
self.assertEqual(
MockJobManagerOne.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_does_not_kill_completed_job(self):
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertTrue(MockJobManagerOne.is_active(job_id))
# Complete the job.
self.process_and_flush_pending_tasks()
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockJobManagerOne.cancel(job_id, 'admin_user_id')
# The job should still have 'completed' status.
self.assertFalse(MockJobManagerOne.is_active(job_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
self.assertEqual(MockJobManagerOne.get_output(job_id), ['output'])
self.assertIsNone(MockJobManagerOne.get_error(job_id))
def test_cancel_does_not_kill_failed_job(self):
job_id = MockFailingJobManager.create_new()
MockFailingJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertTrue(MockFailingJobManager.is_active(job_id))
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertFalse(MockFailingJobManager.is_active(job_id))
self.assertEqual(
MockFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
MockFailingJobManager.cancel(job_id, 'admin_user_id')
# The job should still have 'failed' status.
self.assertFalse(MockFailingJobManager.is_active(job_id))
self.assertEqual(
MockFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
self.assertIsNone(MockFailingJobManager.get_output(job_id))
self.assertIn(
'raise Exception', MockFailingJobManager.get_error(job_id))
def test_cancelling_multiple_unfinished_jobs(self):
job1_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(
job1_id, taskqueue_services.QUEUE_NAME_DEFAULT)
job2_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(
job2_id, taskqueue_services.QUEUE_NAME_DEFAULT)
MockJobManagerOne.register_start(job1_id)
MockJobManagerOne.register_start(job2_id)
MockJobManagerOne.cancel_all_unfinished_jobs('admin_user_id')
self.assertFalse(MockJobManagerOne.is_active(job1_id))
self.assertFalse(MockJobManagerOne.is_active(job2_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEqual(
MockJobManagerOne.get_status_code(job2_id),
jobs.STATUS_CODE_CANCELED)
self.assertIsNone(MockJobManagerOne.get_output(job1_id))
self.assertIsNone(MockJobManagerOne.get_output(job2_id))
self.assertEqual(
'Canceled by admin_user_id', MockJobManagerOne.get_error(job1_id))
self.assertEqual(
'Canceled by admin_user_id', MockJobManagerOne.get_error(job2_id))
def test_cancelling_one_unfinished_job(self):
job1_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(
job1_id, taskqueue_services.QUEUE_NAME_DEFAULT)
job2_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(
job2_id, taskqueue_services.QUEUE_NAME_DEFAULT)
MockJobManagerOne.register_start(job1_id)
MockJobManagerOne.register_start(job2_id)
MockJobManagerOne.cancel(job1_id, 'admin_user_id')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
self.process_and_flush_pending_tasks()
MockJobManagerOne.register_completion(job2_id, ['output'])
self.assertFalse(MockJobManagerOne.is_active(job1_id))
self.assertFalse(MockJobManagerOne.is_active(job2_id))
self.assertEqual(
MockJobManagerOne.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEqual(
MockJobManagerOne.get_status_code(job2_id),
jobs.STATUS_CODE_COMPLETED)
self.assertIsNone(MockJobManagerOne.get_output(job1_id))
self.assertEqual(MockJobManagerOne.get_output(job2_id), ['output'])
self.assertEqual(
'Canceled by admin_user_id', MockJobManagerOne.get_error(job1_id))
self.assertIsNone(MockJobManagerOne.get_error(job2_id))
def test_compress_output_list_with_single_char_outputs(self):
input_list = [1, 2, 3, 4, 5]
expected_output = ['1', '2', '3', '<TRUNCATED>']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list, test_only_max_output_len_chars=3)
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_multi_char_outputs(self):
input_list = ['abcd', 'efgh', 'ijkl']
expected_output = ['abcd', 'efgh', 'ij <TRUNCATED>']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list, test_only_max_output_len_chars=10)
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_zero_max_output_len(self):
input_list = [1, 2, 3]
expected_output = ['<TRUNCATED>']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list, test_only_max_output_len_chars=0)
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_exact_max_output_len(self):
input_list = ['abc']
expected_output = ['abc']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list, test_only_max_output_len_chars=3)
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_empty_outputs(self):
input_list = []
expected_output = []
actual_output = jobs.BaseJobManager._compress_output_list(input_list) # pylint: disable=protected-access
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_duplicate_outputs(self):
input_list = ['bar', 'foo'] * 3
expected_output = ['(3x) bar', '(3x) foo']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list,
# Make sure no output gets truncated.
test_only_max_output_len_chars=sum(len(s) for s in expected_output))
self.assertEqual(actual_output, expected_output)
def test_compress_output_list_with_truncated_duplicate_outputs(self):
input_list = ['supercalifragilisticexpialidocious'] * 3
expected_output = ['(3x) super <TRUNCATED>']
actual_output = jobs.BaseJobManager._compress_output_list( # pylint: disable=protected-access
input_list, test_only_max_output_len_chars=10)
self.assertEqual(actual_output, expected_output)
SUM_MODEL_ID = 'all_data_id'
class MockNumbersModel(ndb.Model):
number = ndb.IntegerProperty()
class MockSumModel(ndb.Model):
total = ndb.IntegerProperty(default=0)
failed = ndb.BooleanProperty(default=False)
class TestDeferredJobManager(jobs.BaseDeferredJobManager):
"""Base class for testing deferred jobs."""
pass
class TestAdditionJobManager(TestDeferredJobManager):
"""Test job that sums all MockNumbersModel data.
The result is stored in a MockSumModel entity with id SUM_MODEL_ID.
"""
@classmethod
def _run(cls, additional_job_params):
total = sum([
numbers_model.number for numbers_model in MockNumbersModel.query()])
MockSumModel(id=SUM_MODEL_ID, total=total).put()
class FailingAdditionJobManager(TestDeferredJobManager):
"""Test job that stores stuff in MockSumModel and then fails."""
@classmethod
def _run(cls, additional_job_params):
total = sum([
numbers_model.number for numbers_model in MockNumbersModel.query()])
MockSumModel(id=SUM_MODEL_ID, total=total).put()
raise Exception('Oops, I failed.')
@classmethod
def _post_failure_hook(cls, job_id):
model = MockSumModel.get_by_id(SUM_MODEL_ID)
model.failed = True
model.put()
class DatastoreJobIntegrationTests(test_utils.GenericTestBase):
"""Tests the behavior of a job that affects data in the datastore.
This job gets all MockNumbersModel instances and sums their values, and puts
the summed values in a MockSumModel instance with id SUM_MODEL_ID. The
computation is redone from scratch each time the job is run.
"""
def _get_stored_total(self):
"""Returns the total summed values of all the MockNumbersModel instances
stored in a MockSumModel instance.
"""
sum_model = MockSumModel.get_by_id(SUM_MODEL_ID)
return sum_model.total if sum_model else 0
def _populate_data(self):
"""Populate the datastore with four MockNumbersModel instances."""
MockNumbersModel(number=1).put()
MockNumbersModel(number=2).put()
MockNumbersModel(number=1).put()
MockNumbersModel(number=2).put()
def test_sequential_jobs(self):
self._populate_data()
self.assertEqual(self._get_stored_total(), 0)
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new(),
taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 6)
MockNumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new(),
taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_multiple_enqueued_jobs(self):
self._populate_data()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new(),
taskqueue_services.QUEUE_NAME_DEFAULT)
MockNumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new(),
taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
2)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_failing_job(self):
self._populate_data()
job_id = FailingAdditionJobManager.create_new()
FailingAdditionJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
with self.assertRaisesRegexp(
taskqueue_services.PermanentTaskFailure, 'Oops, I failed'
):
self.process_and_flush_pending_tasks()
# The work that the failing job did before it failed is still done.
self.assertEqual(self._get_stored_total(), 6)
# The post-failure hook should have run.
self.assertTrue(MockSumModel.get_by_id(SUM_MODEL_ID).failed)
        self.assertEqual(
            FailingAdditionJobManager.get_status_code(job_id),
            jobs.STATUS_CODE_FAILED)
class SampleMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""Test job that counts the total number of explorations."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
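        # map() is a staticmethod, so reference the class explicitly; only
        # entities created before this job was enqueued are counted.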
current_class = SampleMapReduceJobManager
if current_class.entity_created_before_job_queued(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield (key, sum([int(value) for value in values]))
class MapReduceJobForCheckingParamNames(jobs.BaseMapReduceOneOffJobManager):
"""Test job that checks correct param_name."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
jobs.BaseMapReduceOneOffJobManager.get_mapper_param('exp_id')
class ParamNameTests(test_utils.GenericTestBase):
def test_job_raises_error_with_invalid_param_name(self):
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id_1')
exp_services.save_new_exploration('owner_id', exploration)
job_id = MapReduceJobForCheckingParamNames.create_new()
params = {
'invalid_param_name': 'exp_id_1'
}
MapReduceJobForCheckingParamNames.enqueue(
job_id, additional_job_params=params)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception, 'MapReduce task to URL .+ failed')
with assert_raises_regexp_context_manager:
self.process_and_flush_pending_tasks()
def test_job_with_correct_param_name(self):
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id_1')
exp_services.save_new_exploration('owner_id', exploration)
job_id = MapReduceJobForCheckingParamNames.create_new()
params = {
'exp_id': 'exp_id_1'
}
MapReduceJobForCheckingParamNames.enqueue(
job_id, additional_job_params=params)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
class MapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(MapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_all_explorations(self):
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_DEFAULT), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(jobs.get_job_output(job_id), ['[u\'sum\', 1]'])
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
def test_base_map_reduce_job_manager_entity_classes_to_map_over_raise_error(
self):
with self.assertRaisesRegexp(
NotImplementedError,
'Classes derived from BaseMapReduceJobManager must implement '
'entity_classes_to_map_over()'):
jobs.BaseMapReduceJobManager.entity_classes_to_map_over()
def test_base_map_reduce_job_manager_map_raise_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Classes derived from BaseMapReduceJobManager must implement '
'map as a @staticmethod.'):
jobs.BaseMapReduceJobManager.map('item')
def test_base_map_reduce_job_manager_reduce_raise_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Classes derived from BaseMapReduceJobManager must implement '
'reduce as a @staticmethod'):
jobs.BaseMapReduceJobManager.reduce('key', [])
def test_raises_error_with_existing_mapper_param(self):
job_id = SampleMapReduceJobManager.create_new()
with self.assertRaisesRegexp(
Exception,
'Additional job param entity_kinds shadows an existing mapper '
'param'):
SampleMapReduceJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT,
additional_job_params={'entity_kinds': ''})
class JobRegistryTests(test_utils.GenericTestBase):
"""Tests job registry."""
def test_each_one_off_class_is_subclass_of_base_job_manager(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertTrue(issubclass(klass, jobs.BaseJobManager))
def test_each_one_off_class_is_not_abstract(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertFalse(klass._is_abstract()) # pylint: disable=protected-access
def test_validity_of_each_continuous_computation_class(self):
for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
self.assertTrue(
issubclass(klass, jobs.BaseContinuousComputationManager))
event_types_listened_to = klass.get_event_types_listened_to()
self.assertTrue(isinstance(event_types_listened_to, list))
for event_type in event_types_listened_to:
self.assertTrue(isinstance(event_type, basestring))
self.assertTrue(issubclass(
event_services.Registry.get_event_class_by_type(
event_type),
event_services.BaseEventHandler))
rdc = klass._get_realtime_datastore_class() # pylint: disable=protected-access
self.assertTrue(issubclass(
rdc, jobs.BaseRealtimeDatastoreClassForContinuousComputations))
# The list of allowed base classes. This can be extended as the
# need arises, though we may also want to implement
# _get_continuous_computation_class() and
# entity_created_before_job_queued() for other base classes
# that are added to this list.
allowed_base_batch_job_classes = [
jobs.BaseMapReduceJobManagerForContinuousComputations]
self.assertTrue(any([
issubclass(klass._get_batch_job_manager_class(), superclass) # pylint: disable=protected-access
for superclass in allowed_base_batch_job_classes]))
class BaseMapReduceJobManagerForContinuousComputationsTests(
test_utils.GenericTestBase):
def test_raise_error_with_get_continuous_computation_class(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of '
'BaseMapReduceJobManagerForContinuousComputations must '
'implement the _get_continuous_computation_class() method.')):
(
jobs.BaseMapReduceJobManagerForContinuousComputations. # pylint: disable=protected-access
_get_continuous_computation_class()
)
def test_raise_error_with_post_cancel_hook(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of '
'BaseMapReduceJobManagerForContinuousComputations must '
'implement the _get_continuous_computation_class() method.')):
(
jobs.BaseMapReduceJobManagerForContinuousComputations. # pylint: disable=protected-access
_post_cancel_hook('job_id', 'cancel message')
)
def test_raise_error_with_post_failure_hook(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of '
'BaseMapReduceJobManagerForContinuousComputations must '
'implement the _get_continuous_computation_class() method.')):
(
jobs.BaseMapReduceJobManagerForContinuousComputations. # pylint: disable=protected-access
_post_failure_hook('job_id')
)
class BaseContinuousComputationManagerTests(test_utils.GenericTestBase):
def test_raise_error_with_get_event_types_listened_to(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of BaseContinuousComputationManager must implement '
'get_event_types_listened_to(). This method should return a '
'list of strings, each representing an event type that this '
'class subscribes to.')):
jobs.BaseContinuousComputationManager.get_event_types_listened_to()
def test_raise_error_with_get_realtime_datastore_class(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of BaseContinuousComputationManager must implement '
'_get_realtime_datastore_class(). This method should return '
'the datastore class to be used by the realtime layer.')):
jobs.BaseContinuousComputationManager._get_realtime_datastore_class( # pylint: disable=protected-access
)
def test_raise_error_with_get_batch_job_manager_class(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of BaseContinuousComputationManager must implement '
'_get_batch_job_manager_class(). This method should return the'
'manager class for the continuously-running batch job.')):
jobs.BaseContinuousComputationManager._get_batch_job_manager_class() # pylint: disable=protected-access
def test_raise_error_with_handle_incoming_event(self):
with self.assertRaisesRegexp(
NotImplementedError,
re.escape(
'Subclasses of BaseContinuousComputationManager must implement '
'_handle_incoming_event(...). Please check the docstring of '
'this method in jobs.BaseContinuousComputationManager for '
'important developer information.')):
jobs.BaseContinuousComputationManager._handle_incoming_event( # pylint: disable=protected-access
1, 'event_type')
class JobQueriesTests(test_utils.GenericTestBase):
"""Tests queries for jobs."""
def test_get_data_for_recent_jobs(self):
self.assertEqual(jobs.get_data_for_recent_jobs(), [])
job_id = MockJobManagerOne.create_new()
MockJobManagerOne.enqueue(job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
recent_jobs = jobs.get_data_for_recent_jobs()
self.assertEqual(len(recent_jobs), 1)
self.assertDictContainsSubset({
'id': job_id,
'status_code': jobs.STATUS_CODE_QUEUED,
'job_type': 'MockJobManagerOne',
'is_cancelable': True,
'error': None
}, recent_jobs[0])
class TwoClassesMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""A test job handler that counts entities in two datastore classes."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel, exp_models.ExplorationRightsModel]
@staticmethod
def map(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield [key, sum([int(value) for value in values])]
class TwoClassesMapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs using two classes end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(TwoClassesMapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id')
# Note that this ends up creating an entry in the
# ExplorationRightsModel as well.
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_entities(self):
self.assertEqual(exp_models.ExplorationModel.query().count(), 1)
self.assertEqual(exp_models.ExplorationRightsModel.query().count(), 1)
job_id = TwoClassesMapReduceJobManager.create_new()
TwoClassesMapReduceJobManager.enqueue(
job_id, taskqueue_services.QUEUE_NAME_DEFAULT)
self.assertEqual(
self.count_jobs_in_taskqueue(taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
self.assertEqual(
TwoClassesMapReduceJobManager.get_output(job_id), ['[u\'sum\', 2]'])
self.assertEqual(
TwoClassesMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
class MockStartExplorationRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
count = ndb.IntegerProperty(default=0)
class MockStartExplorationMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
@classmethod
def _get_continuous_computation_class(cls):
return StartExplorationEventCounter
@classmethod
def entity_classes_to_map_over(cls):
return [stats_models.StartExplorationEventLogEntryModel]
@staticmethod
def map(item):
current_class = MockStartExplorationMRJobManager
if current_class.entity_created_before_job_queued(item):
yield (
item.exploration_id, {
'event_type': item.event_type,
})
@staticmethod
def reduce(key, stringified_values):
started_count = 0
for value_str in stringified_values:
value = ast.literal_eval(value_str)
if value['event_type'] == feconf.EVENT_TYPE_START_EXPLORATION:
started_count += 1
stats_models.ExplorationAnnotationsModel(
id=key, num_starts=started_count).put()
class StartExplorationEventCounter(jobs.BaseContinuousComputationManager):
"""A continuous-computation job that counts 'start exploration' events.
This class should only be used in tests.
"""
@classmethod
def get_event_types_listened_to(cls):
return [feconf.EVENT_TYPE_START_EXPLORATION]
@classmethod
def _get_realtime_datastore_class(cls):
return MockStartExplorationRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return MockStartExplorationMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
"""Override this method so that it does not immediately start a
new MapReduce job. Non-test subclasses should not do this.
"""
pass
@classmethod
def _handle_incoming_event(
cls, active_realtime_layer, event_type, exp_id, unused_exp_version,
unused_state_name, unused_session_id, unused_params,
unused_play_type):
def _increment_counter():
"""Increments the count, if the realtime model corresponding to the
active real-time model id exists.
"""
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
realtime_class(
id=realtime_model_id, count=1,
realtime_layer=active_realtime_layer).put()
transaction_services.run_in_transaction(_increment_counter)
# Public query method.
@classmethod
def get_count(cls, exploration_id):
"""Return the number of 'start exploration' events received.
Answers the query by combining the existing MR job output and the
active realtime_datastore_class.
"""
mr_model = stats_models.ExplorationAnnotationsModel.get(
exploration_id, strict=False)
realtime_model = cls._get_realtime_datastore_class().get(
cls.get_active_realtime_layer_id(exploration_id), strict=False)
answer = 0
if mr_model is not None:
answer += mr_model.num_starts
if realtime_model is not None:
answer += realtime_model.count
return answer
class MockMRJobManager(jobs.BaseMapReduceJobManagerForContinuousComputations):
@classmethod
def _get_continuous_computation_class(cls):
return MockContinuousComputationManager
@classmethod
def entity_classes_to_map_over(cls):
return []
class MockContinuousComputationManager(jobs.BaseContinuousComputationManager):
TIMES_RUN = 0
@classmethod
def get_event_types_listened_to(cls):
return []
@classmethod
def _get_realtime_datastore_class(cls):
return MockStartExplorationRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return MockMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
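        # Limit how many times a follow-up batch job is kicked off so the
        # continuous computation stops chaining jobs during tests.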
if cls.TIMES_RUN < 2:
(
super(cls, MockContinuousComputationManager)
._kickoff_batch_job_after_previous_one_ends()
)
cls.TIMES_RUN = cls.TIMES_RUN + 1
class ContinuousComputationTests(test_utils.GenericTestBase):
"""Tests continuous computations for 'start exploration' events."""
EXP_ID = 'exp_id'
ALL_CC_MANAGERS_FOR_TESTS = [
StartExplorationEventCounter, MockContinuousComputationManager]
def setUp(self):
"""Create an exploration and register the event listener manually."""
super(ContinuousComputationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID)
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_cannot_get_entity_with_invalid_id(self):
with self.assertRaisesRegexp(
ValueError, 'Invalid realtime id: invalid_entity_id'):
MockStartExplorationRealtimeModel.get('invalid_entity_id')
def test_cannot_put_realtime_class_with_invalid_id(self):
realtime_class = MockStartExplorationRealtimeModel
with self.assertRaisesRegexp(
Exception,
'Realtime layer 1 does not match realtime id '
'invalid_realtime_model_id'):
realtime_class(
id='invalid_realtime_model_id', count=1, realtime_layer=1).put()
def test_continuous_computation_workflow(self):
"""An integration test for continuous computations."""
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Record an event. This will put the event in the task queue.
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 1)
# When the task queue is flushed, the data is recorded in the two
# realtime layers.
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_EVENTS), 0)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
self.assertEqual(MockStartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
self.assertEqual(MockStartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID).count, 1)
# The batch job has not run yet, so no entity for self.EXP_ID will
# have been created in the batch model yet.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
# Launch the batch computation.
StartExplorationEventCounter.start_computation()
# Data in realtime layer 0 is still there.
self.assertEqual(MockStartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
# Data in realtime layer 1 has been deleted.
self.assertIsNone(MockStartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
stats_models.ExplorationAnnotationsModel.get(
self.EXP_ID).num_starts, 1)
# The overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Data in realtime layer 0 has been deleted.
self.assertIsNone(MockStartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID, strict=False))
# Data in realtime layer 1 has been deleted.
self.assertIsNone(MockStartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
def test_events_coming_in_while_batch_job_is_running(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
# Currently no events have been recorded.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Enqueue the batch computation. (It is running on 0 events).
StartExplorationEventCounter._kickoff_batch_job() # pylint: disable=protected-access
# Record an event while this job is in the queue. Simulate
# this by directly calling on_incoming_event(), because using
# StartExplorationEventHandler.record() would just put the event
# in the task queue, which we don't want to flush yet.
event_services.StartExplorationEventHandler._handle_event( # pylint: disable=protected-access
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
StartExplorationEventCounter.on_incoming_event(
event_services.StartExplorationEventHandler.EVENT_TYPE,
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id',
{}, feconf.PLAY_TYPE_NORMAL)
# The overall count is now 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Finish the job.
self.process_and_flush_pending_tasks()
# When the batch job completes, the overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# The batch job result should still be 0, since the event arrived
# after the batch job started.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
def test_cannot_start_new_job_while_existing_job_still_running(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
StartExplorationEventCounter.start_computation()
with self.assertRaisesRegexp(
Exception,
'Attempted to start computation StartExplorationEventCounter, '
'which is already running'):
StartExplorationEventCounter.start_computation()
self.process_and_flush_pending_tasks()
StartExplorationEventCounter.stop_computation('admin_user_id')
def test_get_continuous_computations_info_with_existing_model(self):
job_models.ContinuousComputationModel(
id='StartExplorationEventCounter').put()
continuous_computations_data = jobs.get_continuous_computations_info(
[StartExplorationEventCounter])
expected_continuous_computations_data = [{
'active_realtime_layer_index': 0,
'computation_type': 'StartExplorationEventCounter',
'status_code': 'idle',
'is_startable': True,
'is_stoppable': False,
'last_finished_msec': None,
'last_started_msec': None,
'last_stopped_msec': None
}]
self.assertEqual(
expected_continuous_computations_data, continuous_computations_data)
def test_failing_continuous_job(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
StartExplorationEventCounter.start_computation()
status = StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
with self.swap(logging, 'error', _mock_logging_function):
StartExplorationEventCounter.on_batch_job_failure()
self.run_but_do_not_flush_pending_tasks()
StartExplorationEventCounter.stop_computation('admin_user_id')
self.assertEqual(
observed_log_messages, ['Job StartExplorationEventCounter failed.'])
def test_cancelling_continuous_job(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
StartExplorationEventCounter.start_computation()
status = StartExplorationEventCounter.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
with self.swap(logging, 'info', _mock_logging_function):
StartExplorationEventCounter.on_batch_job_canceled()
self.run_but_do_not_flush_pending_tasks()
StartExplorationEventCounter.stop_computation('admin_user_id')
self.assertEqual(
observed_log_messages,
['Job StartExplorationEventCounter canceled.'])
def test_kickoff_batch_job_after_previous_one_ends(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS
):
self.assertEqual(MockContinuousComputationManager.TIMES_RUN, 0)
MockContinuousComputationManager.start_computation()
(
MockContinuousComputationManager # pylint: disable=protected-access
._kickoff_batch_job_after_previous_one_ends()
)
status = MockContinuousComputationManager.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING)
self.run_but_do_not_flush_pending_tasks()
MockContinuousComputationManager.stop_computation('admin_user_id')
status = MockContinuousComputationManager.get_status_code()
self.assertEqual(
status, job_models.CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE)
self.assertEqual(MockContinuousComputationManager.TIMES_RUN, 1)
# TODO(sll): When we have some concrete ContinuousComputations running in
# production, add an integration test to ensure that the registration of event
# handlers in the main codebase is happening correctly.
|
the-stack_106_23712 | from os import path, mkdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import tensorflow as tf
tf.set_random_seed(1)
import timeit
import cv2
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler #, TensorBoard
from models import get_densenet121_unet_softmax, dice_coef_rounded_ch0, dice_coef_rounded_ch1, schedule_steps, softmax_dice_loss
import tensorflow.keras.backend as K
import pandas as pd
from tqdm import tqdm
from transforms import aug_mega_hardcore
from tensorflow.keras import metrics
from abc import abstractmethod
from tensorflow.keras.preprocessing.image import Iterator
import time
from skimage import measure
from skimage.morphology import square, erosion, dilation, watershed
from skimage.filters import median
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
data_folder = path.join('..', 'data')
masks_folder = path.join(data_folder, 'masks_all')
images_folder = path.join(data_folder, 'images_all')
labels_folder = path.join(data_folder, 'labels_all')
models_folder = 'nn_models'
input_shape = (256, 256)
df = pd.read_csv(path.join(data_folder, 'folds.csv'))
all_ids = []
all_images = []
all_masks = []
all_labels = []
all_good4copy = []
def preprocess_inputs(x):
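    # Scale pixel values from [0, 255] to [-1, 1] before feeding the network.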
x = np.asarray(x, dtype='float32')
x /= 127.5
x -= 1.
return x
def bgr_to_lab(img):
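    # Contrast-normalisation channel: convert BGR to LAB, run CLAHE on the
    # lightness channel, and invert it when the image is predominantly bright,
    # so bright-background images are flipped to the same dark-background
    # convention; the result is returned as one extra input channel.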
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
lab = clahe.apply(lab[:, :, 0])
if lab.mean() > 127:
lab = 255 - lab
return lab[..., np.newaxis]
def create_mask(labels):
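    # Build a 3-channel training target from an instance-label image:
    # channel 0 marks nucleus bodies, channel 1 marks pixels separating
    # touching instances (watershed lines refined by a local neighbourhood
    # scan whose size depends on instance area), and channel 2 is left zero
    # here and filled in as background by the data generators.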
labels = measure.label(labels, neighbors=8, background=0)
tmp = dilation(labels > 0, square(9))
tmp2 = watershed(tmp, labels, mask=tmp, watershed_line=True) > 0
tmp = tmp ^ tmp2
tmp = dilation(tmp, square(7))
msk = (255 * tmp).astype('uint8')
props = measure.regionprops(labels)
msk0 = 255 * (labels > 0)
msk0 = msk0.astype('uint8')
msk1 = np.zeros_like(labels, dtype='bool')
max_area = np.max([p.area for p in props])
for y0 in range(labels.shape[0]):
for x0 in range(labels.shape[1]):
if not tmp[y0, x0]:
continue
if labels[y0, x0] == 0:
if max_area > 4000:
sz = 6
else:
sz = 3
else:
sz = 3
if props[labels[y0, x0] - 1].area < 300:
sz = 1
elif props[labels[y0, x0] - 1].area < 2000:
sz = 2
uniq = np.unique(labels[max(0, y0-sz):min(labels.shape[0], y0+sz+1), max(0, x0-sz):min(labels.shape[1], x0+sz+1)])
if len(uniq[uniq > 0]) > 1:
msk1[y0, x0] = True
msk0[y0, x0] = 0
msk1 = 255 * msk1
msk1 = msk1.astype('uint8')
msk2 = np.zeros_like(labels, dtype='uint8')
msk = np.stack((msk0, msk1, msk2))
msk = np.rollaxis(msk, 0, 3)
return msk
class BaseMaskDatasetIterator(Iterator):
def __init__(self,
image_ids,
random_transformers=None,
batch_size=8,
shuffle=True,
seed=None
):
self.image_ids = image_ids
self.random_transformers = random_transformers
if seed is None:
seed = np.uint32(time.time() * 1000)
super(BaseMaskDatasetIterator, self).__init__(len(self.image_ids), batch_size, shuffle, seed)
@abstractmethod
def transform_mask(self, mask, image):
raise NotImplementedError
def transform_batch_y(self, batch_y):
return batch_y
def _get_batches_of_transformed_samples(self, index_array):
batch_x = []
batch_y = []
for batch_index, image_index in enumerate(index_array):
_idx = self.image_ids[image_index]
img0 = all_images[_idx].copy()
msk0 = all_masks[_idx].copy()
lbl0 = all_labels[_idx].copy()
good4copy = all_good4copy[_idx]
x0 = random.randint(0, img0.shape[1] - input_shape[1])
y0 = random.randint(0, img0.shape[0] - input_shape[0])
img = img0[y0:y0+input_shape[0], x0:x0+input_shape[1], :]
msk = msk0[y0:y0+input_shape[0], x0:x0+input_shape[1], :]
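            # Copy-paste augmentation: with probability 0.25, paste up to five
            # randomly flipped/rotated nuclei from the pre-selected good4copy
            # list into the crop, smooth the pasted boundaries with a median
            # filter, then re-crop the image and rebuild the mask from the
            # updated label image.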
if len(good4copy) > 0 and random.random() > 0.75:
num_copy = random.randrange(1, min(6, len(good4copy)+1))
lbl_max = lbl0.max()
for i in range(num_copy):
lbl_max += 1
l_id = random.choice(good4copy)
lbl_msk = all_labels[_idx] == l_id
row, col = np.where(lbl_msk)
y1, x1 = np.min(np.where(lbl_msk), axis=1)
y2, x2 = np.max(np.where(lbl_msk), axis=1)
lbl_msk = lbl_msk[y1:y2+1, x1:x2+1]
lbl_img = img0[y1:y2+1, x1:x2+1, :]
if random.random() > 0.5:
lbl_msk = lbl_msk[:, ::-1, ...]
lbl_img = lbl_img[:, ::-1, ...]
rot = random.randrange(4)
if rot > 0:
lbl_msk = np.rot90(lbl_msk, k=rot)
lbl_img = np.rot90(lbl_img, k=rot)
x1 = random.randint(max(0, x0 - lbl_msk.shape[1] // 2), min(img0.shape[1] - lbl_msk.shape[1], x0 + input_shape[1] - lbl_msk.shape[1] // 2))
y1 = random.randint(max(0, y0 - lbl_msk.shape[0] // 2), min(img0.shape[0] - lbl_msk.shape[0], y0 + input_shape[0] - lbl_msk.shape[0] // 2))
tmp = erosion(lbl_msk, square(5))
lbl_msk_dif = lbl_msk ^ tmp
tmp = dilation(lbl_msk, square(5))
lbl_msk_dif = lbl_msk_dif | (tmp ^ lbl_msk)
lbl0[y1:y1+lbl_msk.shape[0], x1:x1+lbl_msk.shape[1]][lbl_msk] = lbl_max
img0[y1:y1+lbl_msk.shape[0], x1:x1+lbl_msk.shape[1]][lbl_msk] = lbl_img[lbl_msk]
full_diff_mask = np.zeros_like(img0[..., 0], dtype='bool')
full_diff_mask[y1:y1+lbl_msk.shape[0], x1:x1+lbl_msk.shape[1]] = lbl_msk_dif
img0[..., 0][full_diff_mask] = median(img0[..., 0], mask=full_diff_mask)[full_diff_mask]
img0[..., 1][full_diff_mask] = median(img0[..., 1], mask=full_diff_mask)[full_diff_mask]
img0[..., 2][full_diff_mask] = median(img0[..., 2], mask=full_diff_mask)[full_diff_mask]
img = img0[y0:y0+input_shape[0], x0:x0+input_shape[1], :]
lbl = lbl0[y0:y0+input_shape[0], x0:x0+input_shape[1]]
msk = create_mask(lbl)
if 'ic100_' in all_ids[_idx] or 'gnf_' in all_ids[_idx]:
data = self.random_transformers[1](image=img[..., ::-1], mask=msk)
else:
data = self.random_transformers[0](image=img[..., ::-1], mask=msk)
img = data['image'][..., ::-1]
msk = data['mask']
msk = msk.astype('float')
msk[..., 0] = (msk[..., 0] > 127) * 1
msk[..., 1] = (msk[..., 1] > 127) * (msk[..., 0] == 0) * 1
msk[..., 2] = (msk[..., 1] == 0) * (msk[..., 0] == 0) * 1
otp = msk
img = np.concatenate([img, bgr_to_lab(img)], axis=2)
batch_x.append(img)
batch_y.append(otp)
batch_x = np.array(batch_x, dtype="float32")
batch_y = np.array(batch_y, dtype="float32")
batch_x = preprocess_inputs(batch_x)
return self.transform_batch_x(batch_x), self.transform_batch_y(batch_y)
def transform_batch_x(self, batch_x):
return batch_x
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
def val_data_generator(val_idx, batch_size, validation_steps):
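    # Yields validation batches of full images, padded symmetrically (a 16 px
    # margin plus whatever is needed to reach a multiple of 32) with matching
    # 3-channel targets; each batch repeats a single image batch_size times.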
while True:
inputs = []
outputs = []
step_id = 0
for i in val_idx:
img = all_images[i]
msk = all_masks[i].copy()
x0 = 16
y0 = 16
x1 = 16
y1 = 16
if (img.shape[1] % 32) != 0:
x0 = int((32 - img.shape[1] % 32) / 2)
x1 = (32 - img.shape[1] % 32) - x0
x0 += 16
x1 += 16
if (img.shape[0] % 32) != 0:
y0 = int((32 - img.shape[0] % 32) / 2)
y1 = (32 - img.shape[0] % 32) - y0
y0 += 16
y1 += 16
img = np.pad(img, ((y0,y1), (x0,x1), (0, 0)), 'symmetric')
msk = np.pad(msk, ((y0,y1), (x0,x1), (0, 0)), 'symmetric')
msk = msk.astype('float')
msk[..., 0] = (msk[..., 0] > 127) * 1
msk[..., 1] = (msk[..., 1] > 127) * (msk[..., 0] == 0) * 1
msk[..., 2] = (msk[..., 1] == 0) * (msk[..., 0] == 0) * 1
otp = msk
img = np.concatenate([img, bgr_to_lab(img)], axis=2)
for j in range(batch_size):
inputs.append(img)
outputs.append(otp)
if len(inputs) == batch_size:
step_id += 1
inputs = np.asarray(inputs)
outputs = np.asarray(outputs, dtype='float')
inputs = preprocess_inputs(inputs)
yield inputs, outputs
inputs = []
outputs = []
if step_id == validation_steps:
break
def is_grayscale(image):
return np.allclose(image[..., 0], image[..., 1], atol=0.001) and np.allclose(image[..., 1], image[..., 2], atol=0.001)
if __name__ == '__main__':
t0 = timeit.default_timer()
fold_nums = [0, 1, 2, 3]
if not path.isdir(models_folder):
mkdir(models_folder)
all_ids = df['img_id'].values
all_sources = df['source'].values
for i in tqdm(range(len(all_ids))):
img_id = all_ids[i]
msk = cv2.imread(path.join(masks_folder, '{0}.png'.format(img_id)), cv2.IMREAD_UNCHANGED)
img = cv2.imread(path.join(images_folder, '{0}.png'.format(img_id)), cv2.IMREAD_COLOR)
lbl = cv2.imread(path.join(labels_folder, '{0}.tif'.format(img_id)), cv2.IMREAD_UNCHANGED)
if img.shape[0] < 256 or img.shape[1] < 256:
y_pad = 0
x_pad = 0
if img.shape[1] < 256:
x_pad = 256 - img.shape[1]
if img.shape[0] < 256:
y_pad = 256 - img.shape[0]
img = np.pad(img, ((0, y_pad), (0, x_pad), (0, 0)), 'constant')
msk = np.pad(msk, ((0, y_pad), (0, x_pad), (0, 0)), 'constant')
lbl = np.pad(lbl, ((0, y_pad), (0, x_pad)), 'constant')
all_images.append(img)
all_masks.append(msk)
all_labels.append(lbl)
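        # Record which instances are safe to copy-paste later: labels whose
        # every pixel lies in the body channel of the mask (i.e. not touching
        # another nucleus and not on the outermost pixel frame).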
tmp = np.zeros_like(msk[..., 0], dtype='uint8')
tmp[1:-1, 1:-1] = msk[1:-1, 1:-1, 0]
good4copy = list(set(np.unique(lbl[lbl > 0])).symmetric_difference(np.unique(lbl[(lbl > 0) & (tmp == 0)])))
all_good4copy.append(good4copy)
batch_size = 16
val_batch = 1
polosa_id = '193ffaa5272d5c421ae02130a64d98ad120ec70e4ed97a72cdcd4801ce93b066'
for it in range(4):
if it not in fold_nums:
continue
train_idx0 = df[(df['fold'] != it) | (df['img_id'] == polosa_id)].index.values
train_groups = df[(df['fold'] != it) | (df['img_id'] == polosa_id)]['cluster'].values
train_ids = df[(df['fold'] != it) | (df['img_id'] == polosa_id)]['img_id'].values
train_idx = []
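        # Oversample under-represented image clusters (and the single
        # hand-picked image referenced by polosa_id) so each epoch sees them
        # more often.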
for i in range(len(train_idx0)):
rep = 1
if train_groups[i] in ['b', 'd', 'e', 'n']:
rep = 3
elif train_groups[i] in ['c', 'g', 'k', 'l']:
rep = 2
if train_ids[i] == polosa_id:
rep = 5
train_idx.extend([train_idx0[i]] * rep)
train_idx = np.asarray(train_idx)
val_idx0 = df[(df['fold'] == it)].index.values
val_groups = df[(df['fold'] == it)]['cluster'].values
val_idx = []
for i in range(len(val_idx0)):
rep = 1
if val_groups[i] in ['b', 'd', 'e', 'n']:
rep = 3
elif val_groups[i] in ['c', 'g', 'k', 'l']:
rep = 2
val_idx.extend([val_idx0[i]] * rep)
val_idx = np.asarray(val_idx)
validation_steps = len(val_idx)
steps_per_epoch = 5 * int(len(train_idx) / batch_size)
print('Training fold', it)
print('steps_per_epoch', steps_per_epoch, 'validation_steps', validation_steps)
data_gen = BaseMaskDatasetIterator(train_idx,
random_transformers=[aug_mega_hardcore((-0.25, 0.6)), aug_mega_hardcore((-0.6, 0.25))],
batch_size=batch_size,
shuffle=True,
seed=1
)
np.random.seed(it+111)
random.seed(it+111)
tf.set_random_seed(it+111)
# tbCallback = TensorBoard(log_dir="tb_logs/densenet_softmax_{0}".format(it), histogram_freq=0, write_graph=True, write_images=False)
lrSchedule = LearningRateScheduler(lambda epoch: schedule_steps(epoch, [(1e-5, 2), (3e-4, 4), (1e-4, 6)]))
model = get_densenet121_unet_softmax((None, None), weights='imagenet')
model.compile(loss=softmax_dice_loss,
optimizer=Adam(lr=3e-4, amsgrad=True),
metrics=[dice_coef_rounded_ch0, dice_coef_rounded_ch1, metrics.categorical_crossentropy])
model.fit_generator(generator=data_gen,
epochs=6, steps_per_epoch=steps_per_epoch, verbose=2,
validation_data=val_data_generator(val_idx, val_batch, validation_steps),
validation_steps=validation_steps,
callbacks=[lrSchedule],
max_queue_size=5,
workers=6)
lrSchedule = LearningRateScheduler(lambda epoch: schedule_steps(epoch, [(5e-6, 2), (2e-4, 15), (1e-4, 50), (5e-5, 70), (2e-5, 80), (1e-5, 90)]))
for l in model.layers:
l.trainable = True
model.compile(loss=softmax_dice_loss,
optimizer=Adam(lr=5e-6, amsgrad=True),
metrics=[dice_coef_rounded_ch0, dice_coef_rounded_ch1, metrics.categorical_crossentropy])
model_checkpoint = ModelCheckpoint(path.join(models_folder, 'densenet_weights_{0}.h5'.format(it)), monitor='val_loss',
save_best_only=True, save_weights_only=True, mode='min')
model.fit_generator(generator=data_gen,
epochs=90, steps_per_epoch=steps_per_epoch, verbose=2,
validation_data=val_data_generator(val_idx, val_batch, validation_steps),
validation_steps=validation_steps,
callbacks=[lrSchedule, model_checkpoint], #, tbCallback
max_queue_size=5,
workers=6)
del model
del model_checkpoint
K.clear_session()
np.random.seed(it+222)
random.seed(it+222)
tf.set_random_seed(it+222)
model = get_densenet121_unet_softmax((None, None), weights=None)
model.load_weights(path.join(models_folder, 'densenet_weights_{0}.h5'.format(it)))
lrSchedule = LearningRateScheduler(lambda epoch: schedule_steps(epoch, [(1e-6, 92), (3e-5, 100), (2e-5, 120), (1e-5, 130)]))
model.compile(loss=softmax_dice_loss,
optimizer=Adam(lr=1e-6, amsgrad=True),
metrics=[dice_coef_rounded_ch0, dice_coef_rounded_ch1, metrics.categorical_crossentropy])
model_checkpoint2 = ModelCheckpoint(path.join(models_folder, 'densenet_weights_{0}.h5'.format(it)), monitor='val_loss',
save_best_only=True, save_weights_only=True, mode='min')
model.fit_generator(generator=data_gen,
epochs=130, steps_per_epoch=steps_per_epoch, verbose=2,
validation_data=val_data_generator(val_idx, val_batch, validation_steps),
validation_steps=validation_steps,
callbacks=[lrSchedule, model_checkpoint2], #, tbCallback
max_queue_size=5,
workers=6,
initial_epoch=90)
del model
del model_checkpoint2
K.clear_session()
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60)) |
the-stack_106_23713 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.ops import math_ops
def check_num_samples(ins,
batch_size=None,
steps=None,
steps_name='steps'):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Arguments:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
  Raises:
    ValueError: when `steps` is `None` and the attribute `ins.shape`
      does not exist. Also raises ValueError when `steps` is not `None`
      and `batch_size` is not `None` because they are mutually
      exclusive.
  Returns:
    When steps is `None`, returns the number of samples to be
    processed based on the size of the first dimension of the
    first input numpy array. When steps is not `None` and
    `batch_size` is `None`, returns `None`.
"""
if steps is not None and batch_size is not None:
raise ValueError(
'If ' + steps_name + ' is set, the `batch_size` must be None.')
if check_steps_argument(ins, steps, steps_name):
return None
if hasattr(ins[0], 'shape'):
return int(ins[0].shape[0])
return None # Edge case where ins == [static_learning_phase]
def standardize_single_array(x):
if x is None:
return None
elif tensor_util.is_tensor(x):
return x
elif x.ndim == 1:
x = np.expand_dims(x, 1)
return x
def standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
if data is not None and hasattr(data, '__len__') and len(data):
raise ValueError('Error when checking model ' + exception_prefix + ': '
'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
try:
data = [
data[x].values
if data[x].__class__.__name__ == 'DataFrame' else data[x]
for x in names
]
except KeyError as e:
raise ValueError('No data provided for "' + e.args[0] + '". Need data '
'for each key in: ' + str(names))
elif isinstance(data, list):
if isinstance(data[0], list):
data = [np.asarray(d) for d in data]
elif len(names) == 1 and isinstance(data[0], (float, int)):
data = [np.asarray(data)]
else:
data = [
x.values if x.__class__.__name__ == 'DataFrame' else x for x in data
]
else:
data = data.values if data.__class__.__name__ == 'DataFrame' else data
data = [data]
data = [standardize_single_array(x) for x in data]
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError('Error when checking model ' + exception_prefix +
': the list of Numpy arrays that you are passing to '
'your model is not the size the model expected. '
'Expected to see ' + str(len(names)) + ' array(s), '
'but instead got the following list of ' +
str(len(data)) + ' arrays: ' + str(data)[:200] + '...')
elif len(names) > 1:
raise ValueError(
'Error when checking model ' + exception_prefix +
': you are passing a list as input to your model, '
'but the model expects a list of ' + str(len(names)) +
' Numpy arrays instead. The list you passed was: ' + str(data)[:200])
elif len(data) == 1 and not hasattr(data[0], 'shape'):
raise TypeError('Error when checking model ' + exception_prefix +
': data should be a Numpy array, or list/dict of '
'Numpy arrays. Found: ' + str(data)[:200] + '...')
elif len(names) == 1:
data = [np.asarray(data)]
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is not None and not tensor_util.is_tensor(data[i]):
data_shape = data[i].shape
shape = shapes[i]
if data[i].ndim != len(shape):
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have ' +
str(len(shape)) + ' dimensions, but got array '
'with shape ' + str(data_shape))
if not check_batch_axis:
data_shape = data_shape[1:]
shape = shape[1:]
for dim, ref_dim in zip(data_shape, shape):
if ref_dim != dim and ref_dim:
raise ValueError(
'Error when checking ' + exception_prefix + ': expected ' +
names[i] + ' to have shape ' + str(shape) +
' but got array with shape ' + str(data_shape))
return data
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0: # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) + ' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError(
'The model has multiple outputs, so `' + weight_type + '` '
'should be either a list or a dict. '
'Provided `' + weight_type + '` type not understood: ' + str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight, output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight, output_names,
'sample_weight')
def check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Arguments:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def set_of_lengths(x):
# Returns a set with the variation between
# different shapes, with None => 0
if x is None:
return {}
else:
return set([y.shape[0] for y in x
if y is not None and not tensor_util.is_tensor(y)])
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' +
str([x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' +
str([y.shape for y in targets]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' +
str([w.shape for w in weights]))
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly. This check
is purely for UX purposes.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {
losses.mean_squared_error, losses.binary_crossentropy,
losses.categorical_crossentropy
}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if y is None or loss is None or tensor_util.is_tensor(y):
continue
if loss is losses.categorical_crossentropy:
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' + str(
y.shape) + ' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
Arguments:
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
Returns:
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
def batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
Arguments:
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
Returns:
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
Arguments:
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
Returns:
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = math_ops.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# apply sample weighting
if weights is not None:
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
score_array *= weights
score_array /= K.mean(
math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
def standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
raise ValueError(
'Found a sample_weight with shape' + str(sample_weight.shape) + '.'
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError(
'Found a sample_weight array with shape ' + str(sample_weight.shape) +
' for an input with shape ' + str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('`class_weight` not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = np.argmax(y, axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight])
if len(weights) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError('`class_weight` must contain all classes in the data.'
' The classes %s exist in the data but not in '
'`class_weight`.' %
(existing_classes - existing_class_weight))
return weights
else:
return None
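# Illustrative example of the `class_weight` path above (numpy is already
# imported as np in this module; the weights chosen here are hypothetical):
#
#   y = np.array([[1, 0], [0, 1], [0, 1]])                 # one-hot targets
#   standardize_weights(y, class_weight={0: 1.0, 1: 2.0})  # -> array([1., 2., 2.])
#
# When `sample_weight` is passed instead, the function only validates its
# shape against `y` and returns it unchanged.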
def has_symbolic_tensors(ls):
if context.executing_eagerly():
return False
if isinstance(ls, (list, tuple)):
return any(tensor_util.is_tensor(v) for v in ls)
return tensor_util.is_tensor(ls)
def populate_metric_names(model):
for i in range(len(model.outputs)):
metrics = model.nested_metrics[i]
for metric in metrics:
base_metric_name = get_base_metric_name(metric)
add_metric_name(model, base_metric_name, i)
def get_base_metric_name(metric, weighted=False):
"""Returns the metric name given the metric function.
Arguments:
metric: Metric function name or reference.
weighted: Boolean indicating if the metric for which we are adding
names is weighted.
Returns:
a metric name.
"""
metric_name_prefix = 'weighted_' if weighted else ''
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
metric_name = metric_name_prefix + suffix
else:
metric_fn = metrics_module.get(metric)
# Get metric name as string
if hasattr(metric_fn, 'name'):
metric_name = metric_fn.name
else:
metric_name = metric_fn.__name__
metric_name = metric_name_prefix + metric_name
return metric_name
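# Illustrative naming examples for the helper above (the last line assumes
# `metrics_module.get` resolves string aliases to the underlying function):
#
#   get_base_metric_name('acc')                           # -> 'acc'
#   get_base_metric_name('crossentropy', weighted=True)   # -> 'weighted_ce'
#   get_base_metric_name('mse')                           # -> 'mean_squared_error'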
def add_metric_name(model, metric_name, index):
"""Makes the metric name unique and adds it to the model's metric name list.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Arguments:
model: Model to which we are adding metric names.
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'
index: The index of the model output for which the metric name is being
added.
"""
if len(model.output_names) > 1:
metric_name = '%s_%s' % (model.output_names[index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in model.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
model.metrics_names.append(metric_name)
def validate_iterator_input(x, y, sample_weight, validation_split=None):
"""Validates user input arguments when a dataset iterator is passed.
Arguments:
x: Input data. A `tf.data` dataset iterator.
y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).
Expected to be `None` when `x` is a dataset iterator.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`. Expected to be `None` when
`x` is a dataset iterator
validation_split: Float between 0 and 1. Fraction of the training data to
be used as validation data. Expected to be `None` when `x` is a dataset
iterator.
Raises:
ValueError: if argument `y` or `sample_weight` or `validation_split` are
provided by user.
"""
if y is not None:
raise ValueError('You passed a dataset iterator (%s) as input `x` to '
'your model. In that case, you should not specify '
'a target (`y`) argument, since the dataset iterator '
'generates both input data and target data. '
'Received: %s' % (x, y))
if sample_weight is not None:
raise ValueError('`sample_weight` argument is not supported when input'
' `x` is a dataset iterator. '
'Received: x=%s, sample_weight=%s' % (x, sample_weight))
if validation_split is not None and validation_split != 0.0:
raise ValueError(
'`validation_split` argument is not supported when '
'input `x` is a dataset iterator. '
'Received: x=%s, validation_split=%f' % (x, validation_split))
def check_steps_argument(input_data, steps, steps_name):
"""Validates `steps` argument based on input data's type.
The cases when `steps` value must be provided are when
1. input data passed is an iterator.
2. model was built on top of symbolic tensors, input data is not
required and is `None`.
3. input data passed is a symbolic tensor.
Arguments:
input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
tf.data.Dataset iterator or `None`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
steps_name: The public API's parameter name for `steps`.
Returns:
boolean, True if `steps` argument is required, else False.
Raises:
ValueError: if `steps` argument is required for given input data type
but not provided.
"""
is_x_iterator = (
isinstance(input_data, iterator_ops.Iterator) or
isinstance(input_data, iterator_ops.EagerIterator))
if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or
(isinstance(input_data, list) and not input_data)):
if steps is None:
input_type_str = 'iterators' if is_x_iterator else 'data tensors'
raise ValueError('When using {input_type} as input to a model, you should'
' specify the `{steps_name}` argument.'.format(
input_type=input_type_str, steps_name=steps_name))
return True
return False
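# Illustrative behaviour of check_steps_argument (outcomes follow the rules in
# the docstring above; the variable names are placeholders):
#
#   check_steps_argument(numpy_array, steps=None, steps_name='steps')      # -> False
#   check_steps_argument(dataset_iterator, steps=100, steps_name='steps')  # -> True
#   check_steps_argument(dataset_iterator, steps=None, steps_name='steps') # raises ValueError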
|
the-stack_106_23715 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
import random
import logging
from pio_tests.integration import BaseTestCase, AppContext
from utils import AppEngine, srun, pjoin
def read_events(file_path):
RATE_ACTIONS_DELIMITER = "::"
with open(file_path, 'r') as f:
events = []
for line in f:
data = line.rstrip('\r\n').split(RATE_ACTIONS_DELIMITER)
if random.randint(0, 1) == 1:
events.append( {
"event": "rate",
"entityType": "user",
"entityId": data[0],
"targetEntityType": "item",
"targetEntityId": data[1],
"properties": { "rating" : float(data[2]) } })
else:
events.append({
"event": "buy",
"entityType": "user",
"entityId": data[0],
"targetEntityType": "item",
"targetEntityId": data[1] })
return events
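# Illustrative shape of a single element produced by read_events (values are
# hypothetical; roughly half of the input lines become "rate" events, the rest
# become "buy" events without a rating):
#
#   {"event": "rate", "entityType": "user", "entityId": "1",
#    "targetEntityType": "item", "targetEntityId": "2",
#    "properties": {"rating": 3.0}}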
class QuickStartTest(BaseTestCase):
def setUp(self):
self.log.info("Setting up the engine")
template_path = pjoin(
self.test_context.engine_directory, "recommendation-engine")
engine_json_path = pjoin(
self.test_context.data_directory, "quickstart_test/engine.json")
self.training_data_path = pjoin(
self.test_context.data_directory,
"quickstart_test/training_data.txt")
# downloading training data
srun('curl https://raw.githubusercontent.com/apache/spark/master/' \
'data/mllib/sample_movielens_data.txt --create-dirs -o {}'
.format(self.training_data_path))
app_context = AppContext(
name="MyRecommender",
template=template_path,
engine_json_path=engine_json_path)
self.app = AppEngine(self.test_context, app_context)
def runTest(self):
self.log.info("Adding a new application")
self.app.new()
event1 = {
"event" : "rate",
"entityType" : "user",
"entityId" : "u0",
"targetEntityType" : "item",
"targetEntityId" : "i0",
"properties" : {
"rating" : 5
},
"eventTime" : "2014-11-02T09:39:45.618-08:00" }
event2 = {
"event" : "buy",
"entityType" : "user",
"entityId" : "u1",
"targetEntityType" : "item",
"targetEntityId" : "i2",
"eventTime" : "2014-11-10T12:34:56.123-08:00" }
self.log.info("Sending two test events")
self.assertListEqual(
[201, 201],
[self.app.send_event(e).status_code for e in [event1, event2]])
self.log.info("Checking the number of events stored on the server")
r = self.app.get_events()
        self.assertEqual(200, r.status_code)
stored_events = r.json()
self.assertEqual(2, len(stored_events))
self.log.info("Importing many events")
new_events = read_events(self.training_data_path)
for ev in new_events:
r = self.app.send_event(ev)
self.assertEqual(201, r.status_code)
self.log.info("Checking the number of events stored on the server after the update")
r = self.app.get_events(params={'limit': -1})
        self.assertEqual(200, r.status_code)
stored_events = r.json()
        self.assertEqual(len(new_events) + 2, len(stored_events))
self.log.info("Building an engine...")
self.app.build()
self.log.info("Training...")
self.app.train()
self.log.info("Deploying and waiting 15s for it to start...")
self.app.deploy(wait_time=15)
self.log.info("Sending a single query and checking results")
user_query = { "user": 1, "num": 4 }
r = self.app.query(user_query)
self.assertEqual(200, r.status_code)
result = r.json()
self.assertEqual(4, len(result['itemScores']))
def tearDown(self):
self.log.info("Stopping deployed engine")
self.app.stop()
self.log.info("Deleting all related data")
self.app.delete_data()
self.log.info("Removing an app")
self.app.delete()
|
the-stack_106_23716 | from __future__ import print_function
import numpy as np
from scipy.sparse.linalg.isolve.utils import make_system
from pyamg.util.linalg import norm
from warnings import warn
__all__ = ['cr']
def cr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Conjugate Residual algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
The matrix A must be Hermitian symmetric (but not necessarily definite).
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The 2-norm of the preconditioned residual is used both for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cr import cr
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cr(A,b, maxiter=2, tol=1e-8)
    >>> print(norm(b - A*x))
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# n = len(b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg.krylov._cr')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# t = A.dtype.char
# eps = np.finfo(np.float).eps
# feps = np.finfo(np.single).eps
# geps = np.finfo(np.longfloat).eps
# _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
# numerically_zero = {0: feps*1e3, 1: eps*1e6,
# 2: geps*1e6}[_array_precision[t]]
# setup method
r = b - A*x
z = M*r
p = z.copy()
zz = np.inner(z.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(zz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 8
iter = 0
Az = A*z
rAz = np.inner(r.conjugate(), Az)
Ap = A*p
while True:
rAz_old = rAz
alpha = rAz / np.inner(Ap.conjugate(), Ap) # 3
x += alpha * p # 4
if np.mod(iter, recompute_r) and iter > 0: # 5
r -= alpha * Ap
else:
r = b - A*x
z = M*r
Az = A*z
rAz = np.inner(r.conjugate(), Az)
beta = rAz/rAz_old # 6
p *= beta # 7
p += z
Ap *= beta # 8
Ap += Az
iter += 1
zz = np.inner(z.conjugate(), z)
normr = np.sqrt(zz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif zz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in CR, ceasing \
iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter)
if __name__ == '__main__':
# from numpy import diag
# A = random((4,4))
# A = A*A.transpose() + diag([10,10,10,10])
# b = random((4,1))
# x0 = random((4,1))
from pyamg.gallery import stencil_grid
from numpy.random import random
import time
from pyamg.krylov._gmres import gmres
A = stencil_grid([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], (100, 100),
dtype=float, format='csr')
b = random((A.shape[0],))
x0 = random((A.shape[0],))
print('\n\nTesting CR with %d x %d 2D Laplace Matrix' %
(A.shape[0], A.shape[0]))
t1 = time.time()
r = []
(x, flag) = cr(A, b, x0, tol=1e-8, maxiter=100, residuals=r)
t2 = time.time()
print('%s took %0.3f ms' % ('cr', (t2-t1)*1000.0))
print('norm = %g' % (norm(b - A*x)))
print('info flag = %d' % (flag))
t1 = time.time()
r2 = []
(x, flag) = gmres(A, b, x0, tol=1e-8, maxiter=100, residuals=r2)
t2 = time.time()
print('%s took %0.3f ms' % ('gmres', (t2-t1)*1000.0))
print('norm = %g' % (norm(b - A*x)))
print('info flag = %d' % (flag))
# from scipy.sparse.linalg.isolve import cg as icg
# t1=time.time()
# (y,flag) = icg(A,b,x0,tol=1e-8,maxiter=100)
# t2=time.time()
# print '\n%s took %0.3f ms' % ('linalg cg', (t2-t1)*1000.0)
# print 'norm = %g'%(norm(b - A*y))
# print 'info flag = %d'%(flag)
|
the-stack_106_23718 | import os
import shutil
from bson.json_util import dumps
import numpy as np
from pax import plugin, exceptions, datastructure
from pax.formats import flat_data_formats
class TableWriter(plugin.OutputPlugin):
"""Output data to flat table formats
Convert our data structure to numpy record arrays, one for each class (Event, Peak, ReconstructedPosition, ...).
Then output to one of several output formats (see formats.py)
For each index, an extra column is added (e.g. ReconstructedPosition has an extra column 'Event', 'Peak'
and 'ReconstructedPosition', each restarting from 0 whenever the corresponding higher-level entity changes).
The timestamp, pax configuration and version number are stored in a separate table/array: pax_info.
Because numpy arrays are optimized for working with large data structures, converting/appending each instance
separately would take long. Hence, we store data in lists first, then convert those once we've collected a bunch.
Available configuration options:
- output_format: Name of output format to produce. Must be child class of TableFormat
- output_name: The name of the output file or folder, WITHOUT file extension.
- fields_to_ignore: Fields which will not be stored.
- overwrite_data: If True, overwrite if a file/directory with the same name exists
- string_data_length: Maximum length of strings in string data fields; longer strings will be truncated.
(the pax configuration is always stored fully)
- append_data: Append data to an existing file, if output format supports it
- buffer_size: Convert to numpy record arrays after every nth event.
- write_in_chunks: Write to disk every time after converting to numpy record arrays, if the output format
supports it. Else all data is kept in memory, then written to disk on shutdown.
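    Illustrative configuration sketch (option names follow the list above; the
    concrete values and the output format name are hypothetical):

        config = {
            'output_format': 'HDF5Dump',
            'output_name': 'processed_run_0',
            'fields_to_ignore': ['all_hits', 'sum_waveforms', 'raw_data'],
            'overwrite_data': True,
            'append_data': False,
            'string_data_length': 32,
            'buffer_size': 16,
            'write_in_chunks': True,
        }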
"""
def startup(self):
# Check if user forgot to specify some fields in fields_to_ignore
if 'hits' not in self.config['fields_to_ignore'] and 'all_hits' not in self.config['fields_to_ignore']:
            raise ValueError("You must ignore either (peak.)hits or (event.)all_hits to avoid duplicating "
                             "the hit info in the tabular output.")
if 'sum_waveforms' not in self.config['fields_to_ignore']:
            self.log.warning("You did not ignore the (event.)sum_waveforms field. This means you're trying to dump the "
"entire event sum waveform to the tabular output. "
"I'll try, but if it fails, you have been warned...")
if 'raw_data' not in self.config['fields_to_ignore']:
            self.log.warning("You did not ignore the (pulse.)raw_data field. This means you're trying to dump the "
"entire raw data for every pulse to the tabular output!!! "
"I'll try, but if it fails, you have been warned...")
metadata_dump = dumps(self.processor.get_metadata())
# Dictionary to contain the data
# Every class in the datastructure is a key; values are dicts:
# {
# tuples : list of data tuples not yet converted to records,
# records : numpy record arrays,
# dtype : dtype of numpy record (includes field names),
# }
self.data = {
# Write pax configuration and version to pax_info dataframe
# Will be a table with one row
# If you append to an existing HDF5 file, it will make a second row
# TODO: However, it will probably crash if the configuration is a
# longer string...
'pax_info': {
'tuples': [(metadata_dump,)],
'records': None,
'dtype': [('metadata_json', 'S%d' % len(metadata_dump))]}
}
self.events_ready_for_conversion = 0
# Init the output format
self.output_format = of = flat_data_formats[self.config['output_format']](log=self.log)
if self.config['append_data'] and self.config['overwrite_data']:
raise ValueError('Invalid configuration for TableWriter: Cannot both'
' append and overwrite')
# Check if options are supported
if not of.supports_write_in_chunks and self.config['write_in_chunks']:
self.log.warning('Output format %s does not support write_in_chunks: will write all at end.' %
of.__class__.__name__)
self.config['write_in_chunks'] = False
if self.config['append_data'] and not of.supports_append:
            self.log.warning('Output format %s does not support append: setting to False.' %
                             of.__class__.__name__)
self.config['append_data'] = False
# Append extension to outfile, if this format has one
if of.file_extension and of.file_extension != 'DIRECTORY':
self.config['output_name'] += '.' + of.file_extension
# Deal with existing files or non-existing dirs
outfile = self.config['output_name']
if os.path.exists(outfile):
if self.config['append_data'] and of.supports_append:
self.log.info('Output file/dir %s already exists: appending.' % outfile)
elif self.config['overwrite_data']:
self.log.info('Output file/dir %s already exists, and you '
'wanted to overwrite, so deleting it!' % outfile)
if self.output_format.file_extension == 'DIRECTORY':
self.log.warning('Deleting recursively %s...',
outfile)
shutil.rmtree(outfile) # Deletes directory and contents
os.mkdir(outfile)
else:
os.remove(outfile)
else:
raise exceptions.OutputFileAlreadyExistsError('%s already exists, and you did not specify append or '
'overwrite...' % outfile)
        elif of.file_extension == 'DIRECTORY':
# We are a dir output format: dir must exist
os.mkdir(outfile)
# Open the output file
self.log.info("Opening output file/directory %s" % self.config['output_name'])
self.output_format.open(self.config['output_name'], mode='w')
def write_event(self, event):
"""Receive event and determine what to do with it
Store all the event data internally, write to disk when appropriate.
This function follows the plugin API.
"""
self._model_to_tuples(event,
index_fields=[('Event', event.event_number), ])
self.events_ready_for_conversion += 1
if self.events_ready_for_conversion >= self.config['buffer_size']:
self._convert_to_records()
if self.config['write_in_chunks']:
self._write_to_disk()
def _convert_to_records(self):
"""Convert buffer data to numpy record arrays
"""
for dfname in self.data.keys():
self.log.debug("Converting %s " % dfname)
# Set index at which next set of tuples begins
self.data[dfname]['first_index'] = len(self.data[dfname]['tuples']) + \
self.data[dfname].get('first_index', 0)
# Convert tuples to records
newrecords = np.array(self.data[dfname]['tuples'],
self.data[dfname]['dtype'])
# Clear tuples. Enjoy the freed memory.
self.data[dfname]['tuples'] = []
# Append new records
if self.data[dfname]['records'] is None:
self.data[dfname]['records'] = newrecords
else:
self.data[dfname]['records'] = np.concatenate((self.data[dfname]['records'],
newrecords))
self.events_ready_for_conversion = 0
def _write_to_disk(self):
"""Write buffered data to disk
"""
self.log.debug("Writing to disk...")
# If any records are present, call output format to write records to
# disk
if 'Event' not in self.data:
# The processor crashed, don't want to make things worse!
self.log.warning('No events to write: did you crash pax?')
return
if self.data['Event']['records'] is not None:
self.output_format.write_data({k: v['records'] for k,
v in self.data.items()})
# Delete records we've just written to disk
for d in self.data.keys():
self.data[d]['records'] = None
def shutdown(self):
# hasattr check is needed to prevent extra error if pax crashes before the plugin runs
if hasattr(self, 'output_format'):
if self.events_ready_for_conversion:
self._convert_to_records()
self._write_to_disk()
self.output_format.close()
def get_index_of(self, mname):
# Returns index +1 of last last entry in self.data[mname]. Returns 0
# if no mname seen before.
if mname not in self.data:
return 0
else:
return self.data[mname]['first_index'] + len(self.data[mname]['tuples'])
def _model_to_tuples(self, m, index_fields):
"""Convert one of our data model instances to a tuple while storing its field names & dtypes,
handling subcollections recursively, keeping track of index hierarchy
:param m: instance to convert
:param index_fields: list of (index_field_name, value) tuples denoting multi-index trail
"""
# List to contain data from this model, will be made into tuple later
m_name = m.__class__.__name__
m_indices = [x[1] for x in index_fields]
m_data = []
# Have we seen this model before? If not, initialize stuff
first_time_seen = False
if m_name not in self.data:
self.data[m_name] = {
'tuples': [],
'records': None,
# Initialize dtype with the index fields
'dtype': [(x[0], np.int64) for x in index_fields],
'index_depth': len(m_indices),
'first_index': 0
}
first_time_seen = True
for field_name, field_value in m.get_fields_data():
if field_name in self.config['fields_to_ignore']:
continue
            if field_name == "sum_waveforms":
# Hack to prepare SumWaveform object data to be stored as hdf5
field_value_tmp = []
def _set_attr(old_obj, new_obj):
"""
Set attributes from SumWaveform instance onto
new instance of metaclass SumWaveform_x and
its subclass.
"""
# Constant WaveForm array size
const_array_size = 250000
# Fields to make constant
fields_to_extend = ['samples']
attr = old_obj.__dict__
for key, val in attr.items():
if key in fields_to_extend:
extend_with = const_array_size - len(val)
if extend_with > 0:
val = np.pad(val, (0,extend_with), 'constant', constant_values=np.nan)
setattr(new_obj, key, val)
# This is needed for get_fields_data() to work properly
# for the new metaclass
setattr(new_obj.__class__, key, None)
return new_obj
def _set_class(class_name, old_obj):
"""
Create metaclass from SumWaveform instance with all
attributes set to those of the SumWaveform instance.
"""
metaClass = type(class_name, (type(old_obj),), {})
new_obj = _set_attr(old_obj, metaClass())
return new_obj
# Iterate over SumWaveform objects in field_value
# and replace field_value with instances of the metaclass
for m_sw in field_value:
new_name = type(m_sw).__name__ + '_%s' % m_sw.name
m_sw_tmp = _set_class(new_name, m_sw)
field_value_tmp.append(m_sw_tmp)
field_value = field_value_tmp
if isinstance(field_value, list):
# This is a model collection field.
# Get its type (can't get from the list itself, could be empty)
child_class_name = m.get_list_field_info()[field_name]
# Store the absolute start index & number of children
child_start = self.get_index_of(child_class_name)
n_children = len(field_value)
if first_time_seen:
# Add data types for n_x (unless already present, e.g. peak.n_hits) and x_start field names.
# Will have int type.
if not hasattr(m, 'n_%s' % field_name):
self.data[m_name]['dtype'].append(self._numpy_field_dtype('n_%s' % field_name, 0))
self.data[m_name]['dtype'].append(self._numpy_field_dtype('%s_start' % field_name, 0))
if not hasattr(m, 'n_%s' % field_name):
m_data.append(n_children)
m_data.append(child_start)
# We'll ship model collections off to their own tuples (later record arrays)
# Convert each child_model to a dataframe, with a new index
# appended to the index trail
for new_index, child_model in enumerate(field_value):
self._model_to_tuples(child_model,
index_fields + [(type(child_model).__name__,
new_index)])
elif isinstance(field_value, np.ndarray) and field_value.dtype.names is not None:
# Hey this is already a structured array :-) Treat like a collection field (except don't recurse)
if field_name not in self.data:
self.data[field_name] = {
'tuples': [],
'records': None,
# Initialize dtype with the index fields + every column
# in array becomes a field.... :-(
'dtype': [(x[0], np.int64) for x in index_fields] +
[(fn, field_value[fn].dtype) for fn in field_value.dtype.names],
'index_depth': len(m_indices),
}
for element in field_value.tolist():
self.data[field_name]['tuples'].append(tuple(m_indices + list(element)))
elif isinstance(field_value, np.ndarray) and not self.output_format.supports_array_fields:
# Hack for formats without array field support: NumpyArrayFields must get their own dataframe
# -- assumes field names are unique!
# dataframe columns = str(positions in the array) ('0', '1', '2', ...)
# Is this the first time we see this numpy array field?
if field_name not in self.data:
# Must be the first time we see dataframe as well
assert first_time_seen
self.data[field_name] = {
'tuples': [],
'records': None,
# Initialize dtype with the index fields + every column
# in array becomes a field.... :-(
'dtype': [(x[0], np.int64) for x in index_fields] +
[(str(i), field_value.dtype) for i in range(len(field_value))],
'index_depth': len(m_indices),
}
self.data[field_name]['tuples'].append(tuple(m_indices + field_value.tolist()))
else:
m_data.append(field_value)
if first_time_seen:
# Store this field's data type
self.data[m_name]['dtype'].append(self._numpy_field_dtype(field_name,
field_value))
# Store m_indices + m_data in self.data['tuples']
self.data[m_name]['tuples'].append(tuple(m_indices + m_data))
def _numpy_field_dtype(self, name, x):
"""Return field dtype of numpy record with field name name and value (of type of) x
"""
if name == 'metadata_dump':
return name, 'O'
if isinstance(x, int):
return name, np.int64
if isinstance(x, float):
return name, 'f'
if isinstance(x, str):
if self.output_format.prefers_python_strings:
return name, 'O'
else:
return name, 'S' + str(self.config['string_data_length'])
if isinstance(x, np.ndarray):
return name, x.dtype, x.shape
else:
# Some weird numpy type, hopefully
return name, type(x)
class TableReader(plugin.InputPlugin):
"""Read data from TableWriter for reprocessing
'Reprocessing' means: reading in old processed data, then start somewhere in middle of processing chain,
(e.g. redo classification), and finally write to new file.
Reprocessing WITHOUT reading in the individual hits is very fast. This is fine for re-doing
peak classification and anything higher-level we may come up with.
For re-doing clustering and/or peak property computation you must read in the hits, which is slow.
The speed is set by overhead in the datastructure: 1/3 of runtime due to type checking, rest due to
converting between all the internal formats). We can try to optimize this at some point.
However, for this kind of reprocessing we may eventually need to read the raw data and build a sum waveform
as well, which takes time too.
TODO: Check if all the tables / dnames we want to read in are present in the file, else give error
"""
def startup(self):
self.chunk_size = self.config['chunk_size']
self.read_hits = self.config['read_hits']
self.read_recposes = self.config['read_recposes']
self.read_interactions = self.config['read_interactions']
self.output_format = of = flat_data_formats[self.config['format']](log=self.log)
if not of.supports_read_back:
raise NotImplementedError("Output format %s does not "
"support reading data back in!" % self.config['format'])
of.open(name=self.config['input_name'], mode='r')
self.dnames = ['Event', 'Peak']
if self.read_hits:
self.dnames.append('Hit')
if self.read_recposes:
if 'ReconstructedPosition' not in of.data_types_present:
self.log.warning("You asked to read ReconstructedPosition, "
"but this file has no ReconstructedPositions!")
self.read_recposes = False
else:
self.dnames.append('ReconstructedPosition')
if self.read_interactions:
if 'Interaction' not in of.data_types_present:
self.log.warning("You asked to read interactions, but this file has no interactions!")
self.read_interactions = False
else:
self.dnames.append('Interaction')
# Dict of numpy record arrays just read from disk, waiting to be sorted
self.cache = {}
self.max_n = {x: of.n_in_data(x) for x in self.dnames}
self.number_of_events = self.max_n['Event']
self.current_pos = {x: 0 for x in self.dnames}
def get_events(self):
"""Get events from processed data source
"""
of = self.output_format
for event_i in range(self.number_of_events):
in_this_event = {}
# Check if we should fill the cache.
for dname in self.dnames: # dname -> event, peak, etc.
# Check if what is stored in the cache for 'dname' is either
# nonexistent, empty, or incomplete. If so, keep reading new
# chunks of data to populate the cache.
while dname not in self.cache or len(self.cache[dname]) == 0 \
or self.cache[dname][0]['Event'] == self.cache[dname][-1]['Event']:
# If no data of this dname left in the file, we of course
# stop filling the cache
if self.current_pos[dname] == self.max_n[dname]:
break
new_pos = min(self.max_n[dname],
self.current_pos[dname] + self.chunk_size)
new_chunk = of.read_data(dname,
self.current_pos[dname],
new_pos)
self.current_pos[dname] = new_pos
# Add new chunk to cache
bla = np.concatenate((self.cache.get(dname,
np.empty(0,
dtype=new_chunk.dtype)),
new_chunk))
self.cache[dname] = bla
# What number is the next event?
this_event_i = self.cache['Event'][0]['Event']
# Get all records belonging to this event:
for dname in self.dnames:
mask = self.cache[dname]['Event'] == this_event_i
in_this_event[dname] = self.cache[dname][mask]
# Chop records in this event from cache
inverted_mask = True ^ mask # XOR
self.cache[dname] = self.cache[dname][inverted_mask]
# Convert records to pax data
assert len(in_this_event['Event']) == 1
e_record = in_this_event['Event'][0]
peaks = in_this_event['Peak']
event = self.convert_record(datastructure.Event, e_record)
if self.config.get('read_hits_only', False):
# Read in only hits for reclustering
for hit_record in in_this_event['Hit']:
cp = self.convert_record(datastructure.Hit, hit_record)
event.all_hits.append(cp)
else:
for peak_i, p_record in enumerate(peaks):
peak = self.convert_record(datastructure.Peak, p_record)
if self.read_recposes:
for rp_record in in_this_event['ReconstructedPosition'][
in_this_event['ReconstructedPosition']['Peak'] == peak_i]:
peak.reconstructed_positions.append(
self.convert_record(datastructure.ReconstructedPosition,
rp_record)
)
if self.read_hits:
for hit_record in in_this_event['Hit'][(in_this_event['Hit']['Peak'] == peak_i)]:
cp = self.convert_record(datastructure.Hit,
hit_record)
peak.hits.append(cp)
event.all_hits.append(cp)
event.peaks.append(peak)
if self.read_interactions:
interactions = in_this_event['Interaction']
for intr_record in interactions:
event.interactions.append(self.convert_record(datastructure.Interaction, intr_record))
yield event
def convert_record(self, class_to_load_to, record):
# We defined a nice custom init for event... ahem... now we have to do
# cumbersome stuff...
if class_to_load_to == datastructure.Event:
result = datastructure.Event(n_channels=self.config['n_channels'],
start_time=record['start_time'],
stop_time=record['stop_time'],
sample_duration=record['sample_duration'])
else:
result = class_to_load_to()
for k, v in self._numpy_record_to_dict(record).items():
# If result doesn't have this attribute, ignore it
# This happens for n_peaks etc. and attributes that have been removed
if hasattr(result, k):
setattr(result, k, v)
return result
def _numpy_record_to_dict(self, record):
"""Convert a single numpy record to a dict (keys=field names, values=values)
"""
names = record.dtype.names
result = {}
for k, v in zip(names, record):
# Skip index fields, if present
if k in ('Event', 'Peak', 'Hit', 'ReconstructedPosition'):
continue
if isinstance(v, np.bytes_):
v = v.decode("utf-8")
# For py2 compatibility:
if v.__class__.__name__ == 'unicode':
v = str(v)
result[k] = v
return result
|
the-stack_106_23721 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# repeat.py
# shelly
#
"""
Repeat a command a given number of times (or, infinitely).
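
Example invocations (illustrative):

    python repeat.py -n 3 'echo hello'   # run the command three times
    python repeat.py 'date'              # repeat until interrupted (Ctrl-C)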
"""
import sys
import optparse
import subprocess
def repeat(cmd_args, n=None):
run = lambda: subprocess.Popen([' '.join(cmd_args)], stdout=sys.stdout,
stderr=sys.stderr, shell=True)
if n is None:
while True:
run()
else:
while n > 0:
run()
n -= 1
def _create_option_parser():
usage = \
"""%prog repeat [options] <command string>
Runs the given shell command over and over indefinitely, or N times with the
-n option. If the command has arguments, include it and them in a single quoted
string."""
parser = optparse.OptionParser(usage)
parser.add_option('-n', action='store', dest='n', default=None,
type='int', help='Finish after n iterations.')
return parser
def main(argv):
parser = _create_option_parser()
(options, args) = parser.parse_args(argv)
if not args:
parser.print_help()
sys.exit(1)
try:
repeat(args, n=options.n)
except (KeyboardInterrupt, IOError):
pass
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_106_23723 | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import subprocess
import sys
from pathlib import Path
import pytest
from click.testing import CliRunner
from kedro.framework.cli.project import NO_DEPENDENCY_MESSAGE
@pytest.fixture(autouse=True)
def call_mock(mocker):
return mocker.patch("kedro.framework.cli.project.call")
@pytest.fixture(autouse=True)
def python_call_mock(mocker):
return mocker.patch("kedro.framework.cli.project.python_call")
@pytest.fixture
def fake_ipython_message(mocker):
return mocker.patch("kedro.framework.cli.project.ipython_message")
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestActivateNbstripoutCommand:
@staticmethod
@pytest.fixture()
def fake_nbstripout():
"""
``nbstripout`` tries to access ``sys.stdin.buffer.readable``
on import, but it's patches by pytest.
Let's replace it by the fake!
"""
sys.modules["nbstripout"] = "fake"
yield
del sys.modules["nbstripout"]
@staticmethod
@pytest.fixture
def fake_git_repo(mocker):
return mocker.patch("subprocess.run", return_value=mocker.Mock(returncode=0))
@staticmethod
@pytest.fixture
def without_git_repo(mocker):
return mocker.patch("subprocess.run", return_value=mocker.Mock(returncode=1))
def test_install_successfully(
self, fake_kedro_cli, call_mock, fake_nbstripout, fake_git_repo
):
result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
assert not result.exit_code
call_mock.assert_called_once_with(["nbstripout", "--install"])
fake_git_repo.assert_called_once_with(
["git", "rev-parse", "--git-dir"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def test_nbstripout_not_installed(self, fake_kedro_cli, fake_git_repo, mocker):
"""
Run activate-nbstripout target without nbstripout installed
There should be a clear message about it.
"""
mocker.patch.dict("sys.modules", {"nbstripout": None})
result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
assert result.exit_code
assert "nbstripout is not installed" in result.stdout
def test_no_git_repo(self, fake_kedro_cli, fake_nbstripout, without_git_repo):
"""
Run activate-nbstripout target with no git repo available.
There should be a clear message about it.
"""
result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
assert result.exit_code
assert "Not a git repository" in result.stdout
def test_no_git_executable(self, fake_kedro_cli, fake_nbstripout, mocker):
mocker.patch("subprocess.run", side_effect=FileNotFoundError)
result = CliRunner().invoke(fake_kedro_cli.cli, ["activate-nbstripout"])
assert result.exit_code
assert "Git executable not found. Install Git first." in result.stdout
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestTestCommand:
def test_happy_path(self, fake_kedro_cli, python_call_mock):
result = CliRunner().invoke(
fake_kedro_cli.cli, ["test", "--random-arg", "value"]
)
assert not result.exit_code
python_call_mock.assert_called_once_with("pytest", ("--random-arg", "value"))
def test_pytest_not_installed(
self, fake_kedro_cli, python_call_mock, mocker, fake_repo_path
):
mocker.patch.dict("sys.modules", {"pytest": None})
result = CliRunner().invoke(
fake_kedro_cli.cli, ["test", "--random-arg", "value"]
)
expected_message = NO_DEPENDENCY_MESSAGE.format(
module="pytest", src=str(fake_repo_path / "src")
)
assert result.exit_code
assert expected_message in result.stdout
python_call_mock.assert_not_called()
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestLintCommand:
@pytest.mark.parametrize("files", [(), ("src",)])
def test_lint(
self, fake_kedro_cli, python_call_mock, files, mocker, fake_repo_path
):
result = CliRunner().invoke(fake_kedro_cli.cli, ["lint", *files])
assert not result.exit_code, result.stdout
expected_files = files or (
str(fake_repo_path / "src/tests"),
str(fake_repo_path / "src/dummy_package"),
)
expected_calls = [
mocker.call("black", expected_files),
mocker.call("flake8", ("--max-line-length=88",) + expected_files),
mocker.call(
"isort",
("-rc", "-tc", "-up", "-fgw=0", "-m=3", "-w=88") + expected_files,
),
]
assert python_call_mock.call_args_list == expected_calls
@pytest.mark.parametrize(
"check_flag,files",
[
("-c", ()),
("--check-only", ()),
("-c", ("src",)),
("--check-only", ("src",)),
],
)
def test_lint_check_only(
self,
fake_kedro_cli,
python_call_mock,
check_flag,
mocker,
files,
fake_repo_path,
):
result = CliRunner().invoke(fake_kedro_cli.cli, ["lint", check_flag, *files])
assert not result.exit_code, result.stdout
expected_files = files or (
str(fake_repo_path / "src/tests"),
str(fake_repo_path / "src/dummy_package"),
)
expected_calls = [
mocker.call("black", ("--check",) + expected_files),
mocker.call("flake8", ("--max-line-length=88",) + expected_files),
mocker.call(
"isort",
("-c", "-rc", "-tc", "-up", "-fgw=0", "-m=3", "-w=88") + expected_files,
),
]
assert python_call_mock.call_args_list == expected_calls
@pytest.mark.parametrize("module_name", ["flake8", "isort"])
def test_import_not_installed(
self, fake_kedro_cli, python_call_mock, module_name, mocker, fake_repo_path
):
mocker.patch.dict("sys.modules", {module_name: None})
result = CliRunner().invoke(fake_kedro_cli.cli, ["lint"])
expected_message = NO_DEPENDENCY_MESSAGE.format(
module=module_name, src=str(fake_repo_path / "src")
)
assert result.exit_code, result.stdout
assert expected_message in result.stdout
python_call_mock.assert_not_called()
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestInstallCommand:
def test_happy_path(
self, python_call_mock, call_mock, fake_kedro_cli, fake_repo_path
):
result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
assert not result.exit_code
python_call_mock.assert_called_once_with(
"pip", ["install", "-U", "-r", str(fake_repo_path / "src/requirements.txt")]
)
call_mock.assert_not_called()
def test_with_env_file(
self, python_call_mock, call_mock, fake_kedro_cli, mocker, fake_repo_path
):
# Pretend env file exists:
mocker.patch.object(Path, "is_file", return_value=True)
result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
assert not result.exit_code, result.stdout
python_call_mock.assert_called_once_with(
"pip", ["install", "-U", "-r", str(fake_repo_path / "src/requirements.txt")]
)
call_mock.assert_called_once_with(
[
"conda",
"install",
"--file",
str(fake_repo_path / "src/environment.yml"),
"--yes",
]
)
def test_windows(self, fake_kedro_cli, mocker, fake_repo_path):
mock_subprocess = mocker.patch("kedro.framework.cli.project.subprocess")
# pretend we are on Windows
mocker.patch("kedro.framework.cli.project.os").name = "nt"
result = CliRunner().invoke(fake_kedro_cli.cli, ["install"])
assert not result.exit_code, result.stdout
command = [
sys.executable,
"-m",
"pip",
"install",
"-U",
"-r",
str(fake_repo_path / "src/requirements.txt"),
]
mock_subprocess.Popen.assert_called_once_with(
command, creationflags=mock_subprocess.CREATE_NEW_CONSOLE
)
@pytest.fixture
def os_mock(mocker):
return mocker.patch("kedro.framework.cli.project.os")
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log", "os_mock")
class TestIpythonCommand:
def test_happy_path(
self, call_mock, fake_kedro_cli, fake_ipython_message, os_mock, fake_repo_path
):
result = CliRunner().invoke(
fake_kedro_cli.cli, ["ipython", "--random-arg", "value"]
)
assert not result.exit_code, result.stdout
fake_ipython_message.assert_called_once_with()
call_mock.assert_called_once_with(["ipython", "--random-arg", "value"])
os_mock.environ.__setitem__.assert_called_once_with(
"IPYTHONDIR", str(fake_repo_path / ".ipython")
)
@pytest.mark.parametrize("help_flag", ["-h", "--help"])
def test_help(self, help_flag, call_mock, fake_kedro_cli, fake_ipython_message):
result = CliRunner().invoke(fake_kedro_cli.cli, ["ipython", help_flag])
assert not result.exit_code, result.stdout
fake_ipython_message.assert_not_called()
call_mock.assert_called_once_with(["ipython", help_flag])
@pytest.mark.parametrize("env_flag,env", [("--env", "base"), ("-e", "local")])
def test_env(
self, env_flag, env, fake_kedro_cli, call_mock, fake_repo_path, os_mock, mocker
):
"""This tests starting ipython with specific env."""
result = CliRunner().invoke(fake_kedro_cli.cli, ["ipython", env_flag, env])
assert not result.exit_code, result.stdout
calls = [
mocker.call("IPYTHONDIR", str(fake_repo_path / ".ipython")),
mocker.call("KEDRO_ENV", env),
]
os_mock.environ.__setitem__.assert_has_calls(calls)
def test_load_context_error(self, fake_kedro_cli):
result = CliRunner().invoke(
fake_kedro_cli.cli, ["ipython", "--env", "fake_env"]
)
expected_output = (
"Error: Unable to load Kedro context with environment `fake_env`. "
"Make sure it exists in the project configuration.\n"
)
assert result.exit_code
assert expected_output in result.output
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestPackageCommand:
def test_happy_path(self, call_mock, fake_kedro_cli, mocker, fake_repo_path):
result = CliRunner().invoke(fake_kedro_cli.cli, ["package"])
assert not result.exit_code, result.stdout
call_mock.assert_has_calls(
[
mocker.call(
[sys.executable, "setup.py", "clean", "--all", "bdist_egg"],
cwd=str(fake_repo_path / "src"),
),
mocker.call(
[sys.executable, "setup.py", "clean", "--all", "bdist_wheel"],
cwd=str(fake_repo_path / "src"),
),
]
)
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestBuildDocsCommand:
def test_happy_path(
self, call_mock, python_call_mock, fake_kedro_cli, mocker, fake_repo_path
):
fake_rmtree = mocker.patch("shutil.rmtree")
result = CliRunner().invoke(fake_kedro_cli.cli, ["build-docs"])
assert not result.exit_code, result.stdout
call_mock.assert_has_calls(
[
mocker.call(
[
"sphinx-apidoc",
"--module-first",
"-o",
"docs/source",
str(fake_repo_path / "src/dummy_package"),
]
),
mocker.call(
["sphinx-build", "-M", "html", "docs/source", "docs/build", "-a"]
),
]
)
python_call_mock.assert_has_calls(
[
mocker.call("pip", ["install", str(fake_repo_path / "src/[docs]")]),
mocker.call(
"pip",
["install", "-r", str(fake_repo_path / "src/requirements.txt")],
),
mocker.call("ipykernel", ["install", "--user", "--name=dummy_package"]),
]
)
fake_rmtree.assert_called_once_with("docs/build", ignore_errors=True)
@pytest.mark.parametrize("open_flag", ["-o", "--open"])
def test_open_docs(self, open_flag, fake_kedro_cli, mocker):
mocker.patch("shutil.rmtree")
patched_browser = mocker.patch("webbrowser.open")
result = CliRunner().invoke(fake_kedro_cli.cli, ["build-docs", open_flag])
assert not result.exit_code, result.stdout
expected_path = (Path.cwd() / "docs" / "build" / "html" / "index.html").as_uri()
patched_browser.assert_called_once_with(expected_path)
@pytest.mark.usefixtures("chdir_to_dummy_project", "patch_log")
class TestBuildReqsCommand:
def test_requirements_file_exists(
self, python_call_mock, fake_kedro_cli, mocker, fake_repo_path
):
# File exists:
mocker.patch.object(Path, "is_file", return_value=True)
result = CliRunner().invoke(fake_kedro_cli.cli, ["build-reqs"])
assert not result.exit_code, result.stdout
assert "Requirements built!" in result.stdout
python_call_mock.assert_called_once_with(
"piptools", ["compile", str(fake_repo_path / "src" / "requirements.in")]
)
def test_requirements_file_doesnt_exist(
self, python_call_mock, fake_kedro_cli, mocker, fake_repo_path
):
# File does not exist:
# 'is_file' is called multiple times in different places
mocker.patch.object(Path, "is_file", side_effect=[True, True, False])
mocker.patch.object(Path, "read_text", return_value="fake requirements")
fake_writer = mocker.patch.object(Path, "write_text")
result = CliRunner().invoke(fake_kedro_cli.cli, ["build-reqs"])
assert not result.exit_code, result.stdout
assert "Requirements built!" in result.stdout
python_call_mock.assert_called_once_with(
"piptools", ["compile", str(fake_repo_path / "src" / "requirements.in")]
)
fake_writer.assert_called_once_with("fake requirements")
|
the-stack_106_23728 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=22
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=6
c.append(cirq.H.on(input_qubit[1])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=13
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.H.on(input_qubit[0])) # number=16
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[0])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=19
c.append(cirq.X.on(input_qubit[3])) # number=20
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.X.on(input_qubit[3])) # number=11
c.append(cirq.X.on(input_qubit[2])) # number=12
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma802.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
    writefile.close()
|
the-stack_106_23730 | import time
import win32api
import win32con
# https://gist.github.com/chriskiehl/2906125
VK_CODES = {
'backspace': 0x08,
'tab': 0x09,
'clear': 0x0C,
'enter': 0x0D,
'shift': 0x10,
'ctrl': 0x11,
'alt': 0x12,
'pause': 0x13,
'caps_lock': 0x14,
'esc': 0x1B,
'spacebar': 0x20,
'page_up': 0x21,
'page_down': 0x22,
'end': 0x23,
'home': 0x24,
'left_arrow': 0x25,
'up_arrow': 0x26,
'right_arrow': 0x27,
'down_arrow': 0x28,
'select': 0x29,
'print': 0x2A,
'execute': 0x2B,
'print_screen': 0x2C,
'ins': 0x2D,
'del': 0x2E,
'help': 0x2F,
'0': 0x30,
'1': 0x31,
'2': 0x32,
'3': 0x33,
'4': 0x34,
'5': 0x35,
'6': 0x36,
'7': 0x37,
'8': 0x38,
'9': 0x39,
'a': 0x41,
'b': 0x42,
'c': 0x43,
'd': 0x44,
'e': 0x45,
'f': 0x46,
'g': 0x47,
'h': 0x48,
'i': 0x49,
'j': 0x4A,
'k': 0x4B,
'l': 0x4C,
'm': 0x4D,
'n': 0x4E,
'o': 0x4F,
'p': 0x50,
'q': 0x51,
'r': 0x52,
's': 0x53,
't': 0x54,
'u': 0x55,
'v': 0x56,
'w': 0x57,
'x': 0x58,
'y': 0x59,
'z': 0x5A,
'numpad_0': 0x60,
'numpad_1': 0x61,
'numpad_2': 0x62,
'numpad_3': 0x63,
'numpad_4': 0x64,
'numpad_5': 0x65,
'numpad_6': 0x66,
'numpad_7': 0x67,
'numpad_8': 0x68,
'numpad_9': 0x69,
'multiply_key': 0x6A,
'add_key': 0x6B,
'separator_key': 0x6C,
'subtract_key': 0x6D,
'decimal_key': 0x6E,
'divide_key': 0x6F,
'F1': 0x70,
'F2': 0x71,
'F3': 0x72,
'F4': 0x73,
'F5': 0x74,
'F6': 0x75,
'F7': 0x76,
'F8': 0x77,
'F9': 0x78,
'F10': 0x79,
'F11': 0x7A,
'F12': 0x7B,
'F13': 0x7C,
'F14': 0x7D,
'F15': 0x7E,
'F16': 0x7F,
'F17': 0x80,
'F18': 0x81,
'F19': 0x82,
'F20': 0x83,
'F21': 0x84,
'F22': 0x85,
'F23': 0x86,
'F24': 0x87,
'num_lock': 0x90,
'scroll_lock': 0x91,
'left_shift': 0xA0,
    'right_shift': 0xA1,
'left_control': 0xA2,
'right_control': 0xA3,
'left_menu': 0xA4,
'right_menu': 0xA5,
'browser_back': 0xA6,
'browser_forward': 0xA7,
'browser_refresh': 0xA8,
'browser_stop': 0xA9,
'browser_search': 0xAA,
'browser_favorites': 0xAB,
'browser_start_and_home': 0xAC,
'volume_mute': 0xAD,
'volume_Down': 0xAE,
'volume_up': 0xAF,
'next_track': 0xB0,
'previous_track': 0xB1,
'stop_media': 0xB2,
'play/pause_media': 0xB3,
'start_mail': 0xB4,
'select_media': 0xB5,
'start_application_1': 0xB6,
'start_application_2': 0xB7,
'attn_key': 0xF6,
'crsel_key': 0xF7,
'exsel_key': 0xF8,
'play_key': 0xFA,
'zoom_key': 0xFB,
'clear_key': 0xFE,
'+': 0xBB,
',': 0xBC,
'-': 0xBD,
'.': 0xBE,
'/': 0xBF,
'`': 0xC0,
';': 0xBA,
'[': 0xDB,
'\\': 0xDC,
']': 0xDD,
"'": 0xDE,
}
last_pressed_hotkeys = time.time() - 1
def get_pressed_keyboard_keys():
res = {}
for key in VK_CODES:
code = VK_CODES[key]
res[key] = win32api.GetAsyncKeyState(code) != 0
return res
def check_for_capturing_hotkeys(keyboard, toggle_capturing_hotkeys):
global last_pressed_hotkeys
now = time.time()
toggle_hotkeys_pressed = 0
for hotkey in toggle_capturing_hotkeys:
if keyboard[hotkey]:
toggle_hotkeys_pressed += 1
diff = now - last_pressed_hotkeys
if toggle_hotkeys_pressed == len(toggle_capturing_hotkeys) and diff > 1:
last_pressed_hotkeys = time.time()
return True
return False
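# Illustrative polling loop using the helpers above (the hotkey combination is
# hypothetical; a real caller would likely add its own sleep/debounce policy):
#
#   toggle_hotkeys = ['ctrl', 'shift', 'c']
#   while True:
#       keys = get_pressed_keyboard_keys()
#       if check_for_capturing_hotkeys(keys, toggle_hotkeys):
#           print('capturing toggled')
#       time.sleep(0.05)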
|
the-stack_106_23731 | import os
import numpy as np
import tensorflow as tf
import random
IGNORE_LABEL = 255
IMG_MEAN = np.array((125.0, 114.4, 107.9), dtype=np.float32)
def image_scaling(img, label, edge):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=2.0, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[0]), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(img)[1]), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
img = tf.image.resize_images(img, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
edge = tf.image.resize_nearest_neighbor(tf.expand_dims(edge, 0), new_shape)
edge = tf.squeeze(edge, squeeze_dims=[0])
return img, label, edge
def image_mirroring(img, label, edge):
"""
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
"""
distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
mirror = tf.boolean_mask([0, 1, 2], mirror)
img = tf.reverse(img, mirror)
label = tf.reverse(label, mirror)
edge = tf.reverse(edge, mirror)
return img, label, edge
def random_resize_img_labels(image, label, resized_h, resized_w):
scale = tf.random_uniform([1], minval=0.75, maxval=1.25, dtype=tf.float32, seed=None)
h_new = tf.to_int32(tf.multiply(tf.to_float(resized_h), scale))
w_new = tf.to_int32(tf.multiply(tf.to_float(resized_w), scale))
new_shape = tf.squeeze(tf.stack([h_new, w_new]), squeeze_dims=[1])
img = tf.image.resize_images(image, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
return img, label
def resize_img_labels(image, label, resized_h, resized_w):
new_shape = tf.stack([tf.to_int32(resized_h), tf.to_int32(resized_w)])
img = tf.image.resize_images(image, new_shape)
label = tf.image.resize_nearest_neighbor(tf.expand_dims(label, 0), new_shape)
label = tf.squeeze(label, squeeze_dims=[0])
return img, label
def random_crop_and_pad_image_and_labels(image, label, edge, crop_h, crop_w, ignore_label=255):
"""
Randomly crop and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
"""
label = tf.cast(label, dtype=tf.float32)
label = label - ignore_label # Needs to be subtracted and later added due to 0 padding.
edge = tf.cast(edge, dtype=tf.float32)
edge = edge - 0
combined = tf.concat([image, label, edge], 2)
image_shape = tf.shape(image)
combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0, tf.maximum(crop_h, image_shape[0]),
tf.maximum(crop_w, image_shape[1]))
last_image_dim = tf.shape(image)[-1]
last_label_dim = tf.shape(label)[-1]
combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, 4 + 1])
img_crop = combined_crop[:, :, :last_image_dim]
label_crop = combined_crop[:, :, last_image_dim:last_image_dim + last_label_dim]
edge_crop = combined_crop[:, :, last_image_dim + last_label_dim:]
label_crop = label_crop + ignore_label
label_crop = tf.cast(label_crop, dtype=tf.uint8)
edge_crop = edge_crop + 0
edge_crop = tf.cast(edge_crop, dtype=tf.uint8)
# Set static shape so that tensorflow knows shape at compile time.
img_crop.set_shape((crop_h, crop_w, 3))
label_crop.set_shape((crop_h, crop_w, 1))
edge_crop.set_shape((crop_h, crop_w, 1))
return img_crop, label_crop, edge_crop
def read_labeled_image_reverse_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
images = []
masks = []
masks_rev = []
for line in f:
try:
image, mask, mask_rev = line.strip("\n").split(' ')
except ValueError: # Adhoc for test.
image = mask = mask_rev = line.strip("\n")
images.append(data_dir + image)
masks.append(data_dir + mask)
masks_rev.append(data_dir + mask_rev)
return images, masks, masks_rev
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respectively.
"""
f = open(data_list, 'r')
images = []
masks = []
for line in f:
try:
image, mask = line.strip("\n").split(' ')
except ValueError: # Adhoc for test.
image = mask = line.strip("\n")
images.append(data_dir + image)
masks.append(data_dir + mask)
return images, masks
def read_edge_list(data_dir, data_id_list):
f = open(data_id_list, 'r')
edges = []
for line in f:
edge = line.strip("\n")
edges.append(data_dir + '/edges/' + edge + '.png')
return edges
def read_images_from_disk(input_queue, input_size, random_scale,
random_mirror=False): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to random crop.
random_mirror: whether to randomly mirror the images prior
to random crop.
Returns:
Two tensors: the decoded image and its mask.
"""
img_contents = tf.read_file(input_queue[0])
label_contents = tf.read_file(input_queue[1])
edge_contents = tf.read_file(input_queue[2])
img = tf.image.decode_jpeg(img_contents, channels=3)
img_r, img_g, img_b = tf.split(value=img, num_or_size_splits=3, axis=2)
img = tf.cast(tf.concat([img_b, img_g, img_r], 2), dtype=tf.float32)
# Extract mean.
img -= IMG_MEAN
label = tf.image.decode_png(label_contents, channels=1)
edge = tf.image.decode_png(edge_contents, channels=1)
if input_size is not None:
h, w = input_size
# Randomly scale the images and labels.
if random_scale:
img, label, edge = image_scaling(img, label, edge)
# Randomly mirror the images and labels.
if random_mirror:
img, label, edge = image_mirroring(img, label, edge)
# Randomly crops the images and labels.
img, label, edge = random_crop_and_pad_image_and_labels(img, label, edge, h, w, IGNORE_LABEL)
return img, label, edge
class ImageReader(object):
'''Generic ImageReader which reads images and corresponding segmentation
masks from the disk, and enqueues them into a TensorFlow queue.
'''
def __init__(self, data_dir, data_list, data_id_list, input_size, random_scale,
random_mirror, shuffle, coord):
'''Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
data_id_list: path to the file of image id.
input_size: a tuple with (height, width) values, to which all the images will be resized.
random_scale: whether to randomly scale the images prior to random crop.
random_mirror: whether to randomly mirror the images prior to random crop.
coord: TensorFlow queue coordinator.
'''
self.data_dir = data_dir
self.data_list = data_list
self.data_id_list = data_id_list
self.input_size = input_size
self.coord = coord
self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list)
self.edge_list = read_edge_list(self.data_dir, self.data_id_list)
self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
self.edges = tf.convert_to_tensor(self.edge_list, dtype=tf.string)
self.queue = tf.train.slice_input_producer([self.images, self.labels, self.edges], shuffle=shuffle)
self.image, self.label, self.edge = read_images_from_disk(self.queue, self.input_size, random_scale,
random_mirror)
def dequeue(self, num_elements):
'''Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.'''
batch_list = [self.image, self.label, self.edge]
image_batch, label_batch, edge_batch = tf.train.batch([self.image, self.label, self.edge], num_elements)
return image_batch, label_batch, edge_batch
class UnlabeledImageReader(object):
'''ImageReader which reads images and enqueues them into a TensorFlow queue.
'''
def __init__(self, data_dir, coord):
'''Initialise an ImageReader.
Args:
data_dir: path to the directory with images.
coord: TensorFlow queue coordinator.
'''
self.data_dir = data_dir
self.coord = coord
self.image_list = list([os.path.join(self.data_dir, f) for f in os.listdir(self.data_dir) if
f.endswith(('.png', '.jpg', '.jpeg'))])
self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
self.queue = tf.train.slice_input_producer([self.images], shuffle=False)
img_contents = tf.read_file(self.queue[0])
img = tf.image.decode_jpeg(img_contents, channels=3)
img_r, img_g, img_b = tf.split(value=img, num_or_size_splits=3, axis=2)
img = tf.cast(tf.concat([img_b, img_g, img_r], 2), dtype=tf.float32)
# Extract mean.
img -= IMG_MEAN
self.image = img
def dequeue(self, num_elements):
'''Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.'''
batch_list = [self.image]
image_batch = tf.train.batch([self.image], num_elements)
return image_batch
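# --- Illustrative usage sketch (not part of the original module). ---
# Shows how ImageReader is typically wired into a TF1 queue-based input
# pipeline; the dataset paths, crop size and batch size below are placeholders.
if __name__ == '__main__':
    coord = tf.train.Coordinator()
    reader = ImageReader(data_dir='./datasets/lip',
                         data_list='./datasets/lip/train_list.txt',
                         data_id_list='./datasets/lip/train_id.txt',
                         input_size=(384, 384),
                         random_scale=True,
                         random_mirror=True,
                         shuffle=True,
                         coord=coord)
    image_batch, label_batch, edge_batch = reader.dequeue(num_elements=8)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)
        imgs, labels, edges = sess.run([image_batch, label_batch, edge_batch])
        print(imgs.shape, labels.shape, edges.shape)
        coord.request_stop()
        coord.join(threads)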
|
the-stack_106_23732 | # Copyright 2018-2021 Alvaro Bartolome, alvarobartt @ GitHub
# See LICENSE for details.
import json
import requests
from unidecode import unidecode
from .utils import constant as cst
from .utils.search_obj import SearchObj
from .utils.extra import random_user_agent
from lxml.html import fromstring
def search_quotes(text, products=None, countries=None, n_results=None):
"""
This function will use the Investing.com search engine so to retrieve the search results of the
introduced text. This function will create a :obj:`list` of :obj:`investpy.utils.search_obj.SearchObj`
class instances which will contain the search results so that they can be easily accessed and so
to ease the data retrieval process since it can be done calling the methods `self.retrieve_recent_data()`
or `self.retrieve_historical_data(from_date, to_date)` from each class instance, which will fill the historical
data attribute, `self.data`. The information of the financial product can also be retrieved using the
function `self.retrieve_information()`, that will also dump the information in the attribute `self.info`.
Args:
text (:obj:`str`): text to search in Investing.com among all its indexed data.
products (:obj:`list` of :obj:`str`, optional):
list with the product type filter/s to be applied to search result quotes so that they match
the filters. Possible products are: `indices`, `stocks`, `etfs`, `funds`, `commodities`, `currencies`,
`crypto`, `bonds`, `certificates` and `fxfutures`, by default this parameter is set to `None` which
means that no filter will be applied, and all product type quotes will be retrieved.
countries (:obj:`list` of :obj:`str`, optional):
list with the country name filter/s to be applied to search result quotes so that they match
the filters. Possible countries can be found in the docs, by default this paremeter is set to
`None` which means that no filter will be applied, and quotes from every country will be retrieved.
n_results (:obj:`int`, optional): number of search results to retrieve and return.
Returns:
:obj:`list` of :obj:`investpy.utils.search_obj.SearchObj` or :obj:`investpy.utils.search_obj.SearchObj`:
The resulting :obj:`list` of :obj:`investpy.utils.search_obj.SearchObj` will contained the retrieved
financial products matching the introduced text if found, otherwise a RuntimeError will be raised, so as to
let the user know that no results were found for the introduced text. But note that if the n_results value
is equal to 1, a single value will be returned, instead of a list of values.
Raises:
ValueError: raised whenever any of the introduced parameter is not valid or errored.
ConnectionError: raised whenever the connection to Investing.com failed.
RuntimeError: raised when there was an error while executing the function.
"""
if not text:
raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
if not isinstance(text, str):
raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
if products and not isinstance(products, list):
raise ValueError(
'ERR#0094: products filtering parameter is optional, but if specified, it must be a list of str.')
if countries and not isinstance(countries, list):
raise ValueError(
'ERR#0128: countries filtering parameter is optional, but if specified, it must be a list of str.')
if n_results and not isinstance(n_results, int):
raise ValueError(
'ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
if n_results is not None:
if n_results < 1:
raise ValueError(
'ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
if products:
try:
products = list(map(lambda product: unidecode(product.lower().strip()), products))
except:
raise ValueError("ERR#0130: the introduced products filter must be a list of str in order to be valid.")
condition = set(products).issubset(cst.PRODUCT_FILTERS.keys())
if condition is False:
# TODO: instead of printing the possible filters, reference the docs
raise ValueError('ERR#0095: products filtering parameter possible values are: \"' + ', '.join(
cst.PRODUCT_FILTERS.keys()) + '\".')
products = [cst.PRODUCT_FILTERS[product] for product in products]
else:
products = list(cst.PRODUCT_FILTERS.values())
if countries:
try:
countries = list(map(lambda country: unidecode(country.lower().strip()), countries))
except:
raise ValueError("ERR#0131: the introduced countries filter must be a list of str in order to be valid.")
condition = set(countries).issubset(cst.COUNTRY_FILTERS.keys())
if condition is False:
# TODO: instead of printing the possible filters, reference the docs
raise ValueError('ERR#0129: countries filtering parameter possible values are: \"' + ', '.join(
cst.COUNTRY_FILTERS.keys()) + '\".')
countries = [cst.COUNTRY_FILTERS[country] for country in countries]
else:
countries = list(cst.COUNTRY_FILTERS.values())
params = {
'search_text': text,
'tab': 'quotes',
'isFilter': True,
'limit': 270,
'offset': 0
}
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
url = 'https://www.investing.com/search/service/SearchInnerPage'
search_results = list()
total_results = None
while True:
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
data = req.json()
if data['total']['quotes'] == 0:
raise RuntimeError("ERR#0093: no results found on Investing.com for the introduced text.")
if total_results is None:
total_results = data['total']['quotes']
if n_results is None:
n_results = data['total']['quotes']
for quote in data['quotes']:
country, pair_type = quote['flag'], quote['pair_type']
if countries is not None:
if quote['flag'] in countries:
country = cst.FLAG_FILTERS[quote['flag']]
else:
continue
if products is not None:
if quote['pair_type'] in products:
pair_type = cst.PAIR_FILTERS[quote['pair_type']]
else:
continue
search_obj = SearchObj(id_=quote['pairId'], name=quote['name'], symbol=quote['symbol'],
country=country, tag=quote['link'],
pair_type=pair_type, exchange=quote['exchange'])
            # Fix for search_quotes returning an inconsistent type (#339)
# if n_results == 1: return search_obj
if search_obj not in search_results: search_results.append(search_obj)
params['offset'] += 270
if len(search_results) >= n_results or len(search_results) >= total_results or params[
'offset'] >= total_results:
break
return search_results[:n_results]
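# Illustrative usage sketch (not part of the original module); the search
# text, filters and result handling below are example values only:
#
#   results = search_quotes(text='apple', products=['stocks'],
#                           countries=['united states'], n_results=5)
#   for quote in results:
#       print(quote.symbol, quote.name, quote.exchange)
#       recent = quote.retrieve_recent_data()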
def search_events(text, importances=None, countries=None, n_results=None):
"""
TODO
"""
if not text:
raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
if not isinstance(text, str):
raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
if importances and not isinstance(importances, list):
raise ValueError(
'ERR#0138: importances filtering parameter is optional, but if specified, it must be a list of str.')
if countries and not isinstance(countries, list):
raise ValueError(
'ERR#0128: countries filtering parameter is optional, but if specified, it must be a list of str.')
if n_results and not isinstance(n_results, int):
raise ValueError(
'ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
if n_results is not None:
if n_results < 1:
raise ValueError(
'ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
text_alt = text.replace(' ', '%20')
params = {
'search_text': text_alt,
'tab': 'ec_event',
'limit': 270,
'offset': 0
}
headers = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
}
# text_alt = text.replace(' ', '%20')
# text_alt = text_alt + "&tab=ec_event"
# url = "https://www.investing.com/search/?q=" + text + "&tab=ec_event"
url = 'https://www.investing.com/search/service/SearchInnerPage'
search_results = list()
total_results = None
while True:
response = requests.post(url, data=params, headers=headers)
if response.status_code != 200:
raise ConnectionError(f"ERR#0015: error {response.status_code}, try again later.")
# resp_dict = json.load(response.text)
# print(json.dumps(response.text))
events = response.json()['ec_event']
if len(events) == 0:
raise RuntimeError("ERR#0093: no results found on Investing.com for the introduced text.")
print(text)
for returned in events:
if returned["searchable"] == text:
target_url = "https://www.investing.com" + returned["link"]
response_economic = requests.post(target_url, data=params, headers=headers)
                if response_economic.status_code != 200:
                    raise ConnectionError(f"ERR#0015: error {response_economic.status_code}, try again later.")
root = fromstring(response_economic.text)
table = root.xpath(".//table[contains(@class, 'genTbl openTbl ecHistoryTbl')]/tbody/tr")
for row in table:
counter = 0
for value in row.xpath("td"):
print(value.text)
counter = counter + 1
break
# def search_events(text, importances=None, countries=None, n_results=None):
# """
# TODO
# """
#
# if not text:
# raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
#
# if not isinstance(text, str):
# raise ValueError('ERR#0074: text parameter is mandatory and it should be a valid str.')
#
# if importances and not isinstance(importances, list):
# raise ValueError('ERR#0138: importances filtering parameter is optional, but if specified, it must be a list of str.')
#
# if countries and not isinstance(countries, list):
# raise ValueError('ERR#0128: countries filtering parameter is optional, but if specified, it must be a list of str.')
#
# if n_results and not isinstance(n_results, int):
# raise ValueError('ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
#
# if n_results is not None:
# if n_results < 1:
# raise ValueError('ERR#0088: n_results parameter is optional, but if specified, it must be an integer equal or higher than 1.')
#
# params = {
# 'search_text': text,
# 'tab': 'ec_event',
# 'limit': 270,
# 'offset': 0
# }
#
# headers = {
# "User-Agent": random_user_agent(),
# "X-Requested-With": "XMLHttpRequest",
# "Accept": "text/html",
# "Accept-Encoding": "gzip, deflate, br",
# "Connection": "keep-alive",
# }
#
# print(text)
# text_alt = text.replace(' ', '%20')
# text_alt = text_alt + "&tab=ec_event"
# url = "https://www.investing.com/search/?q=" + text_alt
# print(url)
#
# # search_results = list()
# #
# # total_results = None
#
# # while True:
# response = requests.post(url, data=params, headers=headers)
# tester = fromstring(response.text)
# print(tester)
|
the-stack_106_23735 | # -*- coding: utf-8 -*-
from .buffer import Buffer
from .timeout import Timeout
from .. import context, term, atexit
from ..util import misc, fiddling
from ..context import context
import re, threading, sys, time, subprocess, logging, string
log = logging.getLogger(__name__)
class tube(Timeout):
"""
    Container of all the tube functions common to sockets, TTYs and SSH connections.
"""
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = '\n'
def __init__(self, timeout=None):
# assert type(self) == tube
# assert isinstance(self, tube), (id(type(self)), id(tube))
super(tube, self).__init__(timeout)
self.buffer = Buffer()
atexit.register(self.close)
# Functions based on functions from subclasses
    def recv(self, numb = 2**20, timeout = None):
        r"""recv(numb = 2**20, timeout = None) -> str
Receives up to `numb` bytes of data from the tube, and returns
as soon as any quantity of data is available.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> # Fake a data source
>>> t.recv_raw = lambda n: 'Hello, world'
>>> t.recv() == 'Hello, world'
True
>>> t.unrecv('Woohoo')
>>> t.recv() == 'Woohoo'
True
>>> context.log_level = 'debug'
>>> _ = t.recv() # doctest: +ELLIPSIS
[...] Received 0xc bytes:
'Hello, world'
>>> context.clear()
"""
return self._recv(numb, timeout) or ''
def unrecv(self, data):
"""unrecv(data)
Puts the specified data back at the beginning of the receive
buffer.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'hello'
>>> t.recv()
'hello'
>>> t.recv()
'hello'
>>> t.unrecv('world')
>>> t.recv()
'world'
>>> t.recv()
'hello'
"""
self.buffer.unget(data)
def _fillbuffer(self, timeout = None):
"""_fillbuffer(timeout = None)
Fills the internal buffer from the pipe, by calling
:meth:`recv_raw` exactly once.
Returns:
The bytes of data received, or ``''`` if no data was received.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda *a: 'abc'
>>> len(t.buffer)
0
>>> t._fillbuffer()
'abc'
>>> len(t.buffer)
3
"""
data = ''
with self.countdown(timeout):
data = self.recv_raw(2**20)
if data and log.isEnabledFor(logging.DEBUG):
log.debug('Received %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
log.indented(repr(line), level=logging.DEBUG)
else:
log.indented(fiddling.hexdump(data))
if data:
self.buffer.add(data)
return data
def _recv(self, numb = 2**20, timeout = None):
"""_recv(numb = 2**20, timeout = None) -> str
Recieves one chunk of from the internal buffer or from the OS if the
buffer is empty.
"""
data = ''
# No buffered data, could not put anything in the buffer
# before timeout.
if not self.buffer and not self._fillbuffer(timeout):
return ''
return self.buffer.get(numb)
def recvpred(self, pred, timeout = None):
"""recvpred(pred, timeout = None) -> str
Receives one byte at a time from the tube, until ``pred(bytes)``
evaluates to True.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call, with the currently-accumulated data.
timeout(int): Timeout for the operation
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
"""
data = ''
with self.countdown(timeout):
while not pred(data):
try:
res = self.recv(1)
except:
self.unrecv(data)
return ''
if res:
data += res
else:
self.unrecv(data)
return ''
return data
def recvn(self, numb, timeout = None):
"""recvn(numb, timeout = None) -> str
        Receives exactly `numb` bytes.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> data = 'hello world'
>>> t.recv_raw = lambda *a: data
>>> t.recvn(len(data)) == data
True
>>> t.recvn(len(data)+1) == data + data[0]
True
>>> t.recv_raw = lambda *a: None
>>> # The remaining data is buffered
>>> t.recv() == data[1:]
True
"""
# Keep track of how much data has been received
# It will be pasted together at the end if a
# timeout does not occur, or put into the tube buffer.
with self.countdown(timeout):
while self.timeout and len(self.buffer) < numb:
self._fillbuffer()
return self.buffer.get(numb)
    def recvuntil(self, delims, drop=False, timeout = None):
        """recvuntil(delims, drop = False, timeout = None) -> str
        Receive data until one of `delims` is encountered.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
        Arguments:
            delims(str,tuple): String of delimiter characters, or list of delimiter strings.
drop(bool): Drop the ending. If ``True`` it is removed from the end of the return value.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello World!"
>>> t.recvuntil(' ')
'Hello '
>>> t.clean(0)
>>> # Matches on 'o' in 'Hello'
>>> t.recvuntil(tuple(' Wor'))
'Hello'
>>> t.clean(0)
>>> # Matches expressly full string
>>> t.recvuntil(' Wor')
'Hello Wor'
>>> t.clean(0)
>>> # Matches on full string, drops match
>>> t.recvuntil(' Wor', drop=True)
'Hello'
>>> # Try with regex special characters
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello|World"
>>> t.recvuntil('|', drop=True)
'Hello'
"""
# Convert string into list of characters
if not hasattr(delims, '__iter__'):
delims = (delims,)
def escape_regex_special(sz):
specials = '\\/.*+?|()[]{}^$'
for s in specials:
sz = sz.replace(s, '\\' + s)
return sz
delims = map(escape_regex_special, delims)
expr = re.compile('(%s)' % '|'.join(delims))
data = ''
with self.countdown(timeout):
while self.timeout:
try:
res = self.recv()
except:
self.unrecv(data)
raise
if res:
data += res
if not res:
self.unrecv(data)
return ''
match = expr.search(data)
if match:
                    # Re-queue everything after the match
self.unrecv(data[match.end():])
# If we're dropping the match, return everything up to start
if drop:
return data[:match.start()]
return data[:match.end()]
return ''
def recvlines(self, numlines, keep = False, timeout = None):
r"""recvlines(numlines, keep = False, timeout = None) -> str list
        Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keep(bool): Keep newlines at the end of each line (``False``).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: '\n'
>>> t.recvlines(3)
['', '', '']
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
['Foo', 'Bar', 'Baz']
>>> t.recvlines(3, True)
['Foo\n', 'Bar\n', 'Baz\n']
"""
lines = []
with self.countdown(timeout):
for _ in xrange(numlines):
try:
# We must set 'keep' to True here so that we can
# restore the original, unmodified data to the buffer
# in the event of a timeout.
res = self.recvline(keep=True, timeout=timeout)
except:
self.unrecv(''.join(lines))
raise
if res:
lines.append(res)
else:
break
if not keep:
lines = [line.rstrip('\n') for line in lines]
return lines
    def recvline(self, keep = True, timeout = None):
        r"""recvline(keep = True, timeout = None) -> str
Receive a single line from the tube.
A "line" is any sequence of bytes terminated by the byte sequence
set in :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
keep(bool): Keep the line ending (``True``).
timeout(int): Timeout
Return:
All bytes received over the tube until the first
newline ``'\n'`` is received. Optionally retains
the ending.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\r\nBaz\n'
>>> t.recvline()
'Foo\n'
>>> t.recvline()
'Bar\r\n'
>>> t.recvline(keep = False)
'Baz'
>>> t.newline = '\r\n'
>>> t.recvline(keep = False)
'Foo\nBar'
"""
return self.recvuntil(self.newline, drop = not keep, timeout = timeout)
def recvline_pred(self, pred, keep = False, timeout = None):
r"""recvline_pred(pred, keep = False) -> str
Receive data until ``pred(line)`` returns a truthy value.
Drop all other data.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call. Returns the line for which
this function returns ``True``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Foo\nBar\nBaz\n"
>>> t.recvline_pred(lambda line: line == "Bar\n")
'Bar'
>>> t.recvline_pred(lambda line: line == "Bar\n", keep=True)
'Bar\n'
>>> t.recvline_pred(lambda line: line == 'Nope!', timeout=0.1)
''
"""
tmpbuf = Buffer()
line = ''
with self.countdown(timeout):
while self.timeout:
try:
line = self.recvline(keep=True)
except:
self.buffer.add(tmpbuf)
raise
if not line:
self.buffer.add(tmpbuf)
return ''
if pred(line):
if not keep:
line = line[:-len(self.newline)]
return line
else:
tmpbuf.add(line)
return ''
def recvline_startswith(self, delims, keep = False, timeout = None):
r"""recvline_startswith(delims, keep = False, timeout = None) -> str
        Keep receiving lines until one is found that starts with one of
        `delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keep(bool): Return lines with newlines if ``True``
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith(tuple('WXYZ'))
'World'
>>> t.recvline_startswith(tuple('WXYZ'), True)
'Xylophone\n'
>>> t.recvline_startswith('Wo')
'World'
"""
if not hasattr(delims, '__iter__'):
delims = (delims,)
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keep=keep,
timeout=timeout)
def recvline_endswith(self, delims, keep = False, timeout = None):
r"""recvline_endswith(delims, keep = False, timeout = None) -> str
        Keep receiving lines until one is found that ends with one of
        `delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith('r')
'Bar'
>>> t.recvline_endswith(tuple('abcde'), True)
'Kaboodle\n'
>>> t.recvline_endswith('oodle')
'Kaboodle'
"""
if not hasattr(delims, '__iter__'):
delims = (delims,)
delims = tuple(delim + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keep=keep,
timeout=timeout)
def recvregex(self, regex, exact = False, timeout = None):
"""recvregex(regex, exact = False, timeout = None) -> str
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
    def recvline_regex(self, regex, exact = False, keep = False, timeout = None):
        """recvline_regex(regex, exact = False, keep = False,
                          timeout = None) -> str
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keep = keep, timeout = timeout)
    def recvrepeat(self, timeout = None):
        """recvrepeat(timeout = None) -> str
Receives data until a timeout or EOF is reached.
Examples:
>>> data = [
... 'd',
... '', # simulate timeout
... 'c',
... 'b',
... 'a',
... ]
>>> def delayrecv(n, data=data):
... return data.pop()
>>> t = tube()
>>> t.recv_raw = delayrecv
>>> t.recvrepeat(0.2)
'abc'
>>> t.recv()
'd'
"""
while self._fillbuffer(timeout=timeout):
pass
return self.buffer.get()
def recvall(self):
"""recvall() -> str
Receives data until EOF is reached.
"""
        with log.waitfor('Receiving all data') as h:
l = len(self.buffer)
with self.local('inf'):
data = 'yay truthy strings'
try:
while self._fillbuffer():
h.status(misc.size(len(self.buffer)))
except EOFError:
pass
h.success("Done (%s)" % misc.size(l))
self.close()
return self.buffer.get()
def send(self, data):
"""send(data)
Sends data.
If log level ``DEBUG`` is enabled, also prints out the data
received.
If it is not possible to send anymore because of a closed
connection, it raises ``exceptions.EOFError``
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.send('hello')
'hello'
"""
if log.isEnabledFor(logging.DEBUG):
log.debug('Sent %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
log.indented(repr(line), level=logging.DEBUG)
else:
log.indented(fiddling.hexdump(data))
self.send_raw(data)
def sendline(self, line):
r"""sendline(data)
Shorthand for ``t.send(data + t.newline)``.
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.sendline('hello')
'hello\n'
>>> t.newline = '\r\n'
>>> t.sendline('hello')
'hello\r\n'
"""
self.send(line + self.newline)
def sendafter(self, delim, data, timeout = None):
"""sendafter(delim, data, timeout = None) -> str
A combination of ``recvuntil(delim, timeout)`` and ``send(data)``.
"""
res = self.recvuntil(delim, timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = None):
"""sendlineafter(delim, data, timeout = None) -> str
A combination of ``recvuntil(delim, timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = None):
"""sendthen(delim, data, timeout = None) -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout)
def sendlinethen(self, delim, data, timeout = None):
"""sendlinethen(delim, data, timeout = None) -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data + self.newline)
return self.recvuntil(delim, timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
Thus it only works in while in :data:`pwnlib.term.term_mode`.
"""
log.info('Switching to interactive mode')
go = threading.Event()
def recv_thread():
while not go.isSet():
try:
cur = self.recv(timeout = 0.05)
if cur:
sys.stdout.write(cur)
sys.stdout.flush()
except EOFError:
log.info('Got EOF while reading in interactive')
break
t = context.thread(target = recv_thread)
t.daemon = True
t.start()
try:
while not go.isSet():
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
data = sys.stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go.set()
log.info('Got EOF while sending in interactive')
else:
go.set()
except KeyboardInterrupt:
log.info('Interrupted')
go.set()
while t.is_alive():
t.join(timeout = 0.1)
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
If ``timeout`` is zero, only cached data will be cleared.
Note: If timeout is set to zero, the underlying network is
not actually polled; only the internal buffer is cleared.
Examples:
>>> t = tube()
>>> t.unrecv('clean me up')
>>> t.clean(0)
>>> len(t.buffer)
0
"""
# Clear the internal buffer early, so that _recv()
# does not loop over it and concatenate unnecessarily.
self.buffer.get()
data = 'demo'
while timeout and data:
data = self.recv(timeout = timeout)
def clean_and_log(self, timeout = 0.05):
"""clean_and_log(timeout = 0.05)
        Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
data with :meth:`pwnlib.log.info`.
Examples:
>>> def recv(n, data=['', 'hooray_data']):
... while data: return data.pop()
>>> context.log_level = 'info'
>>> t = tube()
>>> t.recv_raw = recv
>>> t.connected_raw = lambda d: True
>>> t.fileno = lambda: 1234
>>> t.clean_and_log() #doctest: +ELLIPSIS
[...] Cleaning tube (fileno = 1234):
hooray_data
>>> context.clear()
"""
if self.connected():
log.info('Cleaning tube (fileno = %d):' % self.fileno())
log.indented(self.recvrepeat(timeout = timeout))
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> import time
>>> _=(b.connect_input(a), time.sleep(0.1))
data
"""
def pump():
import sys as _sys
while self.timeout:
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if not data:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = context.thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> _=(a.connect_output(b), time.sleep(0.1))
data
"""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
return subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
"""
Shorthand for connecting multiple tubes.
See :meth:`connect_input` for more information.
Examples:
The following are equivalent ::
                tube_a >> tube_b
tube_a.connect_input(tube_b)
This is useful when chaining multiple tubes ::
tube_a >> tube_b >> tube_a
tube_a.connect_input(tube_b)
tube_b.connect_input(tube_a)
"""
self.connect_input(other)
return other
def __rshift__(self, other):
"""
Inverse of the ``<<`` operator. See :meth:`__lshift__`.
See :meth:`connect_input` for more information.
"""
self.connect_output(other)
return other
def __ne__(self, other):
"""
        Shorthand for connecting tubes to each other.
The following are equivalent ::
a >> b >> a
a <> b
See :meth:`connect_input` for more information.
"""
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds.
Examples:
>>> import time
>>> t = tube()
>>> t.can_recv_raw = lambda *a: False
>>> t.can_recv()
False
>>> _=t.unrecv('data')
>>> t.can_recv()
True
>>> _=t.recv()
>>> t.can_recv()
False
"""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
Examples:
>>> t = tube()
>>> t.settimeout_raw = lambda t: None
>>> t.settimeout(3)
>>> t.timeout == 3
True
"""
self.timeout = timeout
self.settimeout_raw(self.timeout)
shutdown_directions = {
'in': 'recv',
'read': 'recv',
'recv': 'recv',
'out': 'send',
'write': 'send',
'send': 'send',
}
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
        Closes the tube for further reading or writing depending on `direction`.
Args:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
Examples:
>>> def p(x): print x
>>> t = tube()
>>> t.shutdown_raw = p
>>> _=map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send'))
recv
recv
recv
send
send
send
>>> t.shutdown('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.shutdown_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
else:
self.shutdown_raw(self.shutdown_directions[direction])
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Args:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
Doctest:
>>> def p(x): print x
>>> t = tube()
>>> t.connected_raw = p
>>> _=map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send'))
any
recv
recv
recv
send
send
send
>>> t.connected('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.connected_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.connected_directions))
else:
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
Examples:
.. doctest::
>>> t = tube()
>>> def p(x): print x
>>> t.close = lambda: p("Closed!")
>>> with t: pass
Closed!
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement
See :meth:`__enter__`
"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout, it should return None, in case
of a closed connection it should raise an ``exceptions.EOFError``.
"""
raise EOFError('Not implemented')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
        Should raise ``exceptions.EOFError`` if it is unable to send any
        more, because of a closed tube.
"""
raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
raise NotImplementedError()
def timeout_change(self):
"""
Informs the raw layer of the tube that the timeout has changed.
Should not be called directly.
Inherited from :class:`Timeout`.
"""
try:
self.settimeout_raw(self.timeout)
except NotImplementedError:
pass
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True, if
there is data available within the timeout, but
ignores the buffer on the object.
"""
raise NotImplementedError()
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
raise NotImplementedError()
def close(self):
"""close()
Closes the tube.
"""
pass
# Ideally we could:
# raise NotImplementedError()
# But this causes issues with the unit tests.
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
raise NotImplementedError()
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
raise NotImplementedError()
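# --- Illustrative sketch (not part of the original module). ---
# A minimal in-memory tube implementing the raw interface above; handy for
# exercising the buffered recv*/send* helpers without a real socket. The
# class name and behaviour are assumptions for demonstration only.
class stringtube(tube):
    def __init__(self, data='', timeout=None):
        super(stringtube, self).__init__(timeout)
        self._in = data      # bytes still available to recv_raw
        self._out = []       # everything passed to send_raw
    def recv_raw(self, numb):
        if not self._in:
            raise EOFError('stringtube exhausted')
        chunk, self._in = self._in[:numb], self._in[numb:]
        return chunk
    def send_raw(self, data):
        self._out.append(data)
    def settimeout_raw(self, timeout):
        pass
    def can_recv_raw(self, timeout):
        return bool(self._in)
    def connected_raw(self, direction):
        return True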
|
the-stack_106_23737 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from builtins import object
from mock import MagicMock, patch
from nose.plugins.attrib import attr
from nose.tools import assert_raises, assert_false, eq_
from nose import SkipTest
from django.contrib.auth.models import User
from desktop.auth.backend import rewrite_user
from desktop.lib.fs import ProxyFS
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_permission, remove_from_group
def test_fs_selection():
make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
user = User.objects.get(username='test')
with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
_has_access.return_value = True
s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
proxy_fs.setuser(user)
proxy_fs.isdir('s3a://bucket/key')
s3fs.isdir.assert_called_once_with('s3a://bucket/key')
assert_false(hdfs.isdir.called)
proxy_fs.isfile('hdfs://localhost:42/user/alice/file')
hdfs.isfile.assert_called_once_with('hdfs://localhost:42/user/alice/file')
assert_false(s3fs.isfile.called)
proxy_fs.isdir('adl://net/key')
adls.isdir.assert_called_once_with('adl://net/key')
assert_false(hdfs.isdir.called)
proxy_fs.isdir('abfs://net/key')
abfs.isdir.assert_called_once_with('abfs://net/key')
assert_false(hdfs.isdir.called)
assert_raises(IOError, proxy_fs.stats, 'ftp://host')
def wrapper(mock):
def tmp(*args, **kwargs):
return mock
return tmp
def test_multi_fs_selection():
make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
user = User.objects.get(username='test')
with patch('desktop.lib.fs.ProxyFS._has_access') as _has_access:
_has_access.return_value = True
s3fs, adls, hdfs, abfs = MagicMock(), MagicMock(), MagicMock(), MagicMock()
proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
proxy_fs.setuser(user)
proxy_fs.copy('s3a://bucket1/key', 's3a://bucket2/key')
s3fs.copy.assert_called_once_with('s3a://bucket1/key', 's3a://bucket2/key')
assert_false(hdfs.copy.called)
proxy_fs.copyfile('s3a://bucket/key', 'key2')
s3fs.copyfile.assert_called_once_with('s3a://bucket/key', 'key2')
assert_false(hdfs.copyfile.called)
proxy_fs.copyfile('adl://net/key', 'key2')
adls.copyfile.assert_called_once_with('adl://net/key', 'key2')
assert_false(hdfs.copyfile.called)
proxy_fs.copyfile('abfs:/key', 'key2')
abfs.copyfile.assert_called_once_with('abfs:/key', 'key2')
assert_false(hdfs.copyfile.called)
proxy_fs.rename('/tmp/file', 'shmile')
hdfs.rename.assert_called_once_with('/tmp/file', 'shmile')
assert_false(s3fs.rename.called)
# Will be addressed in HUE-2934
assert_raises(NotImplementedError, proxy_fs.copy_remote_dir, 's3a://bucket/key', 'adl://tmp/dir') # Exception can only be thrown if scheme is specified, else default to 1st scheme
def test_constructor_given_invalid_arguments():
assert_raises(ValueError, ProxyFS, {'s3a': {}}, 'hdfs')
class MockFs(object):
def __init__(self, filebrowser_action=None):
self.user = None
self._filebrowser_action = filebrowser_action
def setuser(self, user):
self.user = user
def filebrowser_action(self):
return self._filebrowser_action
class TestFsPermissions(object):
def test_fs_permissions_regular_user(self):
user_client = make_logged_in_client(username='test', groupname='default', recreate=True, is_superuser=False)
user = User.objects.get(username='test')
s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
proxy_fs.setuser(user)
f = proxy_fs._get_fs
remove_from_group(user.username, 'has_s3')
remove_from_group(user.username, 'has_adls')
remove_from_group(user.username, 'has_abfs')
# No perms by default
assert_raises(Exception, f, 's3a://bucket')
assert_raises(Exception, f, 'S3A://bucket/key')
assert_raises(Exception, f, 'adl://net/key')
assert_raises(Exception, f, 'adl:/key')
assert_raises(Exception, f, 'abfs:/key')
f('hdfs://path')
f('/tmp')
try:
# Add perm
add_permission('test', 'has_s3', permname='s3_access', appname='filebrowser')
add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')
add_permission('test', 'has_abfs', permname='abfs_access', appname='filebrowser')
f('s3a://bucket')
f('S3A://bucket/key')
f('adl://net/key')
f('adl:/key')
f('abfs:/key')
f('hdfs://path')
f('/tmp')
finally:
remove_from_group(user.username, 'has_s3')
remove_from_group(user.username, 'has_adls')
remove_from_group(user.username, 'has_abfs')
def test_fs_permissions_admin_user(self):
user_client = make_logged_in_client(username='admin', groupname='default', recreate=True, is_superuser=True)
user = User.objects.get(username='admin')
s3fs, adls, hdfs, abfs = MockFs("s3_access"), MockFs("adls_access"), MockFs(), MockFs("abfs_access")
proxy_fs = ProxyFS({'s3a': wrapper(s3fs), 'hdfs': wrapper(hdfs), 'adl': wrapper(adls), 'abfs': wrapper(abfs)}, 'hdfs')
proxy_fs.setuser(user)
f = proxy_fs._get_fs
f('s3a://bucket')
f('S3A://bucket/key')
f('adl://net/key')
f('adl:/key')
f('abfs:/key')
f('hdfs://path')
f('/tmp')
|
the-stack_106_23741 | #!/usr/bin/env python
# Copyright (c) 2018 Andy Zeng
import socket
import struct
import time
import numpy as np
import os
import sys
import math
import matplotlib.pyplot as plt
import pybullet as p
import pybullet_data
from datetime import datetime
import gym
from gym import spaces
from gym.utils import seeding
from robot import Robot
class PyBulletSim(gym.Env):
def __init__(self, gui=True, timeStep=0.01, NAgents=2, maxEpisodeLength=1000):
self._timeStep = timeStep
self._p = p
self._gui = gui
if gui:
            # Try to attach to an already running physics server over shared
            # memory; if that fails, fall back to starting a new GUI instance.
            cid = p.connect(p.SHARED_MEMORY)
            if (cid < 0):
                cid = p.connect(p.GUI)
else:
p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -9.81)
p.setTimeStep(self._timeStep)
p.setRealTimeSimulation(0)
self._plane_id = p.loadURDF("plane.urdf")
self._boxId = p.loadURDF("assets/objects/cube/cube.urdf")
# how many steps to stepSimulation before punishing illegal actions
self._actionSteps = 50
self.NAgents = NAgents
#################################
##########SET UP ROBOT###########
#################################
positions = [[0.8, 0, 0], [-0.8, 0, 0], [0, 0.8, 0], [0, -0.8, 0]]
rotations = [[0, 0, np.pi], [0, 0, 0], [0, 1, 0], [0, -1, 0]]
self.robots = []
for i in range(self.NAgents):
self.robots.append(
Robot(self._timeStep, pos=positions[i], rot=rotations[i]))
maxVelocity, timeStep, Njoints = self.robots[0].getConfigs()
#################################
#######SET UP ACTION SPACE#######
#################################
action_dim = Njoints
action_high = np.array(
[maxVelocity*timeStep*self._actionSteps] * action_dim)
self.action_space = np.array(
[spaces.Box(-action_high, action_high) for _ in range(self.NAgents)])
#################################
####SET UP OBSERVATION SPACE#####
#################################
self._observation = []
# Each agents have 6 joints and box position and poses
# observation dimension for each agent
observation_dim = Njoints * self.NAgents + 7
observation_high = np.array([1] * observation_dim)
self.observation_space = np.array(
[spaces.Box(-observation_high, observation_high) for _ in range(self.NAgents)])
self.viewer = None # TODO what is this for
#################################
#####OTHER OPENAI GYM STUFF######
#################################
self._max_episode_steps = maxEpisodeLength
self._current_episode_step = 0
### TEMP FOR DEBUGGING ###
self._read_time_debug = p.addUserDebugParameter(
'real-time', 0.0, 1.0, 0.0)
self.seed() # TODO
self.reset()
def reset(self):
self.terminate_episode = False
for robot in self.robots:
robot.reset()
for _ in range(50):
p.stepSimulation()
self._current_episode_step = 0
self.resetBox()
p.stepSimulation()
self._observation = self._getObservation()
return self._observation
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _getObservation(self):
robotJoints = []
for robot in self.robots:
robotJoints.append(robot.getJoints())
observation = []
boxPos, boxRot = p.getBasePositionAndOrientation(self._boxId)
boxPos = np.array(boxPos)
boxRot = np.array(boxRot)
boxObservation = np.concatenate((boxPos, boxRot))
for i in range(len(self.robots)):
robotObservation = np.array([])
# Relative box position and rotation
boxObservation = np.concatenate(
(boxPos - self.robots[i]._pos, boxRot))
robotObservation = np.concatenate(
(robotObservation, boxObservation))
# Self Joints
robotObservation = np.concatenate(
(robotObservation, robotJoints[i]))
# Other's joints
for j in range(len(self.robots)):
if j != i:
robotObservation = np.concatenate((
robotObservation, robotJoints[j]))
observation.append(robotObservation)
return np.array(observation)
def step(self, actions):
assert len(actions) == self.NAgents, "Wrong Action Dimensions"
self._current_episode_step += 1
rewards = np.zeros(len(self.robots))
# Compute target joint state
target_joint_states = [robot.getJoints() + action
for robot, action in zip(self.robots, actions)]
# Set Robot's target joint state
for i, robot in enumerate(self.robots):
robot.setTargetJointState(target_joint_states[i])
for _ in range(self._actionSteps):
p.stepSimulation()
if p.readUserDebugParameter(self._read_time_debug) == 1.0:
time.sleep(0.01)
rewards += self._getReward()
# Punish agent if suggested illegal action
for i, robot in enumerate(self.robots):
actual_joint_state = robot.getJoints()
if not all([np.abs(actual_joint_state[joint_id]-target_joint_states[i][joint_id]) < 0.01 for joint_id in range(6)]):
rewards[i] -= 1
done = self.terminate_episode or self._current_episode_step >= self._max_episode_steps
if not done and self._gui:
self._observation = self._getObservation()
return self._observation, rewards, done, {}
def _getReward(self):
rewards = np.zeros(len(self.robots))
for i, robot in enumerate(self.robots):
touchedBox = p.getContactPoints(
self._boxId, robot._palm_body_id) != ()
# touchedGround = p.getContactPoints(
# self._plane_id, robot._palm_body_id) != ()
# touchedPalmSelf = p.getContactPoints(
# robot._robot_body_id, robot._palm_body_id) != ()
# touchedSelf = p.getContactPoints(
# robot._robot_body_id, robot._robot_body_id) != ()
if touchedBox:
rewards[i] += 1
# print("[{}] {} touched box!".format(
# datetime.now().strftime("%H:%M:%S"), i))
# if touchedPalmSelf:
# rewards[i] -= 1
# if touchedSelf:
# rewards[i] -= 1
# print("[{}] {} self collision!".format(
# int(round(time.time() * 1000)) % 100000, i))
# if touchedGround:
# rewards[i] -= 1
return rewards
def resetBox(self):
random_orientation = [
np.random.randint(2)*np.pi/2,
np.random.randint(2)*np.pi/2,
np.random.random_sample()*2*np.pi-np.pi]
p.resetBasePositionAndOrientation(
self._boxId, [0, 1*(np.random.random_sample()-0.5), 0.3], p.getQuaternionFromEuler(random_orientation))
for _ in range(10):
p.stepSimulation()
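# --- Illustrative usage sketch (not part of the original module). ---
# Drives the environment with random joint-velocity targets. Assumes the
# URDF assets referenced in __init__ are available locally and that a
# display is present (step() reads a GUI debug slider).
if __name__ == '__main__':
    env = PyBulletSim(gui=True, NAgents=2, maxEpisodeLength=200)
    obs = env.reset()
    done = False
    while not done:
        actions = [space.sample() for space in env.action_space]
        obs, rewards, done, _ = env.step(actions)
    print('episode finished, last rewards:', rewards)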
|
the-stack_106_23742 | # -*- coding: utf-8 -*-
import scrapy
# from scrapy.shell import inspect_response
from scrapy import signals
from jobcrawl.items import JobItem
from scrapy.http import HtmlResponse
from jobcrawl.selenium_scraper import DrushimScraper
from pydispatch import dispatcher
# from scrapy.xlib.pydispatch import dispatcher
# import sys
# import locale
# import codecs
import re
import datetime
class DrushimSpider(scrapy.Spider):
name = "drushim"
allowed_domains = ["drushim.co.il"]
base_url = "https://www.drushim.co.il"
scrape_url = 'https://www.drushim.co.il/jobs/search/%22%22/?ssaen=1'
start_urls = (scrape_url, )
seen_job_ids = set()
def __init__(self):
# sys.stdout = codecs.getwriter(
# locale.getpreferredencoding())(sys.stdout)
# reload(sys)
# sys.setdefaultencoding('utf-8')
self.selenium_scraper = DrushimScraper(self.scrape_url, self.logger)
dispatcher.connect(self.spider_closed, signals.spider_closed)
self.total_jobs = 0
def parse(self, response):
page = 1
for page_source in self.selenium_scraper.scrape():
response = HtmlResponse(url=self.scrape_url, body=page_source, encoding='utf-8')
page_job_count = 0
for item in self.parse_html(response):
self.total_jobs += 1
page_job_count += 1
yield item
self.logger.info("Drushim: Page %s job count = %s, total_jobs=%s", page, page_job_count, self.total_jobs)
page += 1
def parse_html(self, response):
job_container_list = response.xpath(
"//div[@class='job-item-main pb-3 job-hdr']")
for job_container in job_container_list:
job_link = job_container.xpath(
".//div[@class='flex nowrap align-self-center pc-view open-job text-center']/a/@href").extract_first()
if job_link:
job_link = "{}{}".format(self.base_url, job_link)
try:
job_id = "-".join(job_link.split("/")[-2:])
except:
job_id = ""
if not job_id or job_id in self.seen_job_ids:
continue
self.seen_job_ids.add(job_id)
try:
job_title = job_container.xpath(
".//span[@class='job-url primary--text font-weight-bold primary--text']").xpath(
"normalize-space(string())").extract_first()
except:
job_title = ""
try:
company = job_container.xpath(
".//div[@class='layout job-details-top mt-md-2 align-baseline']"
"/div[@class='flex grow-none ml-3']/p").xpath("normalize-space(string())").extract_first()
except:
company = ""
try:
company_jobs = job_container.xpath(
".//div[@class='layout job-details-top mt-md-2 align-baseline']"
"/div[@class='flex grow-none ml-3']/p/a/@href").extract_first()
except:
company_jobs = job_link
job_description = ""
job_sub_details = job_container.xpath(".//div[@class='layout job-details-sub']").xpath(
"normalize-space(string())").extract_first()
jsd_val = []
if job_sub_details:
jsd_val = job_sub_details.split("|")
country_areas = jsd_val[0].strip() if jsd_val else ""
category = jsd_val[2].strip() if jsd_val and len(jsd_val) == 4 else ""
job_post_date = jsd_val[-1].strip() if jsd_val and len(jsd_val) == 4 else ""
try:
job_post_date_num = re.findall(r'[\d]+', job_post_date)[0]
job_post_date_num = int(job_post_date_num)
if job_post_date_num:
                    second = 'שְׁנִיָה'
                    seconds = 'שניות'
                    minute = 'דַקָה'
                    minutes = 'דקות'
                    hour = 'שָׁעָה'
                    hours = 'שעות'
                    day = 'יְוֹם'
                    days = 'ימים'
# month = 'חוֹדֶשׁ'.decode('utf-8')
# months = 'חודשים'.decode('utf-8')
hms = [second, seconds, minute, minutes, hour, hours]
if day in job_post_date:
job_post_date = datetime.date.today() - \
datetime.timedelta(days=job_post_date_num)
job_post_date = job_post_date.strftime("%d/%m/%Y")
elif days in job_post_date:
job_post_date = datetime.date.today() - \
datetime.timedelta(days=job_post_date_num)
job_post_date = job_post_date.strftime("%d/%m/%Y")
elif [x for x in hms if x in job_post_date]:
job_post_date = datetime.date.today()
job_post_date = job_post_date.strftime("%d/%m/%Y")
elif job_post_date_num == 0:
job_post_date = datetime.date.today()
job_post_date = job_post_date.strftime("%d/%m/%Y")
else:
job_post_date = job_post_date
except:
job_post_date = ""
item = JobItem()
item['Job'] = {
'Site': 'Drushim',
'Company': company,
'Company_jobs': company_jobs,
'Job_id': job_id,
'Job_title': job_title,
'Job_Description': job_description,
'Job_Post_Date': job_post_date,
'Job_URL': job_link,
'Country_Areas': country_areas,
'Job_categories': category,
'AllJobs_Job_class': '',
'unique_id': 'drushim_{}'.format(job_id)
}
yield item
def spider_closed(self, spider):
self.selenium_scraper.close_driver()
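    # To run this spider from the Scrapy project root (standard invocation):
    #
    #     scrapy crawl drushim
    #
    # Yielded JobItem objects are then processed by whatever item pipelines the
    # jobcrawl project has configured.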
|
the-stack_106_23743 | """Functions to load demo datasets."""
import io
import logging
import os
import urllib.request
from datetime import datetime, timedelta
from zipfile import ZipFile
import numpy as np
import pandas as pd
import scipy as sp
from faker import Faker
from sdv.metadata import Metadata, Table
LOGGER = logging.getLogger(__name__)
DEMO_METADATA = {
'tables': {
'users': {
'primary_key': 'user_id',
'fields': {
'user_id': {
'type': 'id',
'subtype': 'integer'
},
'country': {
'type': 'categorical'
},
'gender': {
'type': 'categorical'
},
'age': {
'type': 'numerical',
'subtype': 'integer'
}
}
},
'sessions': {
'primary_key': 'session_id',
'fields': {
'session_id': {
'type': 'id',
'subtype': 'integer'
},
'user_id': {
'ref': {
'field': 'user_id',
'table': 'users'
},
'type': 'id',
'subtype': 'integer'
},
'device': {
'type': 'categorical'
},
'os': {
'type': 'categorical'
},
'minutes': {
'type': 'numerical',
'subtype': 'integer'
}
}
},
'transactions': {
'primary_key': 'transaction_id',
'fields': {
'transaction_id': {
'type': 'id',
'subtype': 'integer'
},
'session_id': {
'ref': {
'field': 'session_id',
'table': 'sessions'
},
'type': 'id',
'subtype': 'integer'
},
'timestamp': {
'type': 'datetime',
'format': '%Y-%m-%dT%H:%M'
},
'amount': {
'type': 'numerical',
'subtype': 'float'
},
'cancelled': {
'type': 'boolean'
}
}
}
}
}
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
DATA_URL = 'https://sdv-datasets.s3.amazonaws.com/{}.zip'
DATASETS_URL = 'https://sdv-datasets.s3.amazonaws.com/datasets.csv'
def _dtypes64(table):
for name, column in table.items():
if column.dtype == np.int32:
table[name] = column.astype('int64')
elif column.dtype == np.float32:
table[name] = column.astype('float64')
return table
def _download(dataset_name, data_path):
url = DATA_URL.format(dataset_name)
LOGGER.info('Downloading dataset {} from {}'.format(dataset_name, url))
response = urllib.request.urlopen(url)
bytes_io = io.BytesIO(response.read())
LOGGER.info('Extracting dataset into {}'.format(data_path))
with ZipFile(bytes_io) as zf:
zf.extractall(data_path)
def _get_dataset_path(dataset_name, data_path):
if not os.path.exists(data_path):
os.makedirs(data_path)
if not os.path.exists(os.path.join(data_path, dataset_name)):
_download(dataset_name, data_path)
return os.path.join(data_path, dataset_name)
def _load_relational_dummy():
users = pd.DataFrame({
'user_id': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'country': ['US', 'UK', 'ES', 'UK', 'US', 'DE', 'BG', 'ES', 'FR', 'UK'],
'gender': ['M', 'F', None, 'M', 'F', 'M', 'F', None, 'F', None],
'age': [34, 23, 44, 22, 54, 57, 45, 41, 23, 30]
})
sessions = pd.DataFrame({
'session_id': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'user_id': [0, 1, 1, 2, 4, 5, 6, 6, 6, 8],
'device': ['mobile', 'tablet', 'tablet', 'mobile', 'mobile',
'mobile', 'mobile', 'tablet', 'mobile', 'tablet'],
'os': ['android', 'ios', 'android', 'android', 'ios',
'android', 'ios', 'ios', 'ios', 'ios'],
'minutes': [23, 12, 8, 13, 9, 32, 7, 21, 29, 34],
})
transactions = pd.DataFrame({
'transaction_id': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'session_id': [0, 0, 1, 3, 5, 5, 7, 8, 9, 9],
'timestamp': ['2019-01-01T12:34:32', '2019-01-01T12:42:21', '2019-01-07T17:23:11',
'2019-01-10T11:08:57', '2019-01-10T21:54:08', '2019-01-11T11:21:20',
'2019-01-22T14:44:10', '2019-01-23T10:14:09', '2019-01-27T16:09:17',
'2019-01-29T12:10:48'],
'amount': [100.0, 55.3, 79.5, 112.1, 110.0, 76.3, 89.5, 132.1, 68.0, 99.9],
'cancelled': [False, False, False, True, True, False, False, True, False, False],
})
transactions['timestamp'] = pd.to_datetime(transactions['timestamp'])
tables = {
'users': _dtypes64(users),
'sessions': _dtypes64(sessions),
'transactions': _dtypes64(transactions),
}
return Metadata(DEMO_METADATA), tables
def sample_relational_demo(size=30):
"""Sample demo data with the indicate number of rows in the parent table."""
# Users
faker = Faker()
countries = [faker.country_code() for _ in range(5)]
country = np.random.choice(countries, size=size)
gender = np.random.choice(['F', 'M', None], p=[0.5, 0.4, 0.1], size=size)
age = (
sp.stats.truncnorm.rvs(-1.2, 1.5, loc=30, scale=10, size=size).astype(int)
+ 3 * (gender == 'M')
+ 3 * (country == countries[0]).astype(int)
)
num_sessions = (
sp.stats.gamma.rvs(1, loc=0, scale=2, size=size)
* (0.8 + 0.2 * (gender == 'F'))
).round().astype(int)
users = pd.DataFrame({
'country': country,
'gender': gender,
'age': age,
'num_sessions': num_sessions
})
users.index.name = 'user_id'
# Sessions
sessions = pd.DataFrame()
for user_id, user in users.iterrows():
device_weights = [0.1, 0.4, 0.5] if user.gender == 'M' else [0.3, 0.4, 0.3]
devices = np.random.choice(
['mobile', 'tablet', 'pc'],
size=user.num_sessions,
p=device_weights
)
os = []
pc_weights = [0.6, 0.3, 0.1] if user.age > 30 else [0.2, 0.4, 0.4]
pc_os = np.random.choice(['windows', 'macos', 'linux'], p=pc_weights)
phone_weights = [0.7, 0.3] if user.age > 30 else [0.9, 0.1]
phone_os = np.random.choice(['android', 'ios'], p=phone_weights)
for device in devices:
os.append(pc_os if device == 'pc' else phone_os)
minutes = (
sp.stats.truncnorm.rvs(-3, 3, loc=30, scale=10, size=user.num_sessions)
* (1 + 0.1 * (user.gender == 'M'))
* (1 + user.age / 100)
* (1 + 0.1 * (devices == 'pc'))
)
num_transactions = (minutes / 10) * (0.5 + (user.gender == 'F'))
sessions = sessions.append(pd.DataFrame({
'user_id': np.full(user.num_sessions, int(user_id)),
'device': devices,
'os': os,
'minutes': minutes.round().astype(int),
'num_transactions': num_transactions.round().astype(int),
}), ignore_index=True)
sessions.index.name = 'session_id'
del users['num_sessions']
# Transactions
transactions = pd.DataFrame()
for session_id, session in sessions.iterrows():
size = session.num_transactions
if size:
amount_base = sp.stats.truncnorm.rvs(-2, 4, loc=100, scale=50, size=size)
is_apple = session['os'] in ('ios', 'macos')
amount_modif = np.random.random(size) * 100 * is_apple
amount = amount_base / np.random.randint(1, size + 1) + amount_modif
seconds = np.random.randint(3600 * 24 * 365)
start = datetime(2019, 1, 1) + timedelta(seconds=seconds)
timestamp = sorted([
start + timedelta(seconds=int(seconds))
for seconds in np.random.randint(60 * session.minutes, size=size)
])
cancelled = np.random.random(size=size) < (1 / (size * 2))
transactions = transactions.append(pd.DataFrame({
'session_id': np.full(session.num_transactions, int(session_id)),
'timestamp': timestamp,
'amount': amount.round(2),
'cancelled': cancelled,
}), ignore_index=True)
transactions.index.name = 'transaction_id'
del sessions['num_transactions']
tables = {
'users': _dtypes64(users.reset_index()),
'sessions': _dtypes64(sessions.reset_index()),
'transactions': _dtypes64(transactions.reset_index()),
}
return Metadata(DEMO_METADATA), tables
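# Example usage (illustrative sketch, not part of the original module). Row
# counts for sessions and transactions are random because they are sampled per
# user / per session:
#
#     >>> metadata, tables = sample_relational_demo(size=100)
#     >>> len(tables['users'])
#     100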
def _load_demo_dataset(dataset_name, data_path):
dataset_path = _get_dataset_path(dataset_name, data_path)
meta = Metadata(metadata=os.path.join(dataset_path, 'metadata.json'))
tables = {
name: _dtypes64(table)
for name, table in meta.load_tables().items()
}
return meta, tables
def load_demo(dataset_name=None, data_path=DATA_PATH, metadata=False):
"""Load relational demo data.
If a dataset name is given, it is downloaded from the sdv-datasets S3 bucket.
Otherwise, a toy dataset with three simple tables is loaded:
* users: user data including country, gender and age.
* sessions: sessions data with a foreign key to user.
* transactions: transactions data with a foreign key to sessions.
If ``metadata`` is ``True``, the output will be a tuple with a ``Metadata``
instance for the dataset and a ``tables`` dict that contains the tables loaded
as ``pandas.DataFrames``.
If ``metadata`` is ``False``, only the ``tables`` are returned.
Args:
dataset_name (str):
Dataset name to be downloaded, if ``None`` use default demo data. Defaults to ``None``.
data_path (str):
Data path to save the dataset files, only used if dataset_name is provided.
Defaults to ``DATA_PATH``.
metadata (bool):
If ``True`` return Metadata object. Defaults to ``False``.
Returns:
dict or tuple:
If ``metadata`` is ``False`` return a ``dict`` with the tables data.
If ``metadata`` is ``True`` return a ``tuple`` with Metadata and tables data.
"""
if dataset_name:
meta, tables = _load_demo_dataset(dataset_name, data_path)
else:
meta, tables = _load_relational_dummy()
if metadata:
return meta, tables
return tables
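# Example usage (illustrative sketch, not part of the original module):
#
#     >>> metadata, tables = load_demo(metadata=True)
#     >>> sorted(tables)
#     ['sessions', 'transactions', 'users']
#     >>> tables['users'].shape
#     (10, 4)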
def _load_tabular_dummy():
"""Load a dummy tabular demo dataframe."""
age = np.random.randint(30, 50, 12)
age_when_joined = age - np.random.randint(0, 10, 12)
faker = Faker()
names = [faker.name() for _ in range(12)]
    addresses = [faker.address() for _ in range(12)]
salary = np.random.uniform(30000, 160000, 12).round(2)
years_exp = np.random.randint(1, 6, 12)
return pd.DataFrame({
'company': ['Pear', 'Pear', 'Glasses', 'Glasses', 'Cheerper', 'Cheerper'] * 2,
'department': ['Sales', 'Design', 'AI', 'Search Engine', 'BigData', 'Support'] * 2,
'name': names,
        'address': addresses,
'age': age,
'age_when_joined': age_when_joined,
'years_in_the_company': age - age_when_joined,
'salary': salary,
'prior_years_experience': years_exp,
'full_time': [1.0, 0.0, 1.0, 1.0, 0.0, 0.0] * 2,
'part_time': [0.0, 0.0, 0.0, 0.0, 1.0, 1.0] * 2,
'contractor': [0.0, 1.0, 0.0, 0.0, 0.0, 0.0] * 2
})
def load_tabular_demo(dataset_name=None, table_name=None, data_path=DATA_PATH, metadata=False):
"""Load a tabular demo.
If a dataset name is given, it is downloaded from the sdv-datasets S3 bucket.
Otherwise, a toy dataset with a single table that contains data from a short fake
collection of employees.
If ``metadata`` is ``True``, the output will be a tuple with a ``Metadata``
instance for the dataset and a ``pandas.DataFrame`` with the data from the table.
If ``metadata`` is ``False``, only the ``pandas.DataFrame`` is returned.
Args:
dataset_name (str):
Dataset name to be downloaded, if ``None`` use default demo data. Defaults to ``None``.
table_name (str):
If a table name is given, return this table from the indicated dataset.
Otherwise, return the first one.
data_path (str):
Data path to save the dataset files, only used if dataset_name is provided.
Defaults to ``DATA_PATH``.
metadata (bool):
If ``True`` also return a Table object. Defaults to ``False``.
Returns:
pandas.DataFrame or tuple:
If ``metadata`` is ``False`` return a ``pandas.DataFrame`` with the tables data.
If ``metadata`` is ``True`` return a ``tuple`` with a Table and the data.
"""
if dataset_name:
meta, tables = _load_demo_dataset(dataset_name, data_path)
if table_name is None:
table_name = meta.get_tables()[0]
table = _dtypes64(tables[table_name])
if metadata:
return Table.from_dict(meta.get_table_meta(table_name)), table
return table
table = _dtypes64(_load_tabular_dummy())
if metadata:
table_meta = Table.from_dict({
'fields': {
'company': {'type': 'categorical'},
'department': {'type': 'categorical'},
'name': {'type': 'categorical'},
'address': {'type': 'categorical'},
'age': {'type': 'numerical', 'subtype': 'integer'},
'age_when_joined': {'type': 'numerical', 'subtype': 'integer'},
'years_in_the_company': {'type': 'numerical', 'subtype': 'integer'},
'salary': {'type': 'numerical', 'subtype': 'float'},
'prior_years_experience': {'type': 'numerical', 'subtype': 'integer'}
},
'constraints': [
{
'constraint': 'UniqueCombinations',
'columns': ['company', 'department'],
},
{
'constraint': 'GreaterThan',
'low': 'age_when_joined',
'high': 'age'
},
{
'constraint': 'GreaterThan',
'low': 30000,
'high': 'salary'
},
{
'constraint': 'Positive',
'high': 'prior_years_experience'
},
{
'constraint': 'Rounding',
'columns': 'salary',
'digits': 2
}
],
'model_kwargs': {}
})
return table_meta, table
return table
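# Example usage (illustrative sketch, not part of the original module):
#
#     >>> employees = load_tabular_demo()           # default dummy employee table
#     >>> len(employees), 'salary' in employees.columns
#     (12, True)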
def load_timeseries_demo(dataset_name=None, table_name=None, metadata=False):
"""Load a timeseries demo.
If a dataset name is given, it is downloaded from the sdv-datasets S3 bucket.
    Otherwise, the NASDAQ100_2019 dataset is loaded.
If ``metadata`` is ``True``, the output will be a tuple with a ``Metadata``
instance for the dataset and a ``pandas.DataFrame`` with the data from the table.
If ``metadata`` is ``False``, only the ``pandas.DataFrame`` is returned.
Args:
dataset_name (str):
Dataset name to be downloaded, if ``None`` use default dataset. Defaults to ``None``.
table_name (str):
If a table name is given, return this table from the indicated dataset.
Otherwise, return the first one.
data_path (str):
Data path to save the dataset files, only used if dataset_name is provided.
Defaults to ``DATA_PATH``.
metadata (bool):
If ``True`` also return a Table object. Defaults to ``False``.
Returns:
pandas.DataFrame or tuple:
If ``metadata`` is ``False`` return a ``pandas.DataFrame`` with the tables data.
If ``metadata`` is ``True`` return a ``tuple`` with a Table and the data.
"""
dataset_name = dataset_name or 'nasdaq100_2019'
return load_tabular_demo(dataset_name, table_name, data_path=DATA_PATH, metadata=metadata)
def get_available_demos():
"""Get available demos and information about them.
Returns:
pandas.DataFrame:
Table with the available demos.
"""
return pd.read_csv(DATASETS_URL)
|
the-stack_106_23744 | import data_utils
def _get_feed_dict_for_others(model, x_batch, y_batch, x_lens, use_pos=True, use_ner=True, use_deprel=True):
feed = {model.word_inputs:x_batch[data_utils.WORD_FIELD], model.labels:y_batch, model.seq_lens:x_lens}
if use_pos:
feed[model.pos_inputs] = x_batch[data_utils.POS_FIELD]
if use_ner:
feed[model.ner_inputs] = x_batch[data_utils.NER_FIELD]
if use_deprel:
feed[model.deprel_inputs] = x_batch[data_utils.DEPREL_FIELD]
return feed
def _get_feed_dict_for_sprnn(model, x_batch, y_batch, x_lens, use_pos=True, use_ner=True, use_deprel=True):
all_fields = [data_utils.WORD_FIELD, data_utils.POS_FIELD, data_utils.NER_FIELD, data_utils.DEPREL_FIELD, data_utils.ROOT_FIELD]
    # convert root sequence into a list of ROOT indices
root_seq = x_batch[data_utils.ROOT_FIELD]
max_len = len(root_seq[0])
root_index_seq = []
for s in root_seq:
root_index_seq.append(s.index('ROOT')) # find the root position
# for each sequence type, divide the sequence based on the index sequence and pad to max length
# left batch: from subject to root; right_batch: from object to root
left_batch = {}
right_batch = {}
for k in all_fields: # each value is a batch of different sequence
batch = x_batch[k]
left_batch_seq, right_batch_seq = [], []
assert(len(batch) == len(root_index_seq))
for s, idx, length in zip(batch, root_index_seq, x_lens):
l = s[:idx+1]
            r = s[idx:length][::-1] # remember to reverse the right batch so that ROOT is at the end
left_batch_seq.append(l + [data_utils.PAD_ID] * (max_len-len(l))) # pad
right_batch_seq.append(r + [data_utils.PAD_ID] * (max_len-len(r)))
left_batch[k], right_batch[k] = left_batch_seq, right_batch_seq
# calculate left and right seq lengths
left_lens = [idx + 1 for idx in root_index_seq]
right_lens = [(l - idx) for idx, l in zip(root_index_seq, x_lens)]
# now create the feed dict
feed = {model.word_inputs: left_batch[data_utils.WORD_FIELD] + right_batch[data_utils.WORD_FIELD], \
model.seq_lens: left_lens + right_lens, model.labels: y_batch}
if use_pos:
feed[model.pos_inputs] = left_batch[data_utils.POS_FIELD] + right_batch[data_utils.POS_FIELD]
if use_ner:
feed[model.ner_inputs] = left_batch[data_utils.NER_FIELD] + right_batch[data_utils.NER_FIELD]
if use_deprel:
feed[model.deprel_inputs] = left_batch[data_utils.DEPREL_FIELD] + right_batch[data_utils.DEPREL_FIELD]
return feed
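# Minimal, self-contained illustration of the ROOT-split logic above (not part
# of the original module; the toy ids below are made up). A padded sequence is
# divided into a left (subject -> ROOT) path and a reversed right
# (object -> ROOT) path, and both are re-padded to the original max length.
if __name__ == '__main__':
    max_len = 8
    length = 5                                    # true (unpadded) length
    seq = [11, 12, 13, 14, 15] + [data_utils.PAD_ID] * (max_len - length)
    root_seq = ['A', 'B', 'ROOT', 'C', 'D'] + [data_utils.PAD_ID] * (max_len - length)
    idx = root_seq.index('ROOT')                  # position of ROOT
    left = seq[:idx + 1]                          # subject ... ROOT
    right = seq[idx:length][::-1]                 # object ... ROOT (ROOT last)
    left = left + [data_utils.PAD_ID] * (max_len - len(left))
    right = right + [data_utils.PAD_ID] * (max_len - len(right))
    print('left :', left)
    print('right:', right)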
|
the-stack_106_23745 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlPoolUsagesOperations:
"""SqlPoolUsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs
) -> AsyncIterable["_models.SqlPoolUsageListResult"]:
"""Gets SQL pool usages.
Gets SQL pool usages.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlPoolUsageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.SqlPoolUsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlPoolUsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SqlPoolUsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorContract, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/usages'} # type: ignore
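    # Illustrative usage sketch (not part of the generated code). It assumes a
    # configured SynapseManagementClient instance named `client` whose
    # `sql_pool_usages` attribute is this operations group:
    #
    #     async for usage in client.sql_pool_usages.list(
    #             resource_group_name="my-rg",
    #             workspace_name="my-workspace",
    #             sql_pool_name="my-pool"):
    #         print(usage.name, usage.current_value, usage.limit)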
|
the-stack_106_23746 | # Owner(s): ["module: sparse"]
import torch
import itertools
import functools
import operator
import random
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types_and_complex,
)
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \
do_test_empty_full, load_tests, TEST_NUMPY, IS_WINDOWS, gradcheck, coalescedonoff, \
DeterministicGuard
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from numbers import Number
from typing import Dict, Any
from distutils.version import LooseVersion
from torch.testing import get_all_complex_dtypes, get_all_fp_dtypes
from torch.testing._internal.common_cuda import \
(SM53OrLater, SM80OrLater, CUDA11OrLater)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, ops, dtypes, dtypesIfCUDA, onlyCPU, onlyCUDA, precisionOverride,
deviceCountAtLeast, OpDTypes)
from torch.testing._internal.common_methods_invocations import \
(sparse_unary_ufuncs)
from torch.testing._internal.common_dtype import (
floating_and_complex_types, floating_and_complex_types_and, get_all_dtypes, get_all_int_dtypes,
)
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# batched grad doesn't support sparse
gradcheck = functools.partial(gradcheck, check_batched_grad=False)
CUSPARSE_SPMM_COMPLEX128_SUPPORTED = (
IS_WINDOWS and torch.version.cuda and LooseVersion(torch.version.cuda) > "11.2"
) or (not IS_WINDOWS and CUDA11OrLater)
class TestSparse(TestCase):
def setUp(self):
self.index_tensor = lambda *args, **kwargs: torch.tensor(*args, **kwargs, dtype=torch.int64)
def sparse_empty_factory(*args, **kwargs):
kwargs['layout'] = kwargs.get('layout', torch.sparse_coo)
return torch.empty(*args, **kwargs)
self.sparse_empty = sparse_empty_factory
def sparse_tensor_factory(*args, **kwargs):
return torch.sparse_coo_tensor(*args, **kwargs)
self.sparse_tensor = sparse_tensor_factory
self.legacy_sparse_tensor = torch.sparse.DoubleTensor
def _gen_sparse(self, sparse_dim, nnz, with_size, dtype, device, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dim
x, i, v = self.genSparseTensor(with_size, sparse_dim, nnz, not coalesced, dtype=dtype, device=device)
if not coalesced:
self.assert_uncoalesced(x)
return x, i, v
def assert_uncoalesced(self, x):
"""
Test if a CPU tensor is uncoalesced. This is used to ensure
correctness of the uncoalesced tensor generation algorithm.
"""
assert not x.is_coalesced()
existing_indices = set()
for i in range(x._nnz()):
index = str(x._indices()[:, i])
if index in existing_indices:
return True
else:
existing_indices.add(index)
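    # For reference, a minimal uncoalesced tensor looks like this (illustrative
    # only, not used by the tests): duplicate indices are kept as-is until
    # coalesce() sums their values.
    #
    #     i = torch.tensor([[0, 0], [1, 1]])         # entry (0, 1) appears twice
    #     v = torch.tensor([1., 2.])
    #     t = torch.sparse_coo_tensor(i, v, (2, 2))  # t.is_coalesced() -> False
    #     t.coalesce().values()                      # tensor([3.])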
def randn(self, *args, **kwargs):
"""
Variant of torch.randn that also works in the TEST_CUDA case.
"""
# TODO: Put this in torch.cuda.randn
return torch.empty(*args, **kwargs).normal_()
@dtypes(torch.double)
def test_print_coalesced(self, device, dtype):
self._test_print(device, dtype, True)
@dtypes(torch.double)
def test_print_uncoalesced(self, device, dtype):
self._test_print(device, dtype, False)
def _test_print(self, device, dtype, coalesced):
shape_sparse_dim_nnz = [
((), 0, 2),
((0,), 0, 10),
((2,), 0, 3),
((100, 3), 1, 3),
((100, 20, 3), 2, 0),
((10, 0, 3), 0, 3),
((10, 0, 3), 0, 0),
]
printed = []
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
printed.append("# shape: {}".format(torch.Size(shape)))
printed.append("# nnz: {}".format(nnz))
printed.append("# sparse_dim: {}".format(sparse_dim))
printed.append("# indices shape: {}".format(indices_shape))
printed.append("# values shape: {}".format(values_shape))
indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)
for d in range(sparse_dim):
indices[d].clamp_(max=(shape[d] - 1)) # make it valid index
if not coalesced and indices.numel() > 0:
indices[:, -1] = indices[:, 0] # make it uncoalesced
values_numel = values_shape.numel()
values = torch.arange(values_numel, dtype=dtype,
device=device).view(values_shape).div_(values_numel / 2.)
sp_tensor = self.sparse_tensor(indices, values, shape, dtype=dtype, device=device)
dtypes = [torch.int32]
if values.dtype == torch.double:
dtypes.append(torch.float)
else:
dtypes.append(torch.double)
for dtype in dtypes:
printed.append("########## {} ##########".format(dtype))
x = sp_tensor.detach().to(dtype)
printed.append("# sparse tensor")
printed.append(str(x))
if x.dtype.is_floating_point:
printed.append("# after requires_grad_")
printed.append(str(x.requires_grad_()))
printed.append("# after addition")
printed.append(str(x + x))
printed.append("# _indices")
printed.append(str(x._indices()))
printed.append("# _values")
printed.append(str(x._values()))
printed.append('')
self.assertExpected('\n'.join(printed))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_basic(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
self.assertEqual(i, x._indices())
self.assertEqual(v, x._values())
self.assertEqual(x.ndimension(), len(with_size))
self.assertEqual(x.coalesce()._nnz(), nnz)
self.assertEqual(list(x.size()), with_size)
# Test .indices() and .values()
if not coalesced:
with self.assertRaisesRegex(RuntimeError, "Cannot get indices on an uncoalesced tensor"):
x.indices()
with self.assertRaisesRegex(RuntimeError, "Cannot get values on an uncoalesced tensor"):
x.values()
else:
self.assertEqual(x.indices(), x._indices())
self.assertEqual(x.values(), x._values())
test_shape(3, 10, 100)
test_shape(3, 10, [100, 100, 100])
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
# Make sure that coalesce handles duplicate indices correctly
i = self.index_tensor([[9, 0, 0, 0, 8, 1, 1, 1, 2, 7, 2, 2, 3, 4, 6, 9]], device=device)
v = torch.tensor([[idx**2, idx] for idx in range(i.size(1))], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([10, 2]), dtype=dtype, device=device)
self.assertEqual(x.coalesce()._nnz(), 9)
# Make sure we can access empty indices / values
x = self.legacy_sparse_tensor()
self.assertEqual(x._indices().numel(), 0)
self.assertEqual(x._values().numel(), 0)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_coalesce(self, device, dtype, coalesced):
def _test_coalesce(t):
tc = t.coalesce()
self.assertEqual(tc.to_dense(), t.to_dense())
self.assertTrue(tc.is_coalesced())
# Our code below doesn't work when nnz is 0, because
# then it's a 0D tensor, not a 2D tensor.
if t._nnz() == 0:
self.assertEqual(t._indices(), tc._indices())
self.assertEqual(t._values(), tc._values())
return tc
value_map: Dict[Any, Any] = {}
for idx, val in zip(t._indices().t(), t._values()):
idx_tup = tuple(idx.tolist())
if idx_tup in value_map:
value_map[idx_tup] += val
else:
value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
new_indices = sorted(list(value_map.keys()))
_new_values = [value_map[idx] for idx in new_indices]
if t._values().ndimension() < 2:
new_values = t._values().new(_new_values)
else:
new_values = torch.stack(_new_values)
new_indices = t._indices().new(new_indices).t()
tg = t.new(new_indices, new_values, t.size())
self.assertEqual(tc._indices(), tg._indices())
self.assertEqual(tc._values(), tg._values())
if t.is_coalesced():
self.assertEqual(tc._indices(), t._indices())
self.assertEqual(tc._values(), t._values())
for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
t, _, _ = self._gen_sparse(len(sparse_size), nnz, sparse_size + dense_size, dtype, device, coalesced)
_test_coalesce(t) # this tests correctness
@dtypes(torch.double)
def test_coalesce_reference_cycle(self, device, dtype):
# Test coalesce doesn't create autograd graph cycles (gh-52253)
# Sanity check that the helper class works as expected
t = torch.rand(2)
t_ref = torch._C._WeakTensorRef(t)
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
def test_sparse_sum():
i = torch.tensor([[0], [4]], dtype=torch.long, device=device)
v = torch.tensor([[[-0.4567, -1.8797, 0.0380, 1.4316]]],
dtype=dtype, device=device)
S = torch.sparse_coo_tensor(i, v)
S = S.coalesce()
S.requires_grad_(True)
S2 = S.coalesce()
self.assertTrue(S2.is_coalesced())
return torch._C._WeakTensorRef(S2)
ref = test_sparse_sum()
self.assertTrue(ref.expired())
@dtypes(torch.double)
def test_ctor_large_sizes(self, device, dtype):
# Test that integer overflow is detected when computing numel
# of a sparse tensor with large dimensions (gh-57416). Notice
# that numel is computed internally when constructing a
# tensor, hence the overflow may appear during the tensor
# construction step.
N = 100000
indices = torch.tensor([[N, N - 1]] * 4, dtype=torch.int64, device=device)
values = torch.tensor([1, 2], dtype=dtype, device=device)
self.assertRaises(RuntimeError,
lambda: torch.sparse_coo_tensor(
indices, values, (N + 1,) * 4, device=device))
@dtypes(torch.double, torch.cdouble)
def test_ctor_size_checks(self, device, dtype):
indices = self.index_tensor([
[0, 0, 0],
[0, 3, 0],
[0, 0, 0],
[0, 0, 0],
], device=device)
values = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
# indices inconsistent with size
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 1, 1])))
# values inconsistent with size
values = torch.tensor([
[2, 1, 2, 1],
[1, 0, 5, 2],
], dtype=dtype, device=device)
self.assertRaises(
RuntimeError,
lambda: self.sparse_tensor(indices, values, torch.Size([2, 4, 2, 1])))
@dtypes(*floating_and_complex_types_and(torch.float16))
def test_to_dense(self, device, dtype):
def test_tensor(x, res):
x.to_dense() # Tests triple to_dense for memory corruption
x.to_dense()
x.to_dense()
            # We don't have to_dense for half types, so we don't request
            # exact_dtype if res.dtype is torch.float16.
dense_x = x.to_dense()
safe_dense_x = self.safeToDense(x)
if (res.dtype == torch.float16):
exact_dtype = False
else:
exact_dtype = True
dense_x = dense_x.to(res.dtype)
safe_dense_x = safe_dense_x.to(res.dtype)
self.assertEqual(res, dense_x, exact_dtype=exact_dtype)
self.assertEqual(res, safe_dense_x, exact_dtype=exact_dtype)
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
for value_type in [torch.double, torch.cdouble]:
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
# we don't have to_dense for half types on CPU because it is implemented
# with a slower add_ operation
v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]), dtype=value_type, device=device)
res = torch.tensor([
[[2, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 3, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 4]],
], dtype=dtype, device=device)
test_tensor(x, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
v = torch.empty(4, 0, dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]), dtype=value_type, device=device)
res = torch.empty((3, 4, 5, 0), dtype=dtype, device=device)
test_tensor(x, res)
# half tensors on cpu don't implement to_dense, so need to convert to float
def _to_dense_half_safe(self, tensor):
if(tensor.dtype == torch.half and tensor.device.type == 'cpu'):
return tensor.to(torch.float).to_dense().to(torch.half)
else:
return tensor.to_dense()
@coalescedonoff
@skipIfRocm
@dtypes(torch.float16, torch.float64, torch.int, torch.cfloat, torch.cdouble)
def test_to_sparse(self, device, dtype, coalesced):
shape = [5, 2, 10, 4]
max_nnz = 1
for value_type in [torch.double, torch.cdouble]:
for dim, dim_sz in enumerate(shape, 1):
max_nnz *= dim_sz
rnnz = torch.randint(2, max_nnz, (1,)).item()
for nnz in [0, 1, rnnz]:
expected, _, _ = self._gen_sparse(dim, nnz, shape, dtype=value_type, device=device,
coalesced=coalesced)
expected = expected.to(dtype)
d = self._to_dense_half_safe(expected)
result = d.to_sparse(dim)
self.assertEqual(d, self._to_dense_half_safe(result)) # == not implemented for sparse tensors yet
self.assertEqual(expected.size(), result.size())
self.assertEqual(dim, result.sparse_dim())
sp, _, _ = self._gen_sparse(2, 10, [3, 3, 3], dtype=value_type, device=device, coalesced=coalesced)
self.assertRaises(RuntimeError, lambda: sp.to_sparse())
@dtypes(torch.double, torch.cdouble)
def test_sparse_bool(self, device, dtype):
a = torch.tensor([True, False], dtype=dtype, device=device).to(torch.bool)
b = a.to_sparse().to_dense()
self.assertEqual(a, b)
@dtypes(torch.double, torch.cdouble)
def test_scalar(self, device, dtype):
# tensor with value
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1), 12.3, [], dtype=dtype, device=device)
self.assertEqual(1, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
# tensor with multiple values
a = self.sparse_tensor(self.index_tensor([], device=device).unsqueeze(1).expand(0, 2),
[12.3, 12.3], [], dtype=dtype, device=device)
self.assertEqual(2, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(12.3 * 2, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
# tensor without value
a = self.sparse_empty((), dtype=dtype, device=device)
self.assertEqual(0, a._values().numel())
self.assertEqual(a, a.clone())
a_coalesced = a.coalesce()
self.assertTrue(a_coalesced.is_coalesced())
self.assertEqual(torch.tensor(0, dtype=dtype, device=device), a.to_dense())
self.assertEqual(a, a.to_dense().to_sparse())
@dtypes(torch.double, torch.cdouble)
def test_shared(self, device, dtype):
i = self.index_tensor([[2]], device=device)
v = torch.tensor([5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
v[0] = 6
self.assertEqual(torch.tensor([0, 0, 6], dtype=dtype, device=device), self.safeToDense(x))
i[0][0] = 0
self.assertEqual(torch.tensor([6, 0, 0], dtype=dtype, device=device), self.safeToDense(x))
i = self.index_tensor([[2]], device=device)
v = torch.empty((1, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
i[0][0] = 0
self.assertEqual(torch.empty((3, 0), dtype=dtype, device=device), self.safeToDense(x))
@dtypes(torch.double, torch.cdouble)
def test_to_dense_hybrid(self, device, dtype):
def test_tensor(x, res):
x.to_dense() # Tests double to_dense for memory corruption
x.to_dense()
x.to_dense()
self.assertEqual(res, x.to_dense())
self.assertEqual(res, self.safeToDense(x))
def fn(x):
return x.to_dense()
x.requires_grad_(True)
gradcheck(fn, (x,), check_sparse_nnz=True)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.tensor([[2, 3], [1, 2], [3, 4], [4, 5]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2]))
res = torch.tensor([
[[2, 3],
[0, 0],
[0, 0],
[0, 0]],
[[1, 2],
[0, 0],
[0, 0],
[0, 0]],
[[3, 4],
[0, 0],
[0, 0],
[4, 5]],
], dtype=dtype, device=device)
test_tensor(x, res)
i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
], device=device)
v = torch.empty((4, 2, 0), dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 2, 0]))
res = torch.empty((3, 4, 2, 0), dtype=dtype, device=device)
test_tensor(x, res)
@dtypes(torch.double, torch.cdouble)
def test_contig(self, device, dtype):
def test_tensor(x, exp_i, exp_v):
x = x.coalesce()
self.assertEqual(exp_i, x._indices())
self.assertEqual(exp_v, x._values())
i = self.index_tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], device=device)
v = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([100, 100]))
exp_i = self.index_tensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
], device=device)
exp_v = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.tensor([2, 1, 3, 4], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
# Duplicate indices
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.tensor([3, 2, 4, 1], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.tensor([6, 4], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 0]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.empty([2, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
@dtypes(torch.double, torch.cdouble)
def test_contig_hybrid(self, device, dtype):
def test_tensor(x, exp_i, exp_v):
x = x.coalesce()
self.assertEqual(exp_i, x._indices())
self.assertEqual(exp_v, x._values())
i = self.index_tensor([
[1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
[92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
], device=device)
v = torch.tensor([
[1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
[6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([100, 100, 2]))
exp_i = self.index_tensor([
[0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
[31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
], device=device)
exp_v = torch.tensor([
[2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
[3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[2, 0, 2, 1],
[0, 0, 3, 0],
[1, 0, 4, 0],
], device=device)
v = torch.empty([4, 3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
exp_i = self.index_tensor([
[0, 1, 2, 2],
[0, 0, 0, 3],
[0, 0, 1, 4],
], device=device)
exp_v = torch.empty([4, 3, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
# Duplicate indices
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.tensor([[6, 4, 5], [4, 3, 4]], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
i = self.index_tensor([
[0, 0, 2, 0],
[0, 0, 3, 0],
[0, 0, 4, 0],
], device=device)
v = torch.empty([4, 3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 4, 5, 3, 0]))
exp_i = self.index_tensor([
[0, 2],
[0, 3],
[0, 4],
], device=device)
exp_v = torch.empty([2, 3, 0], dtype=dtype, device=device)
test_tensor(x, exp_i, exp_v)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_clone(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
if not coalesced:
self.assertFalse(x.is_coalesced())
y = x.clone()
self.assertFalse(y.is_coalesced())
x = x.coalesce()
self.assertTrue(x.is_coalesced())
y = x.clone()
self.assertTrue(y.is_coalesced())
test_shape(4, 20, 5)
test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_Sparse_to_Sparse_copy_(self, device, dtype, coalesced):
# This is for testing torch.copy_(SparseTensor, SparseTensor)
sparse_dims = 3
nnz = 10
sizes = [2, 3, 4, 5] # hybrid sparse
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
# test copy
x2_dense = x2.to_dense()
x1.copy_(x2)
self.assertEqual(x2_dense, x1.to_dense())
# test type conversion (when x1.copy_(x2), x1.dtype should stay the same)
x1 = x1.to(torch.float32)
x2 = x2.to(torch.float16)
x1_dtype = x1.dtype
x1.copy_(x2)
self.assertEqual(x1_dtype, x1.dtype)
x2 = x2.to(torch.float64)
x1_dtype = x1.dtype
x1.copy_(x2)
self.assertEqual(x1_dtype, x1.dtype)
# test no broadcast
self.assertRaises(RuntimeError, lambda: x1.copy_(x2.narrow_copy(0, 0, 1)))
# test raise error on copy_() between dense and sparse Tensors
self.assertRaises(RuntimeError, lambda: x1.copy_(torch.randn(5, 5)))
# test autograd
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
x2.requires_grad_(True)
x1.copy_(x2)
y = x1 * 2
x2_clone = x2.clone()
y.backward(x2_clone)
expected_grad = x2_clone * 2
self.assertEqual(expected_grad.to_dense(), x2.grad.to_dense())
self.assertEqual(None, x1.grad)
@coalescedonoff
@unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
@dtypes(torch.double, torch.cdouble)
def test_Sparse_to_Sparse_copy_multi_gpu(self, device, dtype, coalesced):
# This is for testing torch.copy_(SparseTensor, SparseTensor) across GPU devices
sparse_dims = 3
nnz = 10
sizes = [2, 3, 4, 5] # hybrid sparse
x1, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes, dtype, device, coalesced)
x1 = x1.to('cuda:0')
def test_cross_device(x1, x2):
x1_device = x1.device
x1.copy_(x2)
self.assertEqual(x2.to('cuda:0').to_dense(), x1.to_dense())
self.assertEqual(x1_device, x1.device)
test_cross_device(x1, x2.to('cuda:1')) # test across gpu devices
test_cross_device(x1, x2.to('cpu')) # test between cpu and gpu
# test autograd
x2 = x2.to('cuda:1')
x2.requires_grad_(True)
x1.copy_(x2)
y = x1 * 2
x2_clone = x2.clone().to('cuda:0')
y.backward(x2_clone)
expected_grad = x2_clone * 2
self.assertEqual(expected_grad.to_dense(), x2.grad.to('cuda:0').to_dense())
self.assertEqual(None, x1.grad)
@onlyCUDA
def test_cuda_empty(self, device):
def test_tensor(x):
y = x.to(device)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
x = y.cpu()
self.assertEqual(y.sparse_dim(), x.sparse_dim())
self.assertEqual(y.dense_dim(), x.dense_dim())
x = torch.sparse.FloatTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.cuda.sparse.HalfTensor(2, 3, 4)
test_tensor(x)
x = torch.sparse.FloatTensor(2, 3, 4, 0)
test_tensor(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_transpose(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
y = self.safeToDense(x)
for i, j in itertools.combinations(range(4), 2):
x = x.transpose_(i, j)
y = y.transpose(i, j)
self.assertEqual(self.safeToDense(x), y)
x = x.transpose(i, j)
y = y.transpose(i, j)
self.assertEqual(self.safeToDense(x), y)
test_shape(4, 6, 3)
test_shape(4, 3, [7, 7, 7, 3, 3, 3, 0])
test_shape(4, 0, [0, 0, 7, 3, 3, 3, 0])
@coalescedonoff
@onlyCPU
@dtypes(torch.double)
def test_coalesce_transpose_mm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [dj, di], dtype, device, coalesced)
y = torch.randn(dj, dk, dtype=dtype, device=device)
x_coalesced = x.coalesce()
self.assertTrue(x_coalesced.is_coalesced())
x_coalesced_t = x_coalesced.t()
            # Transpose is `coalesced`-preserving if the indices tensor is empty.
self.assertEqual(x_coalesced_t.is_coalesced(), di * nnz == 0)
res = torch.mm(x_coalesced_t, y)
expected = torch.mm(self.safeToDense(x_coalesced_t), y)
self.assertEqual(res, expected)
test_shape(10, 20, 30, 20)
test_shape(0, 20, 30, 0)
test_shape(10, 0, 30, 0)
test_shape(10, 20, 0, 0)
test_shape(10, 20, 0, 20)
@dtypes(torch.double, torch.cdouble)
def test_t_empty(self, device, dtype):
def test_in_place(x):
shape_original = x.shape
x.t_()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), x.size())
self.assertEqual(0, x._indices().numel())
self.assertEqual(0, x._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
def test_not_in_place(x):
shape_original = x.shape
y = x.t()
self.assertEqual(torch.Size([shape_original[1], shape_original[0]]), y.size())
self.assertEqual(0, y._indices().numel())
self.assertEqual(0, y._values().numel())
self.assertEqual(x.sparse_dim(), 2)
self.assertEqual(x.dense_dim(), 0)
x = self.sparse_empty(2, 3, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
x = self.sparse_empty(2, 0, dtype=dtype, device=device)
test_in_place(x)
test_not_in_place(x)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_add_zeros(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
r1 = zeros + x
r2 = x + zeros
self.assertEqual(r1, x)
self.assertEqual(r2, x)
test_shape(1, 20, [1])
test_shape(4, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 5])
test_shape(2, 20, [3, 17, 19, 0])
@dtypes(torch.double, torch.cdouble)
def test_add_sub_nnz(self, device, dtype):
# nnz should not grow unbounded (gh-34964)
x = torch.randn(10, dtype=dtype, device=device).to_sparse()
x.add_(x)
x.add_(x)
self.assertLessEqual(x._nnz(), 10)
x.sub_(2 * x)
x.sub_(2 * x)
self.assertLessEqual(x._nnz(), 10)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_cat(self, device, dtype, coalesced):
# shapes: list of tuples (sparse_dims, nnz, sizes)
def test_shapes(shapes, dim, fail_message=None):
inputs = [self._gen_sparse(shape[0], shape[1], shape[2], dtype, device, coalesced)[0]
for shape in shapes]
if fail_message:
with self.assertRaisesRegex(RuntimeError, fail_message):
torch.cat(inputs, dim)
else:
result = torch.cat(inputs, dim)
dense_result = torch.cat([t.to_dense() for t in inputs], dim)
self.assertEqual(dense_result, result.to_dense())
test_shapes(
[(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], 1)
# mismatched sizes
test_shapes([(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4])], 0,
"All tensors must have the same shape: \\[2, 3, 4].*\\[2, 1, 4]")
# hybrid sparse/dense
test_shapes(
[(2, 10, [2, 3, 4]), (2, 10, [2, 1, 4]), (2, 10, [2, 4, 4])], 1)
# cat along dense dim
test_shapes([(2, 10, [2, 3, 4]), (2, 10, [2, 3, 7])], 2)
test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 1)
test_shapes([(1, 10, [2, 3, 4]), (1, 10, [2, 3, 4])], 2)
# mismatched dimensions
test_shapes([(2, 10, [2, 3, 4]), (3, 10, [2, 3, 4])], 0,
"All tensors must have the same.*2, 1, but tensor at position 1 has 3, 0.")
# wrapped dimension
test_shapes(
[(3, 10, [2, 3, 4]), (3, 10, [2, 1, 4]), (3, 10, [2, 4, 4])], -2)
# sparse with dense
sp = self._gen_sparse(3, 10, [2, 3, 4], dtype, device, coalesced)[0]
dn = sp.to_dense()
with self.assertRaisesRegex(RuntimeError,
"Concatenating sparse tensors, but a dense tensor was found at position 1."):
torch.cat((sp, dn))
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_unsqueeze(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, unsqueeze_dim, fail_message=None):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.unsqueeze(x, unsqueeze_dim)
else:
result = torch.unsqueeze(x, unsqueeze_dim)
dense_result = torch.unsqueeze(x.to_dense(), unsqueeze_dim)
self.assertEqual(dense_result, result.to_dense())
# basic case
test_shape(3, 10, [5, 7, 11], 0)
# hybrid sparse/dense, unsqueeze along sparse dim
test_shape(3, 10, [5, 7, 11, 13, 17], 0)
test_shape(3, 10, [5, 7, 11, 13, 17], 3)
# unsqueeze along dense dimensions
test_shape(3, 10, [5, 7, 11, 13, 17], 4)
test_shape(3, 10, [5, 7, 11, 13, 17], 5)
# wrapped dimensions
test_shape(3, 10, [5, 7, 11, 13, 17], -1)
test_shape(3, 10, [5, 7, 11, 13, 17], -6)
# bounds
test_shape(3, 10, [5, 7, 11, 13, 17], -7, "Dimension out of range")
test_shape(3, 10, [5, 7, 11, 13, 17], 6, "Dimension out of range")
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_select(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.select(x, select_dim, select_index)
else:
result = torch.select(x, select_dim, select_index)
if result.is_sparse:
result = result.to_dense()
dense_result = torch.select(x.to_dense(), select_dim, select_index)
self.assertEqual(dense_result, result)
sizes = [5, 7, 11, 13, 17]
# hybrid sparse/dense, select sparse dim, result is dense
for i in range(sizes[0]):
test_shape(1, 10, sizes, 0, i)
test_shape(1, 10, sizes, 0, sizes[0] + 1, r'select[(][)][:] index \d out of range.*')
# hybrid sparse/dense, select sparse dim, result is sparse
for d in range(3):
for i in range(sizes[d]):
test_shape(3, 10, sizes, d, i)
# hybrid sparse/dense, select dense dim, result is sparse
for d in range(1, 3):
for i in range(sizes[d]):
test_shape(1, 10, sizes, d, i)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_index_select(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, sizes, select_dim, select_index, fail_message=None):
if isinstance(select_index, int):
select_index = [select_index]
if isinstance(select_index, list):
select_index = torch.tensor(select_index, device=device, dtype=torch.long)
x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes, dtype, device, coalesced)
if fail_message:
with self.assertRaisesRegex(IndexError, fail_message):
torch.index_select(x, select_dim, select_index)
else:
result = torch.index_select(x, select_dim, select_index)
if result.is_sparse:
result = result.to_dense()
dense_result = torch.index_select(x.to_dense(), select_dim, select_index)
self.assertEqual(dense_result, result)
sizes = [5, 7, 11, 13, 17]
for d in range(len(sizes)):
for index in [0, sizes[d] - 1, [0, sizes[d] // 2, sizes[d] - 1]]:
test_shape(1, 10, sizes, d, index)
test_shape(len(sizes) // 2, 10, sizes, d, index)
test_shape(len(sizes), 10, sizes, d, index)
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_mm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
t = torch.randn(di, dk, dtype=dtype, device=device)
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, self.safeToDense(x), y)
self.assertEqual(res, expected)
res = torch.mm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res, expected)
test_shape(10, 100, 100, 20)
test_shape(100, 1000, 200, 20)
test_shape(64, 10000, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(10, 0, 100, 0)
test_shape(10, 100, 0, 0)
test_shape(10, 100, 0, 20)
@unittest.skipIf(
IS_WINDOWS and TEST_CUDA,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
@unittest.skipIf(
TEST_CUDA and _get_torch_cuda_version() < (10, 1),
"bmm sparse-dense requires CUDA 10.1 or greater"
)
@coalescedonoff
@dtypes(torch.double)
def test_bmm(self, device, dtype, coalesced):
def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
a_list = []
b_list = []
for mat_idx in range(num_mats):
a_mat = self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0]
b_mat = torch.randn([dim_j, dim_k], dtype=dtype, device=device)
a_list.append(a_mat)
b_list.append(b_mat)
a = torch.stack(a_list)
b = torch.stack(b_list)
ab = a.bmm(b)
# Compare each matrix against result from mm()
for mat_idx in range(num_mats):
a_mat = a_list[mat_idx]
b_mat = b_list[mat_idx]
ab_mat_bmm = ab[mat_idx]
ab_mat_mm = a_mat.mm(b_mat)
self.assertEqual(ab_mat_bmm, ab_mat_mm)
test_shape(10, 10, 100, 99, 20)
test_shape(10, 100, 1000, 200, 20)
test_shape(10, 64, 10000, 300, 20)
test_shape(10, 0, 100, 99, 0)
test_shape(10, 10, 0, 100, 0)
test_shape(10, 10, 100, 0, 0)
test_shape(10, 10, 100, 0, 20)
test_shape(10, 10, 100, 0, 20)
a = torch.rand([10, 23, 32], dtype=dtype, device=device)
a[3] = torch.zeros(23, 32, dtype=dtype, device=device)
a[6] = torch.zeros(23, 32, dtype=dtype, device=device)
a = a.to_sparse()
b = torch.rand([10, 32, 10], dtype=dtype, device=device)
b[4] = torch.zeros(32, 10, dtype=dtype, device=device)
b[6] = torch.zeros(32, 10, dtype=dtype, device=device)
ab = a.bmm(b)
for mat_idx in range(ab.size(0)):
ab_mat = ab[mat_idx]
ab_mat_check = a[mat_idx].mm(b[mat_idx])
self.assertEqual(ab_mat, ab_mat_check)
ab_transpose_check = b.transpose(1, 2).to_sparse().bmm(
a.transpose(1, 2).to_dense()
).transpose(1, 2)
self.assertEqual(ab, ab_transpose_check)
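# Sketch of the determinism check below: under
#   torch.use_deterministic_algorithms(True)
# the CUDA sparse-dense bmm is expected to take a deterministic code path; the
# test compares its output against the default (non-deterministic) path and
# only requires the two to agree up to a small relative tolerance.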
@onlyCUDA
@coalescedonoff
@dtypes(torch.double)
@unittest.skipIf(
IS_WINDOWS,
"bmm sparse-dense CUDA is not yet supported in Windows, at least up to CUDA 10.1"
)
@unittest.skipIf(
_get_torch_cuda_version() < (10, 1),
"bmm sparse-dense requires CUDA 10.1 or greater"
)
def test_bmm_deterministic(self, device, dtype, coalesced):
def test_shape(num_mats, dim_i, dim_j, dim_k, nnz):
a_list = []
b_list = []
for mat_idx in range(num_mats):
a_list.append(self._gen_sparse(2, nnz, [dim_i, dim_j], dtype, device, coalesced)[0])
b_list.append(torch.randn([dim_j, dim_k], dtype=dtype, device=device))
a = torch.stack(a_list).cuda()
b = torch.stack(b_list).cuda()
with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
torch.use_deterministic_algorithms(False)
ab_nondeterministic = torch.bmm(a, b)
torch.use_deterministic_algorithms(True)
ab_deterministic = torch.bmm(a, b)
diff_abs = (ab_deterministic - ab_nondeterministic).abs()
diff_rel = diff_abs / ab_deterministic.abs()
diff_rel[torch.isnan(diff_rel)] = 0
# deterministic and non-deterministic results should either be
# equal or within a small relative difference
equal_abs_or_rel = diff_abs.eq(0).logical_or(diff_rel.lt(0.001))
self.assertTrue(equal_abs_or_rel.all())
test_shape(10, 10, 100, 99, 20)
test_shape(10, 100, 1000, 200, 20)
test_shape(10, 64, 10000, 300, 20)
test_shape(10, 0, 100, 99, 0)
test_shape(10, 10, 0, 100, 0)
test_shape(10, 10, 100, 0, 0)
test_shape(10, 10, 100, 0, 20)
test_shape(10, 10, 100, 0, 20)
@onlyCUDA
@unittest.skipIf(
not IS_WINDOWS or _get_torch_cuda_version() >= (11, 0),
"this test ensures bmm sparse-dense CUDA gives an error when run on Windows with CUDA < 11.0"
)
@dtypes(torch.double)
def test_bmm_windows_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense CUDA is not supported on Windows with cuda before 11.0"):
ab = a.bmm(b)
@onlyCUDA
@skipIfRocm
@unittest.skipIf(
_get_torch_cuda_version() >= (10, 1),
"this test ensures bmm gives error if CUDA version is less than 10.1"
)
@dtypes(torch.double)
def test_bmm_cuda_version_error(self, device, dtype):
a = torch.rand(2, 2, 2, dtype=dtype).to_sparse().cuda()
b = torch.rand(2, 2, 2, dtype=dtype).cuda()
with self.assertRaisesRegex(
RuntimeError,
"bmm sparse-dense requires CUDA 10.1 or greater"):
ab = a.bmm(b)
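# Sketch of the legacy ops exercised next: torch.saddmm(t, x, y) is the
# sparse-result counterpart of addmm (t and x sparse, y dense), and
# torch.smm(x, y) the sparse-result counterpart of mm; both are checked below
# by densifying the result and comparing with the ordinary dense addmm/mm.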
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_saddmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = torch.saddmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(self.safeToDense(res), expected)
res = torch.saddmm(t, x, y)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
res = torch.smm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
@onlyCPU
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sspaddmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
t = self._gen_sparse(2, nnz, [di, dk], dtype, device, coalesced)[0]
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
res = t.sspaddmm(x, y, beta=beta, alpha=alpha)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y, beta=beta, alpha=alpha)
self.assertEqual(self.safeToDense(res), expected)
res = t.sspaddmm(x, y)
expected = torch.addmm(self.safeToDense(t), self.safeToDense(x), y)
self.assertEqual(self.safeToDense(res), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
# Test code from issue https://github.com/pytorch/pytorch/issues/45113
batch_size, input_size, hidden_size = 5, 3, 7
# Create coalesced sparse tensor with non-contiguous indices
weight = torch.randn(hidden_size, input_size, dtype=dtype, device=device).to_sparse()
self.assertTrue(weight.is_coalesced())
non_contig_indices = weight.indices().mT.contiguous().mT
weight = torch.sparse_coo_tensor(
indices=non_contig_indices, values=weight.values(), size=weight.shape)
weight._coalesced_(True)
self.assertFalse(weight._indices().is_contiguous())
# Create un/coalesced sparse tensor
bias = torch.randn((hidden_size, 1), dtype=dtype, device=device).to_sparse()
bias = torch.cat([bias] * batch_size, dim=1)
if coalesced:
bias = bias.coalesce()
x = torch.randn(input_size, batch_size, dtype=dtype, device=device)
res = bias.sspaddmm(weight, x)
true_result = (bias.to_dense() + torch.matmul(weight.to_dense(), x)).to_sparse()
self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
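# Sketch of torch.sparse.addmm as used below: with S sparse and D1, D2 dense,
#   torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
# should return the dense tensor beta * D1 + alpha * (S @ D2) and be
# differentiable with respect to S, D1 and D2 (gradcheck runs with
# check_sparse_nnz=True so sparse gradients are accepted).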
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_addmm(self, device, dtype, coalesced):
def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device, requires_grad=True)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device, requires_grad=True)
D2 = make_tensor([m, p], dtype=dtype, device=device, requires_grad=True)
S = self._gen_sparse(2, nnz, [n, m], dtype, device, coalesced)[0]
S_dense = S.to_dense().requires_grad_(True)
S.requires_grad_(True)
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
def fn(S, D1, D2, beta=beta, alpha=alpha):
return torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
gradcheck(fn, (S, D1, D2), check_sparse_nnz=True)
test_shape(7, 8, 9, 20, False, None)
test_shape(7, 8, 9, 20, True, None)
test_shape(7, 8, 9, 20, False, (1, 0))
test_shape(7, 8, 9, 20, True, (1, 0))
test_shape(7, 8, 9, 20, False, (1, 1))
test_shape(7, 8, 9, 20, True, (1, 1))
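# Sketch: torch.sparse.mm(S, D) multiplies a 2-D sparse COO matrix with a
# dense matrix and should match torch.mm on the densified input, including
# when D is a transposed (non-contiguous) dense tensor; gradients flow through
# both operands.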
@coalescedonoff
@dtypes(torch.double)
def test_sparse_mm(self, device, dtype, coalesced):
def test_shape(d1, d2, d3, nnz, transposed):
if transposed:
D = torch.randn(d3, d2, dtype=dtype,
device=device).t_().requires_grad_(True)
else:
D = torch.randn(d2, d3, dtype=dtype, device=device).requires_grad_(True)
S = self._gen_sparse(2, nnz, [d1, d2], dtype, device, coalesced)[0]
S_dense = S.to_dense().requires_grad_(True)
S.requires_grad_(True)
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
def fn(S, D):
return torch.sparse.mm(S, D)
gradcheck(fn, (S, D), check_sparse_nnz=True)
test_shape(7, 8, 9, 20, False)
test_shape(7, 8, 9, 20, True)
@coalescedonoff
@dtypes(torch.double)
def test_dsmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
y = self.randn(dj, dk, dtype=dtype, device=device)
res = torch.dsmm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res, expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
test_shape(1000, 100, 0, 20)
@coalescedonoff
@dtypes(torch.double)
def test_hsmm(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)[0]
y = self.randn(dj, dk, dtype=dtype, device=device)
res = torch.hsmm(x, y)
expected = torch.mm(self.safeToDense(x), y)
self.assertEqual(res.to_dense(), expected)
test_shape(7, 5, 3, 20)
test_shape(1000, 100, 100, 20)
test_shape(3000, 64, 300, 20)
test_shape(0, 100, 100, 0)
test_shape(1000, 0, 100, 0)
test_shape(1000, 100, 0, 0)
test_shape(1000, 100, 0, 20)
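# Sketch of the dense + alpha * sparse path tested next: for a dense y and a
# sparse COO x of the same shape, torch.add(y, x, alpha=r) should equal
#   y + r * x.to_dense()
# even when y is non-contiguous or when x carries non-contiguous indices or
# values.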
@coalescedonoff
@dtypes(torch.double)
def test_spadd(self, device, dtype, coalesced):
def _test_spadd_shape(nnz, shape_i, shape_v=None):
shape = shape_i + (shape_v or [])
x, _, _ = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
y = self.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * self.safeToDense(x)
self.assertEqual(res, expected)
# Non-contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = self.randn(*s, dtype=dtype, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * self.safeToDense(x)
self.assertEqual(res, expected)
x, i, v = self._gen_sparse(len(shape_i), nnz, shape, dtype, device, coalesced)
nnz = i.size(1)
# Non-contiguous sparse indices tensor
x_ = self.sparse_tensor(i[:, ::2], v[:(nnz + 1) // 2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
# Non-contiguous sparse values tensor
x_ = self.sparse_tensor(i[:, :(nnz + 1) // 2], v[::2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
# Non-contiguous sparse indices and values tensors
x_ = self.sparse_tensor(i[:, 1::2], v[1::2], x.shape, dtype=dtype, device=device)
res = torch.add(y, x_, alpha=r)
expected = y + r * self.safeToDense(x_)
self.assertEqual(res, expected)
def _test_spadd():
_test_spadd_shape(10, [5, 6])
_test_spadd_shape(10, [10, 10, 10])
_test_spadd_shape(10, [50, 30, 20])
_test_spadd_shape(10, [5, 5, 5, 5, 5, 5])
_test_spadd_shape(0, [0, 30, 20])
_test_spadd_shape(0, [50, 0, 20])
_test_spadd_shape(0, [50, 30, 0])
def _test_spadd_hybrid():
_test_spadd_shape(10, [5, 6], [2, 3])
_test_spadd_shape(10, [10, 10, 10], [3])
_test_spadd_shape(10, [50, 30, 20], [2])
_test_spadd_shape(10, [5, 5, 5, 5, 5, 5], [2])
_test_spadd_shape(0, [0, 30, 20], [2, 0])
_test_spadd_shape(0, [50, 0, 20], [2, 0])
_test_spadd_shape(0, [50, 30, 0], [2, 0])
_test_spadd_shape(10, [50, 30, 20], [2, 0])
_test_spadd()
_test_spadd_hybrid()
@onlyCUDA
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_add_out_bfloat16(self, device, dtype, coalesced):
# fp32
x, _, _ = self._gen_sparse(3, 5, 10, dtype, device, coalesced)
y, _, _ = self._gen_sparse(3, 5, 10, dtype, device, coalesced)
x = x.float().cuda()
y = y.float().cuda()
res_fp32 = torch.add(x, y)
# bfloat16
x = x.bfloat16()
y = y.bfloat16()
res_bf16 = torch.add(x, y)
res_bf16 = res_bf16.float() # to compare with reference
self.assertEqual(res_fp32, res_bf16, atol=1e-2, rtol=0)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_norm(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x, _, _ = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
y = x.coalesce()
self.assertEqual(x.norm(), y._values().norm())
test_shape(3, 10, 100)
test_shape(4, 10, [100, 100, 100, 5, 5, 5, 0])
test_shape(4, 0, [0, 0, 100, 5, 5, 5, 0])
# Unsupported arguments should error
kwarg_error_pairs = [
({'keepdim': True},
RuntimeError, r'norm_sparse currently does not support keepdim=True'),
({'dim': 0},
RuntimeError, r'norm_sparse currently only supports full reductions'),
({'dtype': torch.double, 'p': 'fro'},
ValueError, r'dtype argument is not supported in frobenius norm'),
({'dtype': torch.double, 'p': 0},
RuntimeError, r"norm_sparse currently does not support 'dtype' argument")
]
x = self._gen_sparse(3, 10, 100, dtype, device, coalesced)[0]
for kwargs, err, msg in kwarg_error_pairs:
with self.assertRaisesRegex(err, msg):
x.norm(**kwargs)
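# Sketch of torch.sparse.sum as exercised below: a full reduction returns a
# 0-dim dense tensor, while reducing over a subset of dims keeps the result
# sparse whenever un-reduced sparse dims remain; run_tests compares both forms
# against the dense .sum() and runs gradcheck on them.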
@coalescedonoff
@dtypes(torch.double)
def test_sparse_sum(self, device, dtype, coalesced):
def run_tests(S, td=None):
D = S.coalesce().to_dense().detach().requires_grad_(True)
if td is None:
S_sum = torch.sparse.sum(S)
D_sum = D.sum()
self.assertEqual(S_sum.item(), D_sum.item())
def fn(S):
res = torch.sparse.sum(S)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
else:
S_sum = torch.sparse.sum(S, td)
D_sum = D.sum(td)
self.assertEqual(S_sum.to_dense() if S_sum.is_sparse else S_sum, D_sum)
def fn(S):
res = torch.sparse.sum(S, td)
if res.is_sparse:
res = res.to_dense()
return res
gradcheck(fn, (S,), check_sparse_nnz=True)
nnz = 10
sparse_dims = 2
with_size = [5, 5, 1, 4] # use a dense dim = 1 to test for squeeze
test_dims = []
for i in range(1, 5):
test_dims += itertools.combinations(range(len(with_size)), i)
# https://github.com/pytorch/pytorch/issues/16501
x = torch.tensor([[1., 0., 0., 1.],
[0., 1., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 2.]], dtype=dtype, device=device).to_sparse()
self.assertEqual(torch.sparse.sum(x, dim=0), torch.sparse.sum(x, dim=-2))
self.assertEqual(torch.sum(x.to_dense(), dim=0), torch.sparse.sum(x, dim=0).to_dense())
# Tensor.sum() is not supported on sparse tensors (torch.sparse.sum is the supported path)
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
self.assertRaises(RuntimeError, lambda: S.sum())
# dim out of range
self.assertRaises(IndexError, lambda: torch.sparse.sum(S, 5))
# dim 0 appears multiple times in the list of dims
self.assertRaises(RuntimeError, lambda: torch.sparse.sum(S, [0, 0]))
# sum an empty tensor
empty_S = torch.sparse_coo_tensor(size=with_size, dtype=dtype, device=device)
self.assertRaises(RuntimeError, lambda: torch.sparse.sum(empty_S, [0]))
self.assertEqual(torch.sparse.sum(empty_S), torch.tensor(0, dtype=dtype, device=device))
empty_S.requires_grad_(True)
empty_S_sum = torch.sparse.sum(empty_S)
empty_S_sum.backward()
self.assertEqual(empty_S.grad.to_dense(), empty_S.clone().detach().to_dense())
# full reduction, then reductions over each combination of dims (with gradcheck)
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True))
for test_dim in test_dims:
S = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)[0]
run_tests(S.requires_grad_(True), test_dim)
def _test_basic_ops_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
shape = shape_i + (shape_v)
x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)
y1 = x1 + x2
y2 = x1.clone()
y2.add_(x2)
expected = self.safeToDense(x1) + self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 - x2
y2 = x1.clone()
y2.sub_(x2)
expected = self.safeToDense(x1) - self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 * x2
y2 = x1.clone()
y2.mul_(x2)
expected = self.safeToDense(x1) * self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 * 37.5
y2 = x1.clone()
y2.mul_(37.5)
expected = self.safeToDense(x1) * 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y1 = x1 / 37.5
y2 = x1.clone()
y2.div_(37.5)
expected = self.safeToDense(x1) / 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
with self.assertWarnsOnceRegex(UserWarning, '__floordiv__'):
y1 = x1 // 37.5
y2 = x1.clone()
with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):
y2.floor_divide_(37.5)
expected = self.safeToDense(x1) // 37.5
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
# TODO: add back inplace support
y1 = x1 ** 2
y2 = x1.clone()
y2 = y2.pow(2)
expected = self.safeToDense(x1) ** 2
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
y = x1.clone()
y.zero_()
expected = torch.zeros(x1.size(), dtype=dtype, device=device)
self.assertEqual(self.safeToDense(y), expected)
self.assertEqual(x1.is_coalesced(), coalesced)
y = x1.coalesce()
z = x1.coalesce()
self.assertEqual(x1.is_coalesced(), coalesced)
self.assertTrue(y.is_coalesced())
self.assertEqual(x1, y)
y._values().add_(1)
if not x1.is_coalesced():
# check that coalesce is out of place if the original tensor is not
# coalesced.
self.assertEqual(z._values() + 1, y._values())
else:
# check that coalesce is in-place if the original tensor is
# coalesced.
self.assertEqual(z._values(), y._values())
@coalescedonoff
@dtypes(torch.double)
def test_basic_ops(self, device, dtype, coalesced):
def _test_basic_ops():
self._test_basic_ops_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
def _test_basic_ops_hybrid():
self._test_basic_ops_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_basic_ops_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_basic_ops_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
_test_basic_ops()
_test_basic_ops_hybrid()
@dtypes(torch.double, torch.cdouble)
def test_add_dense_sparse_mismatch(self, device, dtype):
def test_shape(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
x = torch.zeros(dense_size, dtype=dtype, device=device)
sparse_y = self.sparse_tensor(torch.zeros(sparse_dims_shape, dtype=torch.int64, device=device),
torch.randn(dense_dims_shape, dtype=dtype, device=device),
torch.Size(sparse_size))
with self.assertRaisesRegex(
RuntimeError,
"add: expected 'self' and 'other' to have same size"):
x + sparse_y
test_shape([3, 4], [1, 4], [4, 4, 4], [3, 4, 4])
test_shape([3, 4, 0], [1, 4], [4, 4, 4, 0], [3, 4, 4, 0])
@dtypes(torch.double, torch.cdouble)
def test_add_noncontiguous(self, device, dtype):
indices = self.index_tensor([[1, 2], [0, 2]], device=device)
values = torch.tensor([1.], dtype=dtype, device=device).expand(2, 3, 4, 5)
x = self.sparse_tensor(indices, values, dtype=dtype, device=device)
assert not x._values().is_contiguous()
y = x + x
expected = self.safeToDense(x) + self.safeToDense(x)
self.assertEqual(self.safeToDense(y), expected)
def _test_sparse_mask_shape(self, nnz_x1, nnz_x2, shape_i, shape_v, dtype, device, coalesced):
shape = shape_i + (shape_v or [])
x1, _, _ = self._gen_sparse(len(shape_i), nnz_x1, shape, dtype, device, coalesced)
x2, _, _ = self._gen_sparse(len(shape_i), nnz_x2, shape, dtype, device, coalesced)
y1 = x1 + x2
y2 = x1.clone()
y2.add_(x2)
expected = self.safeToDense(x1) + self.safeToDense(x2)
self.assertEqual(self.safeToDense(y1), expected)
self.assertEqual(self.safeToDense(y2), expected)
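# Sketch of Tensor.sparse_mask as checked next: given a dense tensor d and a
# coalesced sparse mask m, d.sparse_mask(m) should return a sparse tensor with
# m's indices and d's values at those positions, e.g. (illustrative values)
#   d = torch.arange(1., 21.).reshape(5, 4)
#   m = torch.sparse_coo_tensor([[1, 3], [2, 1]], [0., 0.], (5, 4)).coalesce()
#   d.sparse_mask(m).to_dense()  # non-zeros only at (1, 2) and (3, 1)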
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask(self, device, dtype, coalesced):
def _test_sparse_mask_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.tensor([1, 2, 3, 4], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4]), dtype=dtype, device=device).coalesce()
dense = torch.tensor([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20],
], dtype=dtype, device=device)
exp_v = torch.tensor([7, 14, 3, 20], dtype=dtype, device=device)
res = dense.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4]), dtype=dtype, device=device)
self.assertEqual(res, expected)
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
], device=device)
v = torch.empty([4, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 0])).coalesce()
dense = torch.empty([5, 4, 0], dtype=dtype, device=device)
exp_v = torch.empty([4, 0], dtype=dtype, device=device)
res = dense.sparse_mask(x)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 0]), dtype=dtype, device=device)
self.assertEqual(res, expected)
_test_sparse_mask_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [], dtype, device, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_sparse_mask_hybrid(self, device, dtype, coalesced):
def _test_sparse_mask_hybrid_fixed():
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
])
v = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5]])
# TODO: This is also testing that, if coalesce is a no-op,
# the indices don't get permuted. I don't know if we actually
# want to give this invariant.
x = self.sparse_tensor(i, v, torch.Size([5, 4, 2])).coalesce()
dense = torch.tensor([
[[1, 3], [2, 2], [3, 3], [4, 2]],
[[5, 7], [6, 7], [7, 9], [8, 9]],
[[9, 2], [10, 4], [11, 1], [12, 3]],
[[13, 5], [14, 1], [15, 1], [16, 6]],
[[17, 7], [18, 2], [19, 7], [20, 1]],
])
res = dense.sparse_mask(x)
exp_v = torch.tensor([[7, 9], [14, 1], [3, 3], [20, 1]])
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2]))
self.assertEqual(res, expected)
i = self.index_tensor([
[1, 3, 0, 4],
[2, 1, 2, 3],
])
v = torch.empty(4, 2, 0)
x = self.sparse_tensor(i, v, torch.Size([5, 4, 2, 0])).coalesce()
dense = torch.empty(5, 4, 2, 0)
res = dense.sparse_mask(x)
exp_v = torch.empty(4, 2, 0)
expected = self.sparse_tensor(i, exp_v, torch.Size([5, 4, 2, 0]))
self.assertEqual(res, expected)
_test_sparse_mask_hybrid_fixed()
self._test_sparse_mask_shape(9, 12, [5, 6], [2, 3], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [3], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [50, 30, 20], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [5, 5, 5, 5, 5, 5], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 12, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(9, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 10], [2, 0], dtype, device, coalesced)
self._test_sparse_mask_shape(0, 0, [10, 10, 0], [2, 0], dtype, device, coalesced)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros(self, device, dtype, coalesced):
def _test_zeros(nnzs, shape, out_shape_i, out_shape_v=None):
out_shape = out_shape_i + (out_shape_v or [])
for nnz in nnzs:
out, _, _ = self._gen_sparse(len(out_shape_i), nnz, out_shape, dtype, device, coalesced)
torch.zeros(*shape, out=out, dtype=dtype, device=device)
self.assertEqual(tuple(out.size()), tuple(shape))
self.assertTrue(out._indices().numel() == out._values().numel() == 0)
self.assertEqual(out._nnz(), 0)
self.assertEqual(out.sparse_dim(), len(shape))
self.assertEqual(out.dense_dim(), 0)
def test_shape(i_shapes, v_shapes, shape, nnzs):
for i_dim in range(1, len(i_shapes) + 1):
for v_dim in range(len(v_shapes) + 1):
_test_zeros(nnzs, shape, i_shapes[:i_dim], v_shapes[:v_dim])
test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 4], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 4], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 4], [9, 12])
test_shape([2, 3, 4], [3, 4, 5, 6], [2, 3, 0], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [2, 3, 0], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [2, 3, 0], [9, 12])
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_zeros_like(self, device, dtype, coalesced):
def _test_zeros_like(nnzs, template_shape_i, template_shape_v=None):
template_shape_v = template_shape_v or []
template_shape = template_shape_i + template_shape_v
for nnz in nnzs:
t, _, _ = self._gen_sparse(len(template_shape_i), nnz, template_shape, dtype, device, coalesced)
res = torch.zeros_like(t)
self.assertEqual(tuple(res.size()), tuple(template_shape))
self.assertTrue(res._indices().numel() == res._values().numel() == 0)
self.assertEqual(res._nnz(), 0)
self.assertEqual(res.sparse_dim(), len(template_shape_i))
self.assertEqual(res.dense_dim(), len(template_shape_v))
def test_shape(i_shapes, v_shapes, nnzs):
for i_dim in range(1, len(i_shapes) + 1):
for v_dim in range(len(v_shapes) + 1):
_test_zeros_like(nnzs, i_shapes[:i_dim], v_shapes[:v_dim])
test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
test_shape([2, 3, 4], [3, 4, 5, 6], [9, 12])
test_shape([0, 3, 4], [3, 4, 5, 6], [0])
test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
for x, mem_format in zip(data, mem_formats):
with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
result = torch.zeros_like(x, memory_format=mem_format)
result = torch.zeros_like(x, layout=torch.strided, memory_format=mem_format)
self.assertTrue(result.layout == torch.strided)
dense_tensor = sparse_tensor.to_dense()
result = torch.zeros_like(dense_tensor, layout=torch.sparse_coo)
self.assertEqual(dense_tensor.shape, result.shape)
self.assertEqual(result.layout, torch.sparse_coo)
sparse_zeros = torch.zeros(dense_tensor.shape, layout=torch.sparse_coo)
self.assertEqual(result._indices().shape, sparse_zeros._indices().shape)
self.assertEqual(result._values().shape, sparse_zeros._values().shape)
def _assert_sparse_invars(self, t):
# SparseTensor has the following invariants:
# - sparse_dim + dense_dim = len(SparseTensor.shape)
# - SparseTensor._indices().shape = (sparse_dim, nnz)
# - SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
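# e.g. (illustrative) a COO tensor of shape (2, 3, 4) with sparse_dim=2,
# dense_dim=1 and nnz=5 stores _indices() of shape (2, 5) and _values() of
# shape (5, 4).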
self.assertEqual(t.sparse_dim() + t.dense_dim(), len(t.shape))
self.assertEqual(tuple(t._indices().shape), (t.sparse_dim(), t._nnz()))
self.assertEqual(tuple(t._values().shape), (t._nnz(), ) + t.shape[t.sparse_dim():])
def _test_empty_like(self, sparse_tensor, dtype, device, coalesced):
result = torch.empty_like(sparse_tensor)
self.assertTrue(result.is_sparse)
self._assert_sparse_invars(result)
self.assertEqual(result.shape, sparse_tensor.shape)
self.assertEqual(result.dtype, sparse_tensor.dtype)
self.assertEqual(result.device, sparse_tensor.device)
self.assertEqual(result.sparse_dim(), sparse_tensor.sparse_dim())
self.assertEqual(result.dense_dim(), sparse_tensor.dense_dim())
sparse_tensor, _, _ = self._gen_sparse(len([2, 3]), 9, [2, 3] + [5, 6], dtype, device, coalesced)
data = (sparse_tensor, sparse_tensor, sparse_tensor, sparse_tensor.unsqueeze(0))
mem_formats = [torch.channels_last, torch.contiguous_format, torch.preserve_format, torch.channels_last_3d]
for x, mem_format in zip(data, mem_formats):
with self.assertRaisesRegex(RuntimeError, "memory format option is only supported by strided tensors"):
result = torch.empty_like(x, memory_format=mem_format)
result = torch.empty_like(x, layout=torch.strided, memory_format=mem_format)
self.assertTrue(result.layout == torch.strided)
with self.assertRaisesRegex(
RuntimeError, r"Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend"
):
dense_tensor = sparse_tensor.to_dense()
result = torch.empty_like(dense_tensor, layout=torch.sparse_coo)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_empty_like(self, device, dtype, coalesced):
# tests https://github.com/pytorch/pytorch/issues/43699
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2]]),
values=torch.tensor([3.0, -4.0, 5.0]),
size=[3, ],
dtype=dtype,
device=device
).coalesce()
self._test_empty_like(input_coalesced, dtype, device, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_empty_like(input_coalesced, dtype, device, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
self._test_empty_like(input_uncoalesced, dtype, device, coalesced)
def _test_narrow(self, input, narrow_args):
expected = input.to_dense().narrow(*narrow_args)
self.assertEqual(expected, input.narrow_copy(*narrow_args).to_dense())
def _all_narrow_combs(self, shape):
for dim, dim_sz in enumerate(shape):
for start in range(dim_sz):
for length in range(dim_sz - start):
yield [dim, start, length]
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_narrow(self, device, dtype, coalesced):
shape = [3, 3, 4, 2]
input, _, _ = self._gen_sparse(4, 19, shape, dtype, device, coalesced)
for narrow_args in self._all_narrow_combs(shape):
self._test_narrow(input, narrow_args)
self.assertRaises(RuntimeError, lambda: input.narrow_copy(-1, 0, 3)) # dim < 0
self.assertRaises(RuntimeError, lambda: input.narrow_copy(10, 0, 3)) # dim > input.dim()
self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, shape[0] + 1, 3)) # start > size of dim
self.assertRaises(RuntimeError, lambda: input.narrow_copy(0, 2, shape[0])) # start+length > size of dim
with_dense, _, _ = self._gen_sparse(2, 7, shape, dtype, device, coalesced)
for narrow_args in self._all_narrow_combs(shape):
self._test_narrow(with_dense, narrow_args)
self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3)) # dim > sparseDim + denseDim
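# Sketch of why log1p is supported on sparse tensors: log1p(0) == 0, so the
# operation preserves sparsity and sparse_tensor.log1p() should match
# to_dense().log1p(); the in-place variant log1p_ additionally requires a
# coalesced input and rejects integral dtypes (the float result can't be cast
# back). _test_log1p_tensor below checks each of these cases.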
def _test_log1p_tensor(self, sparse_tensor, coalesced):
def is_integral(dtype):
return dtype in get_all_int_dtypes()
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.log1p()
is_integral_dtype = is_integral(sparse_tensor.dtype)
self.assertEqual(expected_output, sparse_tensor.log1p().to_dense())
if is_integral_dtype:
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
sparse_tensor.coalesce().log1p_()
else:
self.assertEqual(expected_output, sparse_tensor.coalesce().log1p_().to_dense())
if not coalesced:
# test in-place op on uncoalesced input
with self.assertRaisesRegex(RuntimeError, "log1p_ requires coalesced input"):
sparse_tensor.log1p_()
if not is_integral_dtype:
sparse_tensor.requires_grad_()
self.assertTrue(sparse_tensor.requires_grad)
# test autograd
x = sparse_tensor.clone()
y = sparse_tensor.log1p()
with self.assertRaisesRegex(RuntimeError, "log1p of a sparse tensor is made to be non-differentiable"):
y.backward(x)
else:
with self.assertRaisesRegex(RuntimeError, "only Tensors of floating point dtype can require gradients"):
sparse_tensor.requires_grad_()
@coalescedonoff
@dtypes(*get_all_dtypes(include_bool=False, include_half=False,
include_bfloat16=False, include_complex=False))
def test_log1p(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2]]).transpose(1, 0),
values=torch.tensor([3.0, 4.0, 5.0]),
size=[3, ],
device=device,
dtype=dtype
).coalesce()
self._test_log1p_tensor(input_coalesced, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[1.0, 3.0], [5.0, 7.0]]),
size=[4, 5, 2],
device=device,
dtype=dtype
).coalesce()
self._test_log1p_tensor(input_coalesced, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0]),
size=[3, ],
device=device,
dtype=dtype
)
self._test_log1p_tensor(input_uncoalesced, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
device=device,
dtype=dtype
)
self._test_log1p_tensor(input_uncoalesced, coalesced)
def _test_neg_negative(self, sparse_tensor):
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.neg()
ops = (
torch.neg, torch.Tensor.neg, torch.Tensor.neg_,
torch.negative, torch.Tensor.negative, torch.Tensor.negative_,
operator.neg
)
for op in ops:
sparse_tensor_copy = sparse_tensor.clone()
self.assertEqual(expected_output, op(sparse_tensor_copy).to_dense())
if op in (torch.neg, torch.negative):
sparse_tensor_out = torch.zeros_like(sparse_tensor)
op(sparse_tensor, out=sparse_tensor_out)
self.assertEqual(expected_output, sparse_tensor_out.to_dense())
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_neg_negative(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2]]),
values=torch.tensor([3.0, -4.0, 5.0]),
size=[3, ],
dtype=dtype,
device=device
).coalesce()
self._test_neg_negative(input_coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-1.0, 3.0], [-5.0, 7.0]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_neg_negative(input_coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([2.0, -3.0, -4.0, 1.0, -1.0, 1.5]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_neg_negative(input_uncoalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
self._test_neg_negative(input_uncoalesced)
def _test_asin_arcsin(self, sparse_tensor, coalesced):
def is_integral(dtype):
return dtype in get_all_int_dtypes()
is_integral_dtype = is_integral(sparse_tensor.dtype)
dense_tensor = sparse_tensor.to_dense()
expected_output = dense_tensor.asin()
ops = (
torch.asin, torch.Tensor.asin,
torch.arcsin, torch.Tensor.arcsin,
)
for op in ops:
self.assertEqual(expected_output, op(sparse_tensor).to_dense())
if op in (torch.asin, torch.arcsin):
sparse_tensor_out = torch.zeros_like(sparse_tensor)
if not is_integral_dtype:
op(sparse_tensor, out=sparse_tensor_out)
self.assertEqual(expected_output, sparse_tensor_out.to_dense())
else:
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op(sparse_tensor, out=sparse_tensor_out)
for op in (torch.Tensor.asin_, torch.Tensor.arcsin_):
if is_integral_dtype:
# test coalesce on integral dtype tensor
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op(sparse_tensor.clone().coalesce()).to_dense()
else:
self.assertEqual(expected_output, op(sparse_tensor.clone().coalesce()).to_dense())
if not coalesced:
# test in-place op on uncoalesced input
with self.assertRaisesRegex(RuntimeError, "asin_ requires coalesced input"):
op(sparse_tensor)
@coalescedonoff
@dtypes(*get_all_dtypes(include_bool=False, include_half=False,
include_bfloat16=False, include_complex=False))
def test_asin_arcsin(self, device, dtype, coalesced):
if coalesced:
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0, 1, 2, 3]]),
values=torch.tensor([0.5, -0.5, 0.7, -0.7]),
size=[4, ],
dtype=dtype,
device=device
).coalesce()
self._test_asin_arcsin(input_coalesced, coalesced)
# hybrid sparse input
input_coalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[1, 3], [2, 4]]),
values=torch.tensor([[-0.1, 0.24], [-0.44, 0.1]]),
size=[4, 5, 2],
dtype=dtype,
device=device
).coalesce()
self._test_asin_arcsin(input_coalesced, coalesced)
if not coalesced:
# test uncoalesced input
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.tensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
values=torch.tensor([0.3, -0.3, -0.4, 0.3, -0.5, 0.15]),
size=[3, ],
dtype=dtype,
device=device
)
self._test_asin_arcsin(input_uncoalesced, coalesced)
# test on empty sparse tensor
input_uncoalesced = torch.sparse_coo_tensor(
indices=torch.zeros([2, 0]),
values=torch.zeros([0, 5, 5, 5, 5, 5, 5, 0]),
size=[0, 0, 5, 5, 5, 5, 5, 5, 0],
dtype=dtype,
device=device
)
self._test_asin_arcsin(input_uncoalesced, coalesced)
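# Sketch of the matrix-vector case tested next: a 2-D sparse COO matrix x and
# a dense 1-D vector t should satisfy x.matmul(t) == x.to_dense().matmul(t);
# mismatched inner dimensions raise, and mv between two sparse matrices is
# rejected outright.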
@coalescedonoff
@dtypes(torch.double)
def test_mv(self, device, dtype, coalesced):
def test_shape(di, dj, dk, nnz):
x, _, _ = self._gen_sparse(2, nnz, [di, dj], dtype, device, coalesced)
t = torch.randn(dk, dtype=dtype, device=device)
res = x.matmul(t)
expected = self.safeToDense(x).matmul(t)
self.assertEqual(res, expected)
test_shape(10, 100, 100, 20)
test_shape(100, 1000, 1000, 20)
test_shape(64, 10000, 10000, 20)
test_shape(0, 100, 100, 0)
test_shape(10, 0, 0, 0)
test_shape(10, 100, 100, 0)
test_shape(10, 100, 100, 20)
with self.assertRaisesRegex(RuntimeError, r"mv: expected self\.size\(-1\) == vec\.size\(-1\)"):
test_shape(10, 100, 10, 20)
with self.assertRaisesRegex(RuntimeError, "mv: two tensor dim should be 2 and 1"):
x, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
y, _, _ = self._gen_sparse(2, 20, [10, 100], dtype, device, coalesced)
res = x.mv(y)
@dtypes(*floating_and_complex_types())
def test_sparse_add_coalesce(self, device, dtype):
i = self.index_tensor([[1, 2, 1]], device=device)
v = torch.tensor([3, 4, 5], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3]))
y = self.sparse_tensor(i, v, torch.Size([3]))
z = x + y
self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
i = self.index_tensor([[1, 2, 1]], device=device)
v = torch.empty([3, 0], dtype=dtype, device=device)
x = self.sparse_tensor(i, v, torch.Size([3, 0]))
y = self.sparse_tensor(i, v, torch.Size([3, 0]))
z = x + y
self.assertFalse(z._indices().numel() != 2 and z.is_coalesced())
@onlyCUDA
def test_storage_not_null(self):
x = torch.cuda.sparse.FloatTensor(2)
self.assertNotEqual(x.get_device(), -1)
x = torch.cuda.sparse.FloatTensor(2, 0)
self.assertNotEqual(x.get_device(), -1)
@onlyCUDA
@deviceCountAtLeast(2)
def test_same_gpu(self, devices):
def check_device(x, device_id):
self.assertEqual(x.get_device(), device_id)
self.assertEqual(x._values().get_device(), device_id)
self.assertEqual(x._indices().get_device(), device_id)
dev1, dev2 = devices[0], devices[1]
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3]), device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev2)
x = self.sparse_tensor(i, v, torch.Size([3, 0]), device=1)
check_device(x, 1)
x = self.sparse_empty(3, device=1)
check_device(x, 1)
x = self.sparse_empty(3, 0, device=1)
check_device(x, 1)
i = self.index_tensor([[2]], device=dev2)
v = torch.tensor([5], device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3])))
i = self.index_tensor([[2]], device=dev2)
v = torch.empty(1, 0, device=dev1)
# NB: non-legacy constructor allows this and moves indices
self.assertRaises(RuntimeError, lambda: self.legacy_sparse_tensor(i, v, torch.Size([3, 0])))
def _test_new_device(self, size, device):
with torch.cuda.device(device):
x = torch.cuda.sparse.DoubleTensor(*size)
self.assertEqual(x.get_device(), device)
x1 = x.new()
x2 = x.new(2, 3)
self.assertEqual(x1.get_device(), device)
self.assertEqual(x2.get_device(), device)
@onlyCUDA
def test_new_device_single_gpu(self):
self._test_new_device((), 0)
self._test_new_device((30, 20), 0)
self._test_new_device((30, 20, 10), 0)
self._test_new_device((30, 20, 10, 0), 0)
@onlyCUDA
@unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
def test_new_device_multi_gpu(self):
self._test_new_device((), 1)
self._test_new_device((30, 20), 1)
self._test_new_device((30, 20, 10), 1)
self._test_new_device((30, 20, 10, 0), 1)
@coalescedonoff
@dtypes(torch.double, torch.cdouble)
def test_new(self, device, dtype, coalesced):
def test_shape(sparse_dims, nnz, with_size):
x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
if not x.is_cuda:
# CUDA sparse tensors currently requires the size to be
# specified if nDimV > 0
out = x.new(indices, values).coalesce()
x_c = x.coalesce()
self.assertEqual((out.indices(), out.values()), (x_c.indices(), x_c.values()))
self.assertEqual(x.new(indices, values, x.size()), x)
test_shape(3, 10, 100)
test_shape(3, 0, [100, 100, 0])
@onlyCPU # not really, but we only really want to run this once
@dtypes(torch.float64, torch.float32, torch.float16, torch.cfloat, torch.cdouble)
def test_factory(self, device, dtype):
for test_empty_tensor in [True, False]:
if test_empty_tensor:
default_size = torch.Size([1, 3, 0])
size = torch.Size([3, 3, 0])
else:
default_size = torch.Size([1, 3])
size = torch.Size([3, 3])
for include_size in [True, False]:
for use_tensor_idx in [True, False]:
for use_tensor_val in [True, False]:
for use_cuda in ([False] if not torch.cuda.is_available() else [True, False]):
# have to include size with cuda sparse tensors
include_size = include_size or use_cuda
long_dtype = torch.int64
device = torch.device('cpu') if not use_cuda else \
torch.device(torch.cuda.device_count() - 1)
indices = torch.tensor(([0], [2]), dtype=long_dtype) if use_tensor_idx else ([0], [2])
if test_empty_tensor:
values = torch.empty(1, 0).to(dtype)
else:
if use_tensor_val:
values = torch.tensor([1.], dtype=dtype)
else:
values = 1.
if include_size:
sparse_tensor = torch.sparse_coo_tensor(indices, values, size, dtype=dtype,
device=device, requires_grad=True)
else:
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=dtype,
device=device, requires_grad=True)
self.assertEqual(indices, sparse_tensor._indices())
self.assertEqual(values, sparse_tensor._values())
self.assertEqual(size if include_size else default_size, sparse_tensor.size())
self.assertEqual(dtype, sparse_tensor.dtype)
if use_cuda:
self.assertEqual(device, sparse_tensor._values().device)
self.assertEqual(True, sparse_tensor.requires_grad)
@dtypes(torch.double, torch.cdouble)
def test_factory_size_check(self, device, dtype):
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.tensor([.5, .5], dtype=dtype, device=device)
sizes = torch.Size([2, 3])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices.fill_(-1)
with self.assertRaisesRegex(RuntimeError, "found negative index"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 1, 0], dtype=dtype, device=device)
sizes = torch.Size([2, 3, 1, 0])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 2, 2], dtype=dtype, device=device)
sizes = torch.Size([0, 0, 2, 2])
with self.assertRaisesRegex(RuntimeError, "size is inconsistent with indices"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.tensor([[1, 1, 1], [1, 1, 1]], dtype=dtype, device=device)
sizes = torch.Size([3, 3, 2])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[1, 2],
[0, 2]], device=device)
values = torch.empty([2, 1, 0], dtype=dtype, device=device)
sizes = torch.Size([3, 3, 2, 0])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
def test_factory_default(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = self.index_tensor([[]], device=device)
expected_size = torch.Size([0])
self.assertEqual(tensor._indices(), expected_indices)
self.assertEqual(tensor.shape, expected_size)
def test_factory_empty_indices(self, device):
tensor = self.legacy_sparse_tensor()
expected_indices = torch.empty((1, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 0]), device=device)
expected_indices = torch.empty((2, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0]), device=device)
expected_indices = torch.empty((3, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
tensor = torch.sparse_coo_tensor(torch.Size([2, 2, 0, 0]), device=device)
expected_indices = torch.empty((4, 0), dtype=torch.long, device=device)
self.assertEqual(tensor._indices(), expected_indices)
@dtypes(torch.double, torch.cdouble)
def test_factory_nnz(self, device, dtype):
indices = self.index_tensor([[0]], device=device) # (sparse_dim, nnz): (1, 1)
values = torch.tensor([[1, 1], [1, 1]], dtype=dtype, device=device) # (nnz, ...): (2, 2)
sizes = torch.Size([2, 2])
with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
indices = self.index_tensor([[0]], device=device) # (sparse_dim, nnz): (1, 1)
values = torch.empty([2, 0], dtype=dtype, device=device) # (nnz, ...): (2, 0)
sizes = torch.Size([2, 0])
with self.assertRaisesRegex(RuntimeError, "indices and values must have same nnz"):
torch.sparse_coo_tensor(indices, values, sizes, dtype=dtype, device=device)
@dtypes(torch.double, torch.cdouble)
def test_factory_nnz_zero(self, device, dtype):
def test_shape(i_shape, v_shape, size, expected_size):
if size:
t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), torch.Size(size),
dtype=dtype, device=device)
else:
t = torch.sparse_coo_tensor(torch.empty(i_shape), torch.empty(v_shape), dtype=dtype, device=device)
expected_indices = torch.empty(i_shape, device=device, dtype=torch.int64)
expected_values = torch.empty(v_shape, device=device, dtype=dtype)
expected_size = torch.Size(expected_size)
self.assertEqual(t._indices(), expected_indices)
self.assertEqual(t._values(), expected_values)
self.assertEqual(t.size(), expected_size)
test_shape([1, 0], [0, 2, 4, 0], None, [0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], None, [0, 0, 0, 2, 4, 0])
test_shape([1, 0], [0, 2, 4, 0], [0, 2, 4, 0], [0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])
@dtypes(torch.double, torch.cdouble)
def test_factory_dense_dim(self, device, dtype):
indices = self.index_tensor([[0]], device=device)
values = torch.tensor([[[1, 1, 1], [1, 1, 1]]], dtype=dtype, device=device)
sizes = torch.Size([1, 3, 4])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes)
indices = self.index_tensor([[0]], device=device)
values = torch.empty([1, 2, 3, 0], dtype=dtype, device=device)
sizes = torch.Size([1, 3, 4, 0])
with self.assertRaisesRegex(RuntimeError, "values has incorrect size"):
torch.sparse_coo_tensor(indices, values, sizes)
@onlyCPU
@dtypes(torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble, torch.int64)
def test_factory_type_inference(self, device, dtype):
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1.], dtype=dtype))
self.assertEqual(dtype, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.tensor([1]))
self.assertEqual(torch.int64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.HalfTensor(1, 0))
self.assertEqual(torch.float16, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.FloatTensor(1, 0))
self.assertEqual(torch.float32, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.DoubleTensor(1, 0))
self.assertEqual(torch.float64, t.dtype)
t = torch.sparse_coo_tensor(torch.tensor(([0], [2])), torch.LongTensor(1, 0))
self.assertEqual(torch.int64, t.dtype)
@onlyCUDA
def test_factory_device_type_inference(self, device):
# both indices/values are CUDA
cpu_cuda = ('cpu', 'cuda')
cpu_cuda_none = cpu_cuda + (None,)
for indices_device, values_device, device in itertools.product(cpu_cuda,
cpu_cuda,
cpu_cuda_none):
indices = torch.tensor(([0], [2]), device=indices_device)
values = torch.tensor([1.], device=values_device)
empty_values = torch.empty(1, 0).to(values_device)
shape = (1, 3)
empty_shape = (1, 3, 0)
if device is None and indices_device != values_device:
with self.assertRaises(RuntimeError):
torch.sparse_coo_tensor(indices, values, shape, device=device)
with self.assertRaises(RuntimeError):
torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
else:
t = torch.sparse_coo_tensor(indices, values, shape, device=device)
t_empty = torch.sparse_coo_tensor(indices, empty_values, empty_shape, device=device)
should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
self.assertEqual(should_be_cuda, t.is_cuda)
self.assertEqual(t.is_cuda, t_empty.is_cuda)
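# Sketch of the copy semantics probed next: torch.sparse_coo_tensor only
# re-uses the storage of the indices/values tensors it is given when their
# dtype (and device) already match what the sparse tensor needs; otherwise a
# converted copy is made, which the data_ptr() comparisons below detect.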
@onlyCPU
def test_factory_copy(self, device):
def test_tensor(indices, values, indices_equal, values_equal):
sparse_tensor = torch.sparse_coo_tensor(indices, values, dtype=torch.float64, device=device)
if indices_equal:
self.assertEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
else:
self.assertNotEqual(indices.data_ptr(), sparse_tensor._indices().data_ptr())
if values_equal:
self.assertEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
else:
self.assertNotEqual(values.data_ptr(), sparse_tensor._values().data_ptr())
# both correct
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float64)
test_tensor(indices, values, True, True)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.DoubleTensor(1, 0)
test_tensor(indices, values, True, True)
# only indices correct
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float32)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.tensor([1.], dtype=torch.float16)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = torch.FloatTensor(1, 0)
test_tensor(indices, values, True, True) # An empty tensor's data_ptr is always equal to 0
# only values correct
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.tensor([1.], dtype=torch.float64)
test_tensor(indices, values, False, True)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.DoubleTensor(1, 0)
test_tensor(indices, values, False, True)
# neither correct
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.tensor([1.], dtype=torch.float32)
test_tensor(indices, values, False, False)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = torch.FloatTensor(1, 0)
test_tensor(indices, values, False, True) # An empty tensor's data_ptr is always equal to 0
# complex support
indices = torch.tensor(([0], [2]), dtype=torch.int64)
values = make_tensor([1, ], dtype=torch.cdouble, device=device)
test_tensor(indices, values, True, False)
indices = torch.tensor(([0], [2]), dtype=torch.int32)
values = make_tensor([1, 1], dtype=torch.cdouble, device=device)
test_tensor(indices, values, False, False)
@onlyCPU # just run once, we test both cpu and cuda
def test_constructor_device_legacy(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
x = torch.sparse_coo_tensor(i, v, size, device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
if torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.sparse.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
x = torch.sparse_coo_tensor(i, v, size, device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(i, v, size, device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
def test_legacy_constructor(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v.storage()))
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor(v))
self.assertEqual(torch.sparse_coo, torch.sparse.FloatTensor(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: torch.sparse.FloatTensor([6]))
def test_legacy_new(self, device):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3., 4., 5.])
size = torch.Size([2, 3])
s = torch.sparse_coo_tensor(i, v, size)
self.assertEqual(torch.sparse_coo, s.new(device='cpu').layout)
self.assertRaises(TypeError, lambda: s.new(v.storage()))
self.assertRaises(TypeError, lambda: s.new(v))
self.assertEqual(torch.sparse_coo, s.new(torch.Size([2, 3])).layout)
self.assertRaises(TypeError, lambda: s.new([6]))
@onlyCPU # not really, but we only really want to run this once
def test_dtypes(self, device):
all_sparse_dtypes = get_all_dtypes(include_complex=True)
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.is_available():
do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
@onlyCPU # not really, but we only really want to run this once
def test_empty_full(self, device):
all_sparse_dtypes = get_all_dtypes(include_complex=True)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
if torch.cuda.device_count() > 0:
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
def test_is_sparse(self, device):
x = torch.randn(3, 3)
self.assertFalse(x.is_sparse)
x = torch.randn(3, 3, 0)
self.assertFalse(x.is_sparse)
x = self.legacy_sparse_tensor()
self.assertTrue(x.is_sparse)
x = self.sparse_empty(1, 0, device=device)
self.assertTrue(x.is_sparse)
def test_resize_as(self, device):
def do_test(t):
y = t.new().resize_as_(t).zero_()
self.assertEqual(y.shape, t.shape)
# Check that y can be added to t. Currently, this requires that
# sparse_dim and dense_dim match.
self.assertEqual(t, t + y)
do_test(self.legacy_sparse_tensor())
do_test(self.sparse_empty([3, 0], device=device))
do_test(self.sparse_empty([3, 3], device=device))
def _test_resize_shape(self, x_i, x_v, x_size, y_i, y_v, y_size, dtype, device):
x_v_numel = torch.zeros(x_v).numel()
y_v_numel = torch.zeros(y_v).numel()
x = torch.sparse_coo_tensor(torch.zeros(x_i),
torch.arange(x_v_numel).resize_(x_v).to(torch.float),
torch.Size(x_size), dtype=dtype, device=device)
x_dense = x.to_dense()
y = torch.sparse_coo_tensor(torch.zeros(y_i),
torch.ones(y_v).to(torch.float),
torch.Size(y_size), dtype=dtype, device=device)
y_dense = y.to_dense()
x.resize_as_(y)
x_dense.resize_as_(y_dense)
self.assertEqual(x.shape, y.shape)
self.assertEqual(x.sparse_dim(), y.sparse_dim())
self.assertEqual(x.dense_dim(), y.dense_dim())
self.assertEqual(x.shape, x_dense.shape)
self.assertEqual(y.shape, y_dense.shape)
# Here we make sure that the original data are preserved after resizing
self.assertEqual(x.to_dense().view(-1)[0:x_v_numel].view(x_v),
x_dense.view(-1)[0:x_v_numel].view(x_v))
@dtypes(torch.double, torch.cdouble)
def test_resize(self, device, dtype):
# 1. Expand the size of some dense dimensions [Supported]
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 4], [2, 2, 4],
dtype=dtype, device=device)
self._test_resize_shape([1, 1], [1, 2, 0], [2, 2, 0],
[1, 1], [1, 2, 4], [2, 2, 4],
dtype=dtype, device=device)
# 2. Expand the size of some sparse dimensions [Supported]
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3], [4, 2, 3],
dtype=dtype, device=device)
# 3. Change the shapes of both sparse and dense dimensions when nnz is zero [Supported]
self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
[2, 0], [0, 2, 4, 5], [1, 1, 2, 4, 5],
dtype=dtype, device=device)
self._test_resize_shape([1, 0], [0, 2, 3], [2, 2, 3],
[2, 0], [0, 2, 4, 0], [1, 1, 2, 4, 0],
dtype=dtype, device=device)
# 4. Add dims to dense dimensions [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3, 4], [2, 2, 3, 4],
dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3, 0], [2, 2, 3, 0],
dtype=dtype, device=device)
# 5. Remove dims from dense dimensions [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2], [2, 2],
dtype=dtype, device=device)
# 6. Change the number of sparse dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "changing the number of sparse dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[2, 1], [1, 2, 3], [1, 2, 2, 3],
dtype=dtype, device=device)
# 7. Shrink the size of some sparse dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "shrinking the size of sparse dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 3], [1, 2, 3],
dtype=dtype, device=device)
# 8. Shrink the size of some dense dimensions on a non-empty sparse tensor [Not Supported]
with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 2], [2, 2, 2],
dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "shrinking the size of dense dimensions"):
self._test_resize_shape([1, 1], [1, 2, 3], [2, 2, 3],
[1, 1], [1, 2, 0], [2, 2, 0],
dtype=dtype, device=device)
def test_is_nonzero(self, device):
self.assertTrue(torch.sparse_coo_tensor(([0],), 1., (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0., (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0], [0]), 0., (1, 1), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,), device=device).is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,), device=device).is_nonzero())
# scalar sparse tensor
self.assertTrue(torch.sparse_coo_tensor(torch.zeros(0, 1), 12.3, [], device=device).is_nonzero())
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch.sparse_coo_tensor(([0, 1],), torch.empty(2, 0), (4, 0), device=device).is_nonzero()
self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cfloat, device=device)
.is_nonzero())
self.assertTrue(torch.sparse_coo_tensor(([0],), 2.3 - 4.5j, (1,), dtype=torch.cdouble, device=device)
.is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cfloat, device=device)
.is_nonzero())
self.assertFalse(torch.sparse_coo_tensor(([0],), 0. + 0j, (1,), dtype=torch.cdouble, device=device)
.is_nonzero())
def test_allow_tensor_metadata_change(self, device):
def do_test(t):
with self.assertRaisesRegex(
RuntimeError,
"raw_resize_ is not allowed on a Tensor created from .data or .detach()"):
t.transpose_(0, 1)
with self.assertRaisesRegex(
RuntimeError,
"resize_ is not allowed on a Tensor created from .data or .detach()"):
t.resize_as_(self.sparse_empty(3, 3))
with self.assertRaisesRegex(
RuntimeError,
"resize_and_clear_ is not allowed on a Tensor created from .data or .detach()"):
t.mul_(t)
with self.assertRaisesRegex(
RuntimeError,
"set_coalesced is not allowed on a Tensor created from .data or .detach()"):
t._coalesced_(True)
with self.assertRaisesRegex(
RuntimeError,
"set_indices_and_values_unsafe is not allowed on a Tensor created from .data or .detach()"):
a = self.sparse_tensor(torch.tensor([[0, 1, 1], [2, 0, 2]]), torch.tensor([3., 4., 5.])).data
a.add_(a)
with self.assertRaisesRegex(
RuntimeError,
"resize_and_clear_ is not allowed on a Tensor created from .data or .detach()"):
a.zero_()
with self.assertRaisesRegex(
RuntimeError,
"resize_ is not allowed on a Tensor created from .data or .detach()"):
a.copy_(self.sparse_empty(3, 3))
do_test(self.sparse_empty([3, 0], device=device).data)
do_test(self.sparse_empty([3, 0], device=device).detach())
@dtypes(torch.double, torch.cdouble)
def test_change_tensor_metadata(self, device, dtype):
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]), dtype=dtype, device=device)
i.resize_(2, 3)
v.resize_(4, 5)
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.resize_as_(self.index_tensor([0, 1], device=device))
v.resize_as_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.as_strided_((2, 1), (1, 1))
v.as_strided_((1, 3), (1, 1))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.set_(self.index_tensor([0, 1], device=device))
v.set_(torch.tensor([3, 4, 5], dtype=dtype, device=device))
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
i = self.index_tensor([[0], [1]], device=device)
v = torch.tensor([[3, 4, 5]], dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, torch.Size([1, 2, 3]))
i.transpose_(0, 1)
v.transpose_(0, 1)
self.assertEqual(list(t.coalesce().indices().size()), [2, 1])
self.assertEqual(list(t.coalesce().values().size()), [1, 3])
@skipIfRocm
@coalescedonoff
@dtypes(torch.double)
def test_pickle(self, device, dtype, coalesced):
import pickle
shape_sparse_dim_nnz = [
((), 0, 2),
((0,), 0, 10),
((2,), 0, 3),
((100, 3), 1, 3),
((100, 20, 3), 2, 0),
((10, 0, 3), 0, 3),
((10, 0, 3), 0, 0),
]
for shape, sparse_dim, nnz in shape_sparse_dim_nnz:
indices_shape = torch.Size((sparse_dim, nnz))
values_shape = torch.Size((nnz,) + shape[sparse_dim:])
indices = torch.arange(indices_shape.numel(), dtype=self.index_tensor(0).dtype,
device=device).view(indices_shape)
for d in range(sparse_dim):
indices[d].clamp_(max=(shape[d] - 1)) # make it valid index
if not coalesced and indices.numel() > 0:
indices[:, -1] = indices[:, 0] # make it uncoalesced
values_numel = values_shape.numel()
values = torch.arange(values_numel, dtype=dtype,
device=device).view(values_shape).div_(values_numel / 2.)
sp_tensor = self.sparse_tensor(indices, values, shape)
serialized = pickle.dumps(sp_tensor)
sp_tensor_loaded = pickle.loads(serialized)
self.assertEqual(sp_tensor, sp_tensor_loaded)
def test_any(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([False, False]), device=device)
t_any = torch.tensor(False)
self.assertEqual(torch.any(t), t_any)
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([True, False]), device=device)
t_any = torch.tensor(True)
self.assertEqual(torch.any(t), t_any)
def test_isnan(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, 4]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([False, False]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, float("nan")]), device=device)
t_nan = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([False, True]), device=device)
self.assertEqual(torch.isnan(t).int(), t_nan.int())
@coalescedonoff
@dtypes(torch.float32, torch.float64)
def test_div_rounding_mode(self, device, dtype, coalesced):
sparse, _, _ = self._gen_sparse(2, 10, (10, 10), dtype,
device, coalesced)
dense = self.safeToDense(sparse)
for mode in (None, 'floor', 'trunc'):
actual = sparse.div(-2, rounding_mode=mode)
expect = dense.div(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test inplace
actual = sparse.clone().div_(-2, rounding_mode=mode)
self.assertEqual(self.safeToDense(actual), expect)
# Test out argument
actual.zero_()
torch.div(sparse, -2, rounding_mode=mode, out=actual)
self.assertEqual(self.safeToDense(actual), expect)
def test_div_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse division requires',
lambda: torch.tensor(1., device=device).to_sparse()
/ torch.tensor(1., device=device).to_sparse())
def test_floor_divide_by_sparse_error(self, device):
self.assertRaisesRegex(RuntimeError, 'Sparse division requires',
lambda: torch.tensor(1., device=device).to_sparse()
// torch.tensor(1., device=device).to_sparse())
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
@onlyCPU
def test_sparse_to_numpy(self, device):
t = torch.sparse_coo_tensor(torch.tensor(([0, 0], [2, 0])), torch.tensor([1, 4]))
self.assertRaises(TypeError, lambda: t.numpy())
@coalescedonoff
@dtypes(torch.double)
def test_softmax(self, device, dtype, coalesced):
import torch.nn.functional as F
def to_dense(sparse, fill_value=None):
"""
            Return a dense tensor from a sparse tensor using the given fill value.
"""
if fill_value is None or fill_value == 0:
return sparse.to_dense()
sparse = sparse.coalesce()
dense = torch.full(sparse.shape, fill_value, dtype=sparse.dtype, device=sparse.device)
for idx, value in zip(sparse._indices().t(), sparse._values()):
dense[tuple(idx)] = value
return dense
def softmax_to_dense(sparse, dim):
"""Dense softmax of a sparse tensor. Useful only for testing softmax
correctness.
When computing softmax of a sparse tensor, the value of
unspecified items is negative infinity rather than zero so
that
softmax(sparse.to_dense(fill_value=-inf), dim) == softmax(sparse, dim).to_dense()
            holds for non-empty lines. On empty lines, the softmax
            values are defined as 0 in order to preserve the sparsity
            of the result.
            Note that in PyTorch, the ``to_dense`` method does not
            implement the ``fill_value`` keyword argument.
"""
dtype = sparse.dtype
device = sparse.device
dense = to_dense(sparse, fill_value=-float('inf'))
r = F.softmax(dense, dim)
            # softmax on empty lines results in nan; replace with zeros to match the definition
r[r != r] = 0
return r
def sparse_softmax(sparse, dim):
"""Pure Python softmax of a sparse tensor. Assuming -inf for
unspecified sparse tensor data. This is a prototype of
sparse softmax algorithm in Python.
"""
dtype = sparse.dtype
device = sparse.device
            # softmax is a non-linear operation, so sparse tensors must
            # be coalesced.
sparse = sparse.coalesce()
inf = float('inf')
indices = sparse._indices()
values = sparse._values()
if dim < sparse.sparse_dim():
nnz = sparse._nnz()
# compute pool indices
size = sparse.size()
strides = torch.ones((sparse.sparse_dim(), 1), dtype=indices.dtype, device=indices.device)
for i in reversed(range(sparse.sparse_dim() - 1)):
strides[i, 0] = strides[i + 1, 0] * size[i + 1]
strides[dim, 0] = 0
pool = (indices * strides).sum(dim=0)
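                # Example: for a 2-D sparse tensor of size (3, 4) with dim=0 the
                # strides above become [[0], [1]], so pool equals the column index;
                # entries differing only along dim 0 share a pool and are
                # normalized together.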
i2p = {}
for i in range(nnz):
c = int(pool[i])
if c not in i2p:
i2p[c] = len(i2p)
pool[i] = i2p[c]
# compute max
dense_size = tuple(size[sparse.sparse_dim():])
mx = torch.empty((pool.max() + 1,) + dense_size, dtype=dtype, device=device)
mx[:] = -inf
for n in range(nnz):
p = pool[n]
mx[p] = torch.max(mx[p], values[n])
# apply exp to (v - mx) and sum the results
exp_values = torch.empty_like(values)
exp_sums = torch.zeros_like(mx)
for n in range(nnz):
p = pool[n]
v = exp_values[n] = (values[n] - mx[p]).exp()
exp_sums[p] = exp_sums[p] + v
# normalize with the sum of exponents
for n in range(nnz):
p = pool[n]
exp_values[n] = exp_values[n] / exp_sums[p]
return torch.sparse_coo_tensor(indices,
exp_values,
sparse.size(),
dtype=dtype, device=device)
elif dim < sparse.sparse_dim() + sparse.dense_dim():
return torch.sparse_coo_tensor(indices,
F.softmax(values, dim - sparse.sparse_dim() + 1),
sparse.size(),
dtype=dtype, device=device)
else:
raise ValueError(
'`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`'
% (dim, sparse.sparse_dim(), sparse.dense_dim()))
def softmax_jacobian_analytic(x, dim):
"""Return Jacobian of softmax using analytic formula
D_jS_i = S_i * (1[i==j] - S_j).
where S = softmax(x, dim), x is dense tensor, i,j in
range(x.shape[dim]).
"""
y = F.softmax(x, dim)
y[y != y] = 0 # replace nan-s with zeros
J = torch.zeros((x.shape[dim],) + tuple(x.shape), dtype=x.dtype, device=x.device)
si = [slice(None)] * len(y.shape)
sj = [slice(None)] * len(y.shape)
s = [slice(None)] * len(J.shape)
for i in range(y.shape[dim]):
si[dim] = i
s[dim + 1] = i
yi = y[tuple(si)]
for j in range(y.shape[dim]):
sj[dim] = j
s[0] = j
if i == j:
J[tuple(s)] = yi * (1 - yi)
else:
yj = y[tuple(sj)]
J[tuple(s)] = - yi * yj
sj[dim] = slice(None)
si[dim] = slice(None)
s[dim + 1] = slice(None)
return J
def softmax_jacobian_autograd(x, dim, log=False):
"""Return Jacobian of softmax using PyTorch autograd feature.
x can be dense or sparse tensor.
"""
import itertools
if x.is_sparse:
x = x.coalesce()
dtype = x.dtype
device = x.device
shape = tuple(x.shape)
J = torch.zeros((shape[dim],) + shape, dtype=dtype, device=device)
for i in range(shape[dim]):
if x.is_sparse:
sparse_dim = x.sparse_dim()
dense_dim = x.dense_dim()
if dim < sparse_dim:
ranges = []
for j, sz in enumerate(shape[:sparse_dim]):
if dim == j:
ranges.append([i])
else:
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.ones((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
else:
ranges = []
for j, sz in enumerate(shape[:sparse_dim]):
ranges.append(list(range(sz)))
indices = torch.tensor(list(itertools.product(*ranges)), dtype=torch.long, device=device).t()
values = torch.zeros((indices.shape[1],) + shape[sparse_dim:], dtype=dtype, device=device)
sv = [slice(None)] * (dense_dim + 1)
sv[dim - sparse_dim + 1] = i
values[tuple(sv)] = 1
v = torch.sparse_coo_tensor(indices, values, shape, dtype=dtype, device=device)
else:
v = torch.zeros_like(x)
sv = [slice(None)] * len(v.shape)
sv[dim] = i
v[tuple(sv)] = 1
x_ = x.clone()
x_.requires_grad_(True)
if log:
if x_.is_sparse:
y = torch.sparse.log_softmax(x_, dim)
else:
y = F.log_softmax(x_, dim)
else:
if x_.is_sparse:
y = torch.sparse.softmax(x_, dim)
else:
y = F.softmax(x_, dim)
# replace nan-s with zeros
y.data[y != y] = 0
y.backward(v)
g = x_.grad
if not g.is_sparse:
# replace nan-s with zeros
g.data[g != g] = 0
J[i] = g.to_dense() if g.is_sparse else g
return J
def test_op(sparse_dims, nnz, with_size, coalesced):
if isinstance(with_size, Number):
with_size = [with_size] * sparse_dims
x, i, v = self._gen_sparse(sparse_dims, nnz, with_size, dtype, device, coalesced)
def sparse_log(x):
return torch.sparse_coo_tensor(x._indices(), x._values().log(),
x.size(), dtype=x.dtype, device=x.device)
for dim in range(x.sparse_dim() + x.dense_dim()):
# Check sparse softmax definition
# check Python sparse softmax
y = sparse_softmax(x, dim)
r1 = softmax_to_dense(x, dim)
r2 = y.to_dense()
self.assertEqual(r1, r2)
# check C++ sparse softmax
y1 = torch.sparse.softmax(x, dim)
self.assertEqual(y, y1)
# check C++ sparse log_softmax
ly1 = torch.sparse.log_softmax(x, dim)
self.assertEqual(ly1, sparse_log(y1))
# Check autograd support on sparse softmax
# check softmax Jacobian definition for dense input
x1 = to_dense(x, fill_value=float('-inf'))
J = softmax_jacobian_analytic(x1, dim)
assert J.shape[0] == x.shape[dim]
assert J.shape[dim + 1] == x.shape[dim]
# check softmax Jacobian from autograd, dense input
J2 = softmax_jacobian_autograd(x1, dim)
self.assertEqual(J, J2)
# check softmax Jacobian from autograd, sparse input
J3 = softmax_jacobian_autograd(x, dim)
self.assertEqual(J, J3)
'''
y = softmax(x, dim)
z = log(y) = log_softmax(x, dim)
Dy/Dx = J
Dz/Dx = Dz/Dy Dy/Dx = 1/y * J
=> J = J_log * y
'''
# log_softmax Jacobian from autograd, dense input
J2_log = softmax_jacobian_autograd(x1, dim, log=True)
# log_softmax Jacobian from autograd, sparse input
J3_log = softmax_jacobian_autograd(x, dim, log=True)
J = J.transpose(0, dim + 1)
J2_log = J2_log.transpose(0, dim + 1)
J3_log = J3_log.transpose(0, dim + 1)
self.assertEqual(J, J2_log * r1)
self.assertEqual(J, J3_log * r1)
if dim == 0:
# check dtype argument
other_dtype = torch.float32
y2 = torch.sparse.softmax(x, dim, dtype=other_dtype)
self.assertEqual(y2.dtype, other_dtype)
self.assertEqual(y2, y1.type(other_dtype))
ly2 = torch.sparse.log_softmax(x, dim, dtype=other_dtype)
self.assertEqual(ly2.dtype, other_dtype)
self.assertEqual(ly2, ly1.type(other_dtype))
test_op(1, 10, [3], coalesced)
test_op(1, 10, [2, 3], coalesced)
test_op(1, 10, [3, 2], coalesced)
test_op(2, 10, [2, 3, 4], coalesced)
test_op(2, 10, [3, 4], coalesced)
test_op(2, 5, [5, 4], coalesced)
test_op(2, 10, [3, 4, 2], coalesced)
test_op(3, 10, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2], coalesced)
test_op(3, 100, [3, 4, 2, 3], coalesced)
test_op(3, 100, [3, 4, 2, 3, 5, 2], coalesced)
test_op(4, 100, [3, 4, 2, 3, 5, 2], coalesced)
    # TODO: Check why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA
@skipIfRocm
@coalescedonoff
@dtypes(*get_all_complex_dtypes(),
*get_all_fp_dtypes(include_half=False, include_bfloat16=False))
@dtypesIfCUDA(*((torch.complex64,) if CUDA11OrLater else ()),
*((torch.complex128,) if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else ()),
*get_all_fp_dtypes(
include_half=(CUDA11OrLater and SM53OrLater),
include_bfloat16=(CUDA11OrLater and SM80OrLater)))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
def test_sparse_matmul(self, device, dtype, coalesced):
"""
        This function tests `torch.sparse.mm` when both mat1 and mat2 are sparse tensors.
"""
def ref_sparse_mm(a, b):
return a.to_dense() @ b.to_dense()
def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):
def test_grad_dense(a_s, b_s, g_s):
a = a_s.to_dense().detach()
b = b_s.to_dense().detach()
g = g_s.to_dense().detach()
a.requires_grad_(True)
b.requires_grad_(True)
c = a @ b
c.backward(g)
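                # Project the dense gradients onto the sparsity patterns of a_s and
                # b_s so they can be compared with the sparse-path gradients.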
return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())
a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
a.requires_grad_(True)
b.requires_grad_(True)
c = torch.sparse.mm(a, b)
c2 = c.to_dense().detach()
c2 = torch.rand_like(c2)
g = c2.sparse_mask(c.coalesce())
c.backward(g)
a_grad, b_grad = test_grad_dense(a, b, g)
self.assertEqual(a.grad, a_grad)
self.assertEqual(b.grad, b_grad)
def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
# dense implementation
r1 = ref_sparse_mm(a, b)
# cpp implementation
r2 = torch.sparse.mm(a, b)
self.assertEqual(r1, r2.to_dense())
if dtype in [torch.double, torch.cdouble]:
a.requires_grad_(True)
b.requires_grad_(True)
# check autograd support on sparse matmul
def fn(D1, D2):
return torch.sparse.mm(D1, D2).to_dense()
if a.is_cuda:
                    # For CUDA, `nondet_tol` is set to `1e-5` because cuSparse sometimes
                    # returns approximately-zero values (on the order of 1e-323) when
                    # chaining `torch.sparse.mm` operations.
                    # TODO: Check this cuSparse issue.
gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)
else:
gradcheck(fn, (a, b), check_sparse_nnz=True)
grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)
def test_error_cases():
def fn(sparse_dims, nnz, shape_a, shape_b):
a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)
r2 = torch.sparse.mm(a, b)
# This is not a matrix
self.assertRaises(RuntimeError, lambda: fn(3, 4, [2, 2, 2], [2, 2, 2]))
            # Shapes do not match
self.assertRaisesRegex(RuntimeError,
r"mat1 and mat2 shapes cannot be multiplied \(2x3 and 4x2\)",
lambda: fn(2, 10, [2, 3], [4, 2]))
def different_dtypes():
a, i_a, v_a = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
b, i_b, v_b = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)
r2 = torch.sparse.mm(a.to(torch.float64), a.to(torch.float32))
self.assertRaisesRegex(RuntimeError, 'mat1 dtype Double does not match mat2 dtype Float', different_dtypes)
for n in range(2, 5):
for m in range(2, 8):
for p in range(2, 8):
test_sparse_matmul(2, 10, [n, m], [m, p])
test_sparse_matmul(2, 0, [0, 0], [0, 0])
test_sparse_matmul(2, 0, [0, 10], [10, 0])
test_error_cases()
@coalescedonoff
@dtypes(torch.double)
def test_assign(self, device, dtype, coalesced):
def assign_to():
a, i_a, v_a = self._gen_sparse(2, 5, [2, 3], dtype, device, coalesced)
a[0] = 100
self.assertRaises(TypeError, assign_to)
def test_cpu_sparse_dense_mul(self, device):
# general multiplication is not supported, but 0dim multiplication is supported
s = torch.sparse_coo_tensor([[0], [1]], [5.0], (2, 3), device=device)
t23 = s.to_dense()
t0 = torch.tensor(2.0, device=device)
r = s * 2.0
self.assertEqual(r, 2.0 * s)
self.assertEqual(r, t0 * s)
self.assertEqual(r, s * t0)
if device == 'cpu':
with self.assertRaisesRegex(RuntimeError, r"mul\(sparse, dense\) is not supported"):
s * t23
with self.assertRaisesRegex(RuntimeError, r"mul\(dense, sparse\) is not supported"):
t23 * s
elif device == 'cuda':
with self.assertRaisesRegex(NotImplementedError, "CUDA"):
s * t23
with self.assertRaisesRegex(NotImplementedError, "CUDA"):
t23 * s
class TestSparseOneOff(TestCase):
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_from_cpu(self):
with self.assertRaisesRegex(
RuntimeError,
"backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4),
[3, 4, 4])
with self.assertRaisesRegex(
RuntimeError,
"backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
torch.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0),
[3, 4, 4, 0])
with self.assertRaisesRegex(
RuntimeError,
"backend of indices \\(CUDA\\) must match backend of values \\(CPU\\)"):
torch.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
torch.randn(0, 4, 4, 0),
[0, 4, 4, 0])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_sparse_cpu_dense_add(self):
x = torch.zeros(3, 4, 4)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4).cuda(),
[3, 4, 4])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(3, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0).cuda(),
[3, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(0, 4, 4, 0)
sparse_y = torch.cuda.sparse.FloatTensor(torch.LongTensor(1, 0).cuda(),
torch.randn(0, 4, 4, 0).cuda(),
[0, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
def _sparse_to_dense(tensor):
if tensor.dtype != torch.bool:
return tensor.to_dense()
# to_dense uses coalesce which isn't implemented for bool
return tensor.to(torch.int8).to_dense().to(torch.bool)
_sparse_unary_ops = ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
class TestSparseUnaryUfuncs(TestCase):
exact_dtype = True
@_sparse_unary_ops
def test_sparse_consistency(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if len(samples) == 0:
self.skipTest("Skipped! No sample inputs!")
sample = samples[0]
assert isinstance(sample.input, torch.Tensor)
expected = op(sample.input, *sample.args, **sample.kwargs)
assert torch.is_tensor(expected)
output = op(sample.input.to_sparse(), *sample.args, **sample.kwargs)
assert torch.is_tensor(output)
self.assertEqual(_sparse_to_dense(output), expected)
@_sparse_unary_ops
def test_out(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if len(samples) == 0:
self.skipTest("Skipped! No sample inputs!")
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
sample = samples[0]
sample.input = sample.input.to_sparse()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = torch.zeros(sample.input.shape, device=device,
dtype=expect.dtype, layout=torch.sparse_coo)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@_sparse_unary_ops
def test_inplace(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if len(samples) == 0:
self.skipTest("Skipped! No sample inputs!")
if op.inplace_variant is None:
self.skipTest("Skipped! Out not supported")
sample = samples[0]
sample.input = sample.input.to_sparse().coalesce()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type .* can't be cast to"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
return
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
@_sparse_unary_ops
def test_sparse_zero_dims(self, device, dtype, op):
# test 0x0 sparse_coo_tensor
indices = torch.empty(2, 0, dtype=torch.int64)
values = torch.empty(0, dtype=dtype)
sparse_0x0 = torch.sparse_coo_tensor(indices, values, (0, 0))
expected = torch.sparse_coo_tensor(indices, op(values), (0, 0))
actual = op(sparse_0x0)
self.assertEqual(expected, actual)
@_sparse_unary_ops
def test_sparse_zeros(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
zero_input = torch.zeros((), device=device, dtype=dtype)
sparse_input = torch.zeros((), dtype=dtype, device=device,
layout=torch.sparse_coo)
expect = op(zero_input)
actual = op(sparse_input)
self.assertEqual(expect, _sparse_to_dense(actual))
@ops(sparse_unary_ufuncs, dtypes=OpDTypes.supported,
allowed_dtypes=[torch.double, torch.cdouble])
def test_sparse_fn_grad(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Op doesn't support autograd")
for sample in op.sample_inputs(device, dtype):
sparse_input = sample.input.to_sparse().detach().requires_grad_(True)
def fn(x):
return _sparse_to_dense(
op(x, *sample.args, **sample.kwargs))
self.assertTrue(gradcheck(
fn,
(sparse_input,),
check_batched_grad=False,
check_grad_dtypes=True,
check_sparse_nnz=True,
nondet_tol=op.gradcheck_nondet_tol,
fast_mode=op.gradcheck_fast_mode))
# e.g., TestSparseUnaryUfuncsCPU and TestSparseUnaryUfuncsCUDA
instantiate_device_type_tests(TestSparseUnaryUfuncs, globals(), except_for='meta')
# e.g., TestSparseCPU and TestSparseCUDA
instantiate_device_type_tests(TestSparse, globals(), except_for='meta')
if __name__ == '__main__':
run_tests()
|
the-stack_106_23747 | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for building the input features for the AlphaFold model."""
import os
from typing import Mapping, Optional, Sequence, List, Dict
from absl import logging
from alphafold.common import residue_constants
from alphafold.data import parsers
from alphafold.data import templates
from alphafold.data.tools import hhblits
from alphafold.data.tools import hhsearch
from alphafold.data.tools import jackhmmer
#from alphafold.data.tools import mmseqs as mmseqs2
import numpy as np
from dataclasses import dataclass
from string import ascii_uppercase
import pickle
from pathlib import Path
import random
# Internal import (7716).
FeatureDict = Mapping[str, np.ndarray]
@dataclass
class Mock_Template_Result():
features: FeatureDict
def make_mock_template(query_sequence: str):
    # Since AlphaFold's model requires a template input,
    # we create a blank example with zero input and confidence -1.
ln = len(query_sequence)
output_templates_sequence = "-" * ln
output_confidence_scores = np.full(ln, -1)
templates_all_atom_positions = np.zeros(
(ln, templates.residue_constants.atom_type_num, 3))
templates_all_atom_masks = np.zeros(
(ln, templates.residue_constants.atom_type_num))
templates_aatype = templates.residue_constants.sequence_to_onehot(
output_templates_sequence,
templates.residue_constants.HHBLITS_AA_TO_ID)
template_features = {
'template_all_atom_positions': templates_all_atom_positions[None],
'template_all_atom_masks': templates_all_atom_masks[None],
        'template_sequence': np.array(['none'.encode()]),
'template_aatype': np.array(templates_aatype)[None],
'template_confidence_scores': output_confidence_scores[None],
        'template_domain_names': np.array(['none'.encode()]),
        'template_release_date': np.array(['none'.encode()])
}
return Mock_Template_Result(features=template_features)
def _placeholder_template_feats(num_templates_, num_res_):
return {
'template_aatype':
np.zeros([num_templates_, num_res_, 22], np.float32),
        'template_all_atom_masks':
            np.zeros([num_templates_, num_res_, 37], np.float32),
        'template_all_atom_positions':
            np.zeros([num_templates_, num_res_, 37, 3], np.float32),
'template_domain_names':
np.zeros([num_templates_], np.float32),
'template_sum_probs':
np.zeros([num_templates_], np.float32),
}
def make_sequence_features(sequence: str,
description: str,
num_res: int,
homooligomer: int = 1) -> FeatureDict:
"""Constructs a feature dict of sequence features."""
Ln: int = len(sequence)
Ls: Sequence[int] = [Ln] * homooligomer
num_res = num_res * homooligomer
sequence = sequence * homooligomer
features = {}
features['aatype'] = residue_constants.sequence_to_onehot(
sequence=sequence,
mapping=residue_constants.restype_order_with_x,
map_unknown_to_x=True)
features['between_segment_residues'] = np.zeros((num_res, ),
dtype=np.int32)
features['domain_name'] = np.array([description.encode('utf-8')],
dtype=np.object_)
features['residue_index'] = np.array(range(num_res), dtype=np.int32)
features['seq_length'] = np.array([num_res] * num_res, dtype=np.int32)
features['sequence'] = np.array([sequence.encode('utf-8')],
dtype=np.object_)
if homooligomer > 1:
# add big enough number to residue index to indicate chain breaks between oligomers
idx_res = features['residue_index']
L_prev = 0
# Ls: number of residues in each chain
for L_i in Ls[:-1]:
idx_res[L_prev + L_i:] += 200
L_prev += L_i
chains = list("".join(
[ascii_uppercase[n] * L for n, L in enumerate(Ls)]))
features['residue_index'] = idx_res
return features
def make_msa_features(msas: Sequence[Sequence[str]],
deletion_matrices: Sequence[parsers.DeletionMatrix],
Ln: int,
msa_size_gb: float,
homooligomer: int = 1) -> FeatureDict:
"""Constructs a feature dict of MSA features."""
if not msas:
raise ValueError('At least one MSA must be provided.')
# Flatten and denormalize the MSA. The denormalized form has every
# sequence from all the MSAs, times the number of homooligomers.
denorm_msa = []
denorm_deletion_matrix = []
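    # Denormalization example: with homooligomer=2 and a length-3 alignment row
    # "ABC", the tiled copies are "ABC---" and "---ABC", with the corresponding
    # deletion rows padded with zeros in the same positions.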
for msa_idx, (msa, deletion_matrix) in enumerate(zip(msas, deletion_matrices)):
if not msa:
raise ValueError(
f'MSA {msa_idx} must contain at least one sequence.')
for sequence, deletion_row in zip(msa, deletion_matrix):
for olig_idx in range(homooligomer):
L = Ln * olig_idx
R = Ln * (homooligomer - (olig_idx + 1))
denorm_msa.append("-" * L + sequence + "-" * R)
denorm_deletion_matrix.append([0] * L + deletion_row + [0] * R)
    # Cap the MSA so it fits in msa_size_gb; each row of the msa array takes
    # Ln * homooligomer * 4 bytes (int32).
max_msa_sequences = (msa_size_gb * 1024 * 1024 * 1024) // (Ln * homooligomer * 4)
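    # For example, with msa_size_gb=1.99 and a 300-residue monomer
    # (homooligomer=1) the cap works out to roughly 1.78 million sequences.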
# Randomly select a subset of the flattened form and convert to ints.
int_msa = []
deletion_matrix = []
seen_sequences = set()
for index in random.sample(range(len(denorm_msa)), k=len(denorm_msa)):
sequence = denorm_msa[index]
deletion_row = denorm_deletion_matrix[index]
# Don't add duplicate sequences to the MSA.
if sequence in seen_sequences:
continue
seen_sequences.add(sequence)
int_msa.append(
[residue_constants.HHBLITS_AA_TO_ID[res] for res in sequence])
deletion_matrix.append(deletion_row)
if len(seen_sequences) >= max_msa_sequences:
break
num_res = len(denorm_msa[0])
num_alignments = len(int_msa)
features = {}
features['deletion_matrix_int'] = np.array(deletion_matrix, dtype=np.int32)
features['msa'] = np.array(int_msa, dtype=np.int32)
features['num_alignments'] = np.array([num_alignments] * num_res,
dtype=np.int32)
return features
def homooligomerize(msas, deletion_matrices, homooligomer=1):
'''
From https://github.com/sokrypton/ColabFold/blob/main/beta/colabfold.py
'''
if homooligomer == 1:
return msas, deletion_matrices
else:
new_msas = []
new_mtxs = []
for o in range(homooligomer):
for msa, mtx in zip(msas, deletion_matrices):
num_res = len(msa[0])
L = num_res * o
R = num_res * (homooligomer - (o + 1))
new_msas.append(["-" * L + s + "-" * R for s in msa])
new_mtxs.append([[0] * L + m + [0] * R for m in mtx])
return new_msas, new_mtxs
def homooligomerize_heterooligomer(msas, deletion_matrices, lengths,
homooligomers):
'''
From https://github.com/sokrypton/ColabFold/blob/main/beta/colabfold.py
----- inputs -----
msas: list of msas
deletion_matrices: list of deletion matrices
lengths: list of lengths for each component in complex
homooligomers: list of number of homooligomeric copies for each component
----- outputs -----
(msas, deletion_matrices)
'''
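    # Example: lengths=[7, 5] with homooligomers=[2, 1] describes a complex of
    # total length 2*7 + 1*5 = 19; frag_ij computed below becomes [[0, 7], [7, 12]],
    # i.e. the per-component slices of the concatenated (untiled) query sequence.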
if max(homooligomers) == 1:
return msas, deletion_matrices
elif len(homooligomers) == 1:
return homooligomerize(msas, deletion_matrices, homooligomers[0])
else:
frag_ij = [[0, lengths[0]]]
for length in lengths[1:]:
j = frag_ij[-1][-1]
frag_ij.append([j, j + length])
# for every msa
mod_msas, mod_mtxs = [], []
for msa, mtx in zip(msas, deletion_matrices):
mod_msa, mod_mtx = [], []
# for every sequence
for n, (s, m) in enumerate(zip(msa, mtx)):
# split sequence
_s, _m, _ok = [], [], []
for i, j in frag_ij:
_s.append(s[i:j])
_m.append(m[i:j])
_ok.append(max([o != "-" for o in _s[-1]]))
if n == 0:
# if first query sequence
mod_msa.append("".join(
[x * h for x, h in zip(_s, homooligomers)]))
mod_mtx.append(
sum([x * h for x, h in zip(_m, homooligomers)], []))
elif sum(_ok) == 1:
# elif one fragment: copy each fragment to every homooligomeric copy
a = _ok.index(True)
for h_a in range(homooligomers[a]):
_blank_seq = [["-" * l] * h
for l, h in zip(lengths, homooligomers)]
_blank_mtx = [[[0] * l] * h
for l, h in zip(lengths, homooligomers)]
_blank_seq[a][h_a] = _s[a]
_blank_mtx[a][h_a] = _m[a]
mod_msa.append("".join(
["".join(x) for x in _blank_seq]))
mod_mtx.append(
sum([sum(x, []) for x in _blank_mtx], []))
else:
# else: copy fragment pair to every homooligomeric copy pair
for a in range(len(lengths) - 1):
if _ok[a]:
for b in range(a + 1, len(lengths)):
if _ok[b]:
for h_a in range(homooligomers[a]):
for h_b in range(homooligomers[b]):
_blank_seq = [
["-" * l] * h for l, h in zip(
lengths, homooligomers)
]
_blank_mtx = [
[[0] * l] * h for l, h in zip(
lengths, homooligomers)
]
for c, h_c in zip([a, b],
[h_a, h_b]):
_blank_seq[c][h_c] = _s[c]
_blank_mtx[c][h_c] = _m[c]
mod_msa.append("".join([
"".join(x) for x in _blank_seq
]))
mod_mtx.append(
sum([
sum(x, [])
for x in _blank_mtx
], []))
mod_msas.append(mod_msa)
mod_mtxs.append(mod_mtx)
return mod_msas, mod_mtxs
def chain_break(idx_res, Ls, length=200):
'''From https://github.com/sokrypton/ColabFold/blob/main/beta/colabfold.py'''
# Minkyung's code
# add big enough number to residue index to indicate chain breaks
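    # e.g. for Ls=[7, 7] the second chain's residue indices shift from 7..13 to
    # 207..213; the large index gap is what indicates the chain break downstream.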
L_prev = 0
for L_i in Ls[:-1]:
idx_res[L_prev + L_i:] += length
L_prev += L_i
return idx_res
class DataPipeline:
"""Runs the alignment tools and assembles the input features."""
def __init__(self,
jackhmmer_binary_path: str,
hhblits_binary_path: str,
hhsearch_binary_path: str,
uniref90_database_path: str,
mgnify_database_path: str,
bfd_database_path: Optional[str],
uniclust30_database_path: Optional[str],
small_bfd_database_path: Optional[str],
pdb70_database_path: str,
template_featurizer: templates.TemplateHitFeaturizer,
mmseqs_binary_path: str,
mmseqs_uniref50_database_path: str,
mmseqs_mgnify_database_path: str,
mmseqs_small_bfd_database_path: str,
mmseqs: bool,
use_small_bfd: bool,
tmp_dir: Path,
mgnify_max_hits: int = 501,
uniref_max_hits: int = 25000,
bfd_max_hits: int = 25000):
"""Constructs a feature dict for a given FASTA file."""
self._use_small_bfd = use_small_bfd
self.jackhmmer_uniref90_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=uniref90_database_path,
tmp_dir=tmp_dir,
get_tblout=True)
if use_small_bfd:
self.jackhmmer_small_bfd_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=small_bfd_database_path,
tmp_dir=tmp_dir,
get_tblout=True)
else:
self.hhblits_bfd_uniclust_runner = hhblits.HHBlits(
binary_path=hhblits_binary_path,
databases=[bfd_database_path, uniclust30_database_path],
tmp_dir=tmp_dir)
self.jackhmmer_mgnify_runner = jackhmmer.Jackhmmer(
binary_path=jackhmmer_binary_path,
database_path=mgnify_database_path,
tmp_dir=tmp_dir,
get_tblout=True)
self.hhsearch_pdb70_runner = hhsearch.HHSearch(
binary_path=hhsearch_binary_path,
databases=[pdb70_database_path],
tmp_dir=tmp_dir)
#self.mmseqs_runner = mmseqs2.MMSeqs(
# binary_path=mmseqs_binary_path,
# uniref50_database_path=mmseqs_uniref50_database_path,
# mgnify_database_path=mmseqs_mgnify_database_path,
# small_bfd_database_path=mmseqs_small_bfd_database_path)
self.template_featurizer = template_featurizer
self.mgnify_max_hits = mgnify_max_hits
self.uniref_max_hits = uniref_max_hits
self.bfd_max_hits = bfd_max_hits
def process(self,
input_fasta_path: str,
msa_output_dir: str,
msa_size_gb: float,
homooligomer: str = '1') -> FeatureDict:
"""Runs alignment tools on the input sequence and creates features."""
with open(input_fasta_path) as f:
input_fasta_str = f.read()
input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)
if len(input_seqs) != 1:
raise ValueError(
f'More than one input sequence found in {input_fasta_path}.')
assert len(homooligomer) == 1
homooligomer = int(homooligomer)
input_sequence = input_seqs[0]
input_description = input_descs[0]
num_res = len(input_sequence)
uniref90_out_path = os.path.join(msa_output_dir, 'uniref90_hits.sto')
if not os.path.exists(uniref90_out_path):
jackhmmer_uniref90_result = self.jackhmmer_uniref90_runner.query(
input_fasta_path)[0]
with open(uniref90_out_path, 'w') as f:
f.write(jackhmmer_uniref90_result['sto'])
else:
jackhmmer_uniref90_result = {}
with open(uniref90_out_path, 'r') as f:
jackhmmer_uniref90_result['sto'] = f.read()
mgnify_out_path = os.path.join(msa_output_dir, 'mgnify_hits.sto')
if not os.path.exists(mgnify_out_path):
jackhmmer_mgnify_result = self.jackhmmer_mgnify_runner.query(
input_fasta_path)[0]
with open(mgnify_out_path, 'w') as f:
f.write(jackhmmer_mgnify_result['sto'])
else:
jackhmmer_mgnify_result = {}
            with open(mgnify_out_path, 'r') as f:
jackhmmer_mgnify_result['sto'] = f.read()
pdb70_out_path = os.path.join(msa_output_dir, 'pdb70_hits.hhr')
if not os.path.exists(pdb70_out_path):
uniref90_msa_as_a3m = parsers.convert_stockholm_to_a3m(
jackhmmer_uniref90_result['sto'],
max_sequences=self.uniref_max_hits)
hhsearch_result = self.hhsearch_pdb70_runner.query(
uniref90_msa_as_a3m)
with open(pdb70_out_path, 'w') as f:
f.write(hhsearch_result)
else:
with open(pdb70_out_path, 'r') as f:
hhsearch_result = f.read()
uniref90_msa, uniref90_deletion_matrix, _ = parsers.parse_stockholm(
jackhmmer_uniref90_result['sto'])
mgnify_msa, mgnify_deletion_matrix, _ = parsers.parse_stockholm(
jackhmmer_mgnify_result['sto'])
hhsearch_hits = parsers.parse_hhr(hhsearch_result)
#mgnify_msa = mgnify_msa[:self.mgnify_max_hits]
#mgnify_deletion_matrix = mgnify_deletion_matrix[:self.mgnify_max_hits]
#uniref90_msa = uniref90_msa[:self.uniref_max_hits]
#uniref90_deletion_matrix = uniref90_deletion_matrix[:self.uniref_max_hits]
if self._use_small_bfd:
bfd_out_path = os.path.join(msa_output_dir, 'small_bfd_hits.a3m')
if not os.path.exists(bfd_out_path):
jackhmmer_small_bfd_result = self.jackhmmer_small_bfd_runner.query(
input_fasta_path)[0]
with open(bfd_out_path, 'w') as f:
f.write(jackhmmer_small_bfd_result['sto'])
else:
jackhmmer_small_bfd_result = {}
with open(bfd_out_path, 'r') as f:
jackhmmer_small_bfd_result['sto'] = f.read()
bfd_msa, bfd_deletion_matrix, _ = parsers.parse_stockholm(
jackhmmer_small_bfd_result['sto'])
else:
bfd_out_path = os.path.join(msa_output_dir,
'bfd_uniclust_hits.a3m')
if not os.path.exists(bfd_out_path):
hhblits_bfd_uniclust_result = self.hhblits_bfd_uniclust_runner.query(
input_fasta_path)
with open(bfd_out_path, 'w') as f:
f.write(hhblits_bfd_uniclust_result['a3m'])
else:
hhblits_bfd_uniclust_result = {}
with open(bfd_out_path, 'r') as f:
hhblits_bfd_uniclust_result['a3m'] = f.read()
bfd_msa, bfd_deletion_matrix = parsers.parse_a3m(
hhblits_bfd_uniclust_result['a3m'])
#bfd_msa = bfd_msa[:self.bfd_max_hits]
#bfd_deletion_matrix = bfd_deletion_matrix[:self.bfd_max_hits]
if homooligomer > 1:
templates_result = make_mock_template(
query_sequence=input_sequence * homooligomer)
else:
templates_result = self.template_featurizer.get_templates(
query_sequence=input_sequence,
query_pdb_code=None,
query_release_date=None,
hits=hhsearch_hits)
sequence_features = make_sequence_features(
sequence=input_sequence,
description=input_description,
num_res=num_res,
homooligomer=homooligomer)
msa_features = make_msa_features(
msas=(uniref90_msa, bfd_msa, mgnify_msa),
deletion_matrices=(uniref90_deletion_matrix, bfd_deletion_matrix,
mgnify_deletion_matrix),
Ln=len(input_sequence),
msa_size_gb=msa_size_gb,
homooligomer=homooligomer)
logging.info('Uniref90 MSA size: %d sequences.', len(uniref90_msa))
logging.info('BFD MSA size: %d sequences.', len(bfd_msa))
logging.info('MGnify MSA size: %d sequences.', len(mgnify_msa))
logging.info('Final (deduplicated) MSA size: %d sequences.',
msa_features['num_alignments'][0])
logging.info(
'Total number of templates (NB: this can include bad '
'templates and is later filtered to top 4): %d.',
templates_result.features['template_domain_names'].shape[0])
return {
**sequence_features,
**msa_features,
**templates_result.features
}
def create_msas(self, input_fasta_path: str, msa_output_dir: str) -> None:
"""Runs alignment tools on the input sequence."""
with open(input_fasta_path) as f:
input_fasta_str = f.read()
input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)
if len(input_seqs) != 1:
raise ValueError(
f'More than one input sequence found in {input_fasta_path}.')
input_sequence = input_seqs[0]
input_description = input_descs[0]
num_res = len(input_sequence)
prefix: str = input_description
dbs = []
pickled_msa_path = os.path.join(msa_output_dir, f"{prefix}.pickle")
logging.info(f'Pickled MSA Path: {pickled_msa_path}')
if not os.path.exists(pickled_msa_path):
logging.info(
f'Pickled MSA Path does not exist yet: {pickled_msa_path}')
uniref90_out_path = os.path.join(msa_output_dir,
f'{prefix}_uniref90_hits.sto')
if not os.path.exists(uniref90_out_path):
jackhmmer_uniref90_result = self.jackhmmer_uniref90_runner.query(
input_fasta_path)[0]
with open(uniref90_out_path, 'w') as f:
f.write(jackhmmer_uniref90_result['sto'])
else:
jackhmmer_uniref90_result = {}
with open(uniref90_out_path, 'r') as f:
jackhmmer_uniref90_result['sto'] = f.read()
mgnify_out_path = os.path.join(msa_output_dir,
f'{prefix}_mgnify_hits.sto')
if not os.path.exists(mgnify_out_path):
jackhmmer_mgnify_result = self.jackhmmer_mgnify_runner.query(
input_fasta_path)[0]
with open(mgnify_out_path, 'w') as f:
f.write(jackhmmer_mgnify_result['sto'])
else:
jackhmmer_mgnify_result = {}
                with open(mgnify_out_path, 'r') as f:
jackhmmer_mgnify_result['sto'] = f.read()
if self._use_small_bfd:
bfd_out_path = os.path.join(msa_output_dir,
f'{prefix}_small_bfd_hits.a3m')
if not os.path.exists(bfd_out_path):
jackhmmer_small_bfd_result = self.jackhmmer_small_bfd_runner.query(
input_fasta_path)[0]
with open(bfd_out_path, 'w') as f:
f.write(jackhmmer_small_bfd_result['sto'])
else:
jackhmmer_small_bfd_result = {}
with open(bfd_out_path, 'r') as f:
jackhmmer_small_bfd_result['sto'] = f.read()
dbs.append(('smallbfd', jackhmmer_small_bfd_result))
else:
bfd_out_path = os.path.join(msa_output_dir,
f'{prefix}_bfd_uniclust_hits.a3m')
if not os.path.exists(bfd_out_path):
hhblits_bfd_uniclust_result = self.hhblits_bfd_uniclust_runner.query(
input_fasta_path)
with open(bfd_out_path, 'w') as f:
f.write(hhblits_bfd_uniclust_result['a3m'])
else:
hhblits_bfd_uniclust_result = {}
with open(bfd_out_path, 'r') as f:
hhblits_bfd_uniclust_result['a3m'] = f.read()
dbs.append(('bfd', hhblits_bfd_uniclust_result))
dbs.append(('uniref90', jackhmmer_uniref90_result))
dbs.append(('mgnify', jackhmmer_mgnify_result))
msas = []
deletion_matrices = []
names = []
for db_name, db_results in dbs:
try:
unsorted_results = []
for i, result in enumerate(db_results):
if db_name == 'bfd':
msa, deletion_matrix = parsers.parse_a3m(
db_results['a3m'])
else:
msa, deletion_matrix, target_names = parsers.parse_stockholm(
db_results['sto'])
e_values_dict = parsers.parse_e_values_from_tblout(
db_results['tbl'])
e_values = [
e_values_dict[t.split('/')[0]]
for t in target_names
]
zipped_results = zip(msa, deletion_matrix,
target_names, e_values)
if i != 0:
# Only take query from the first chunk
zipped_results = [
x for x in zipped_results if x[2] != 'query'
]
unsorted_results.extend(zipped_results)
sorted_by_evalue = sorted(unsorted_results,
key=lambda x: x[3])
db_msas, db_deletion_matrices, db_names, _ = zip(
*sorted_by_evalue)
if db_msas:
msas.append(db_msas)
deletion_matrices.append(db_deletion_matrices)
names.append(db_names)
msa_size = len(set(db_msas))
logging.info(
f'{msa_size} Sequences Found in {db_name}')
except:
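                    # Fallback for databases whose results cannot be sorted by
                    # e-value (e.g. no 'tbl' output): keep hits in their original order.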
unsorted_results = []
msa, deletion_matrix, target_names = parsers.parse_stockholm(
db_results['sto'])
zipped_results = zip(msa, deletion_matrix, target_names)
if db_name != 'smallbfd':
zipped_results = [
x for x in zipped_results
if x[2] != input_description
]
unsorted_results.extend(zipped_results)
if not len(unsorted_results):
continue # if no hits found
db_msas, db_deletion_matrices, db_names = zip(
*unsorted_results)
msas.append(db_msas)
deletion_matrices.append(db_deletion_matrices)
names.append(db_names)
logging.info(f'Making msa pickle: {pickled_msa_path}')
with open(pickled_msa_path, 'wb') as F:
pickle.dump(
{
"msas": msas,
"deletion_matrices": deletion_matrices,
"names": names
}, F)
return None
def combine_msas(self,
input_fasta_path: str,
msa_output_dir: str,
homooligomer: str,
turbo: bool = False) -> None:
"""Runs alignment tools on the input sequence. Note, input sequence should be ori_sequence"""
with open(input_fasta_path) as f:
input_fasta_str = f.read()
input_seqs, input_descs = parsers.parse_fasta(input_fasta_str)
if len(input_seqs) != 1:
raise ValueError(
f'More than one input sequence found in {input_fasta_path}.')
input_sequence = input_seqs[0]
input_description = input_descs[0]
ori_sequence: str = input_sequence # ori_sequence = "MLASVAS:ASVASDV"
sequence: str = ori_sequence.replace(':',
'') # sequence = "MLASVASASVASDV"
seqs: List[str] = ori_sequence.split(
':') # seqs = ["MLASVAS", "ASVASDV"]
homooligomers: List[int] = [int(h) for h in homooligomer.split(':')
] # homooligomers = [2, 2]
full_sequence: str = "".join([
s * h for s, h in zip(seqs, homooligomers)
]) # full_sequence = "MLASVASMLASVASASVASDVASVASDV"
_blank_seq = ["-" * len(seq) for seq in seqs]
_blank_mtx = [[0] * len(seq) for seq in seqs]
def _pad(ns, vals, mode):
if mode == "seq": _blank = _blank_seq.copy()
if mode == "mtx": _blank = _blank_mtx.copy()
if isinstance(ns, list):
for n, val in zip(ns, vals):
_blank[n] = val
else:
_blank[ns] = vals
if mode == "seq": return "".join(_blank)
if mode == "mtx": return sum(_blank, [])
combined_msa_pickle = os.path.join(msa_output_dir,
'combined_msa.pickle')
if not os.path.exists(combined_msa_pickle):
msas = []
deletion_matrices = []
for n, seq in enumerate(seqs):
prefix = str(n)
pickled_msa_path = os.path.join(msa_output_dir,
f"{prefix}.pickle")
msas_dict = pickle.load(open(pickled_msa_path, "rb"))
msas_, mtxs_, names_ = (
msas_dict[k]
for k in ['msas', 'deletion_matrices', 'names'])
# pad sequences
for msa_, mtx_ in zip(msas_, mtxs_):
msa, mtx = [sequence], [[0] * len(sequence)]
for s, m in zip(msa_, mtx_):
msa.append(_pad(n, s, "seq"))
mtx.append(_pad(n, m, "mtx"))
msas.append(msa)
deletion_matrices.append(mtx)
pickle.dump({
"msas": msas,
"deletion_matrices": deletion_matrices
}, open(combined_msa_pickle, "wb"))
else:
with open(combined_msa_pickle, 'rb') as F:
combined_msa = pickle.load(F)
msas = combined_msa['msas']
deletion_matrices = combined_msa['deletion_matrices']
full_msa = []
for msa in msas:
full_msa += msa
deduped_full_msa = list(dict.fromkeys(full_msa))
total_msa_size = len(deduped_full_msa)
logging.info(f'{total_msa_size} Sequences Found in Total\n')
lengths = [len(seq) for seq in seqs]
msas_mod, deletion_matrices_mod = homooligomerize_heterooligomer(
msas, deletion_matrices, lengths, homooligomers)
num_res = len(full_sequence)
feature_dict = {}
feature_dict.update(
make_sequence_features(full_sequence, 'test', num_res))
feature_dict.update(
make_msa_features(msas_mod,
deletion_matrices=deletion_matrices_mod,
Ln=num_res))
if not turbo:
feature_dict.update(_placeholder_template_feats(0, num_res))
logging.info(f'Sequences: {str(seqs)}')
logging.info(f'Homooligomers: {str(homooligomers)}')
Ls = []
for seq, h in zip(seqs, homooligomers):
Ls += [len(seq)] * h
logging.info(
f'Original residue index: {str(feature_dict["residue_index"])}')
logging.info(f'Sequence Lengths: {str(Ls)}')
feature_dict['residue_index'] = chain_break(
feature_dict['residue_index'], Ls)
logging.info(
f'Fixed residue index: {str(feature_dict["residue_index"])}')
logging.info('Final (deduplicated) MSA size: %d sequences.',
feature_dict['num_alignments'][0])
return feature_dict
|
the-stack_106_23748 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Exempi(AutotoolsPackage):
"""exempi is a port of Adobe XMP SDK to work on UNIX and to be build with
GNU automake.
It includes XMPCore and XMPFiles, libexempi, a C-based API and exempi
a command line tool.
"""
homepage = "https://libopenraw.freedesktop.org/wiki/Exempi"
url = "https://libopenraw.freedesktop.org/download/exempi-2.5.2.tar.bz2"
version('2.5.2', sha256='52f54314aefd45945d47a6ecf4bd21f362e6467fa5d0538b0d45a06bc6eaaed5')
depends_on('zlib')
depends_on('iconv')
depends_on('[email protected]:')
depends_on('pkgconfig')
depends_on('expat')
conflicts('%gcc@:4.5')
def patch(self):
# fix make check: Fix undefined reference to `boost::unit_test::unit_test_main`:
# BOOST_TEST_DYN_LINK only works with shlib and when boost is linked after src:
# https://bugs.launchpad.net/widelands/+bug/662908
# https://github.com/bincrafters/community/issues/127
filter_file('#define BOOST_TEST_DYN_LINK', '', 'exempi/tests/test-adobesdk.cpp')
def configure_args(self):
args = ['--with-boost={0}'.format(self.spec['boost'].prefix)]
if self.spec.satisfies('platform=darwin'):
args += ['--with-darwinports', '--with-fink']
return args
|
the-stack_106_23752 | """
File: weather_master.py
Name: Karen Wong
-----------------------
This program implements a console program that asks the user for weather data and computes the average, highest, and lowest temperatures and the number of cold days among the inputs.
Output format should match what is shown in the sample run in the Assignment 2 Handout.
"""
EXIT = -100
def main():
"""
    This program calculates the highest, lowest, and average temperatures
    and the total number of cold days (temperature < 16) among the entered values.
"""
print('stanCode "Weather Master 4.0"!')
temp = int(input('Next Temperature(or ' + str(EXIT) + ' to quit)? '))
if temp == EXIT:
print('No temperature were entered.')
else:
highest = temp
lowest = temp
average = temp
if temp < 16:
cold = 1
else:
cold = 0
total_num = 1
while True:
temp1 = int(input('Next Temperature(or ' + str(EXIT)+' to quit)? '))
if temp1 == EXIT:
break
else:
total_num += 1
if temp1 > highest:
highest = temp1
if temp1 < lowest:
lowest = temp1
                if temp1 < 16:
cold += 1
average = (temp1+average*(total_num-1))/total_num
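                # Running mean update: new_avg = (new_value + old_avg * (n - 1)) / n,
                # so earlier temperatures don't need to be stored.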
temp = temp1
print('Highest temperature = ' + str(highest))
print('Lowest temperature = ' + str(lowest))
print('Average temperature = ' + str(average))
print(str(cold)+' cold days!')
if __name__ == "__main__":
main()
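# Illustrative run (input values invented for demonstration; the authoritative
# output format is the Assignment 2 Handout):
#   stanCode "Weather Master 4.0"!
#   Next Temperature(or -100 to quit)? 20
#   Next Temperature(or -100 to quit)? 10
#   Next Temperature(or -100 to quit)? -100
#   Highest temperature = 20
#   Lowest temperature = 10
#   Average temperature = 15.0
#   1 cold days!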
|
the-stack_106_23753 | import sys
import os
import numpy as np
import pytest
import keepa
import datetime
py2 = sys.version_info.major == 2
py37 = sys.version_info.major == 3 and sys.version_info.minor == 7
# reduce the request limit for testing
keepa.interface.REQLIM = 2
try:
path = os.path.dirname(os.path.realpath(__file__))
keyfile = os.path.join(path, 'key')
weak_keyfile = os.path.join(path, 'weak_key')
except:
keyfile = '/home/alex/python/keepa/tests/key'
weak_keyfile = '/home/alex/python/keepa/tests/weak_key'
if os.path.isfile(keyfile):
with open(keyfile) as f:
TESTINGKEY = f.read()
with open(weak_keyfile) as f:
WEAKTESTINGKEY = f.read()
else:
# from travis-ci or appveyor
TESTINGKEY = os.environ.get('KEEPAKEY')
WEAKTESTINGKEY = os.environ.get('WEAKKEEPAKEY')
# harry potter book ISBN
PRODUCT_ASIN = '0439064872'
# ASINs of a bunch of chairs
# categories = API.search_for_categories('chairs')
# asins = []
# for category in categories:
# asins.extend(API.best_sellers_query(category))
# PRODUCT_ASINS = asins[:40]
PRODUCT_ASINS = ['B00IAPNWG6', 'B01CUJMSB2', 'B01CUJMRLI',
'B00BMPT7CE', 'B00IAPNWE8', 'B0127O51FK',
'B01CUJMT3E', 'B01A5ZIXKI', 'B00KQPBF1W',
'B000J3UZ58', 'B00196LLDO', 'B002VWK2EE',
'B00E2I3BPM', 'B004FRSUO2', 'B00CM1TJ1G',
'B00VS4514C', 'B075G1B1PK', 'B00R9EAH8U',
'B004L2JKTU', 'B008SIDW2E', 'B078XL8CCW',
'B000VXII46', 'B07D1CJ8CK', 'B07B5HZ7D9',
'B002VWK2EO', 'B000VXII5A', 'B004N1AA5W',
'B002VWKP3W', 'B00CM9OM0G', 'B002VWKP4G',
'B004N18JDC', 'B07MDHF4CP', 'B002VWKP3C',
'B07FTVSNL2', 'B002VWKP5A', 'B002O0LBFW',
'B07BM1Q64Q', 'B004N18JM8', 'B004N1AA02',
'B002VWK2EY']
# open connection to keepa
@pytest.fixture(scope='module')
def api():
keepa_api = keepa.Keepa(TESTINGKEY)
assert keepa_api.tokens_left
assert keepa_api.time_to_refill >= 0
return keepa_api
def test_deals(api):
deal_parms = {
"page": 0,
"domainId": 1,
"excludeCategories": [1064954, 11091801],
"includeCategories": [16310101]}
deals = api.deals(deal_parms)
assert isinstance(deals, list)
assert isinstance(deals[0], str)
def test_invalidkey():
with pytest.raises(Exception):
keepa.Api('thisisnotavalidkey')
def test_deadkey():
with pytest.raises(Exception):
# this key returns "payment required"
deadkey = '8ueigrvvnsp5too0atlb5f11veinerkud47p686ekr7vgr9qtj1t1tle15fffkkm'
keepa.Api(deadkey)
def test_product_finder_categories(api):
product_parms = {'categories_include': ['1055398']}
products = api.product_finder(product_parms)
assert products
def test_product_finder_query(api):
product_parms = {'author': 'jim butcher',
'page': 1,
'perPage': 50,
'categories_exclude': ['1055398']}
asins = api.product_finder(product_parms)
assert asins
# @pytest.mark.skipif(not py37, reason="Too much throttling for travisCI")
# def test_throttling(api):
# api = keepa.Keepa(WEAKTESTINGKEY)
# keepa.interface.REQLIM = 20
# # exaust tokens
# while api.tokens_left > 0:
# api.query(PRODUCT_ASINS[:5])
# # this must trigger a wait...
# t_start = time.time()
# products = api.query(PRODUCT_ASINS)
# assert (time.time() - t_start) > 1
# keepa.interface.REQLIM = 2
def test_productquery_nohistory(api):
pre_update_tokens = api.tokens_left
request = api.query(PRODUCT_ASIN, history=False)
assert api.tokens_left != pre_update_tokens
product = request[0]
assert product['csv'] is None
assert product['asin'] == PRODUCT_ASIN
def test_not_an_asin(api):
with pytest.raises(Exception):
asins = ['0000000000', '000000000x']
request = api.query(asins)
def test_isbn13(api):
isbn13 = '9780786222728'
request = api.query(isbn13, product_code_is_asin=False, history=False)
def test_productquery_update(api):
request = api.query(PRODUCT_ASIN, update=0, stats=90, rating=True)
product = request[0]
# should be live data
now = datetime.datetime.now()
delta = now - product['data']['USED_time'][-1]
assert delta.days <= 30
# check for empty arrays
history = product['data']
for key in history:
assert history[key].any()
# should be a key pair
if 'time' not in key:
assert history[key].size == history[key + '_time'].size
# check for stats
assert 'stats' in product
# no offers requested by default
assert product['offers'] is None
def test_productquery_offers(api):
request = api.query(PRODUCT_ASIN, offers=20)
product = request[0]
offers = product['offers']
for offer in offers:
assert offer['lastSeen']
assert not len(offer['offerCSV']) % 3
# also test offer conversion
offer = offers[1]
times, prices = keepa.convert_offer_history(offer['offerCSV'])
assert times.dtype == datetime.datetime
assert prices.dtype == np.double
assert len(times)
assert len(prices)
def test_productquery_offers_invalid(api):
with pytest.raises(ValueError):
request = api.query(PRODUCT_ASIN, offers=2000)
def test_productquery_offers_multiple(api):
products = api.query(PRODUCT_ASINS)
asins = np.unique([product['asin'] for product in products])
assert len(asins) == len(PRODUCT_ASINS)
assert np.in1d(asins, PRODUCT_ASINS).all()
def test_domain(api):
request = api.query(PRODUCT_ASIN, history=False, domain='DE')
product = request[0]
assert product['asin'] == PRODUCT_ASIN
def test_invalid_domain(api):
with pytest.raises(ValueError):
request = api.query(PRODUCT_ASIN, history=False, domain='XX')
def test_bestsellers(api):
categories = api.search_for_categories('chairs')
category = list(categories.items())[0][0]
asins = api.best_sellers_query(category)
valid_asins = keepa.format_items(asins)
assert len(asins) == valid_asins.size
def test_categories(api):
categories = api.search_for_categories('chairs')
catids = list(categories.keys())
for catid in catids:
assert 'chairs' in categories[catid]['name'].lower()
def test_categorylookup(api):
categories = api.category_lookup(0)
for cat_id in categories:
assert categories[cat_id]['name']
def test_invalid_category(api):
with pytest.raises(Exception):
api.category_lookup(-1)
def test_stock(api):
request = api.query(PRODUCT_ASIN, history=False, stock=True,
offers=20)
# all live offers must have stock
product = request[0]
assert product['offersSuccessful']
live = product['liveOffersOrder']
for offer in product['offers']:
if offer['offerId'] in live:
if 'stockCSV' in offer:
assert offer['stockCSV'][-1]
def test_keepatime(api):
keepa_st_ordinal = datetime.datetime(2011, 1, 1)
assert keepa_st_ordinal == keepa.keepa_minutes_to_time(0)
assert keepa.keepa_minutes_to_time(0, to_datetime=False)
@pytest.mark.skipif(py2, reason="Requires python 3.5+ for testing")
def test_plotting(api):
request = api.query(PRODUCT_ASIN, history=True)
product = request[0]
keepa.plot_product(product, show=False)
@pytest.mark.skipif(py2, reason="Requires python 3.5+ for testing")
def test_empty(api):
import matplotlib.pyplot as plt
plt.close('all')
products = api.query(['B01I6KT07E', 'B01G5BJHVK', 'B017LJP1MO'])
with pytest.raises(Exception):
keepa.plot_product(products[0], show=False)
def test_seller_query(api):
seller_id = 'A2L77EE7U53NWQ'
seller_info = api.seller_query(seller_id)
assert len(seller_info) == 1
assert seller_id in seller_info
def test_seller_query_list(api):
seller_id = ['A2L77EE7U53NWQ', 'AMMEOJ0MXANX1']
seller_info = api.seller_query(seller_id)
assert len(seller_info) == len(seller_id)
assert set(seller_info).issubset(seller_id)
def test_seller_query_long_list(api):
seller_id = ['A2L77EE7U53NWQ']*200
with pytest.raises(RuntimeError):
seller_info = api.seller_query(seller_id)
def test_product_finder_query_author(api):
product_parms = {'author': 'jim butcher'}
asins = api.product_finder(product_parms)
assert asins
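# Minimal usage sketch mirroring the calls exercised above (not a test; the
# access key and ASIN are placeholders):
#   import keepa
#   api = keepa.Keepa('YOUR_ACCESS_KEY')
#   products = api.query('0439064872', history=True)
#   keepa.plot_product(products[0])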
|