filename | text
---|---|
the-stack_106_16638
|
import logging
import os
import sys
import numpy as np
from pySDC.core import Hooks as hookclass
from pySDC.core.BaseTransfer import base_transfer
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, params):
self.mssdc_jac = True
self.predict_type = None
self.all_to_done = False
self.logger_level = 20
self.log_to_file = False
self.dump_setup = True
self.fname = 'run_pid' + str(os.getpid()) + '.log'
self.use_iteration_estimator = False
self.use_adaptivity = False
self.use_HotRod = False
self.HotRod_tol = np.inf
self.store_uold = False
self.use_embedded_estimate = False
self.use_extrapolation_estimate = False
for k, v in params.items():
setattr(self, k, v)
self._freeze()
class controller(object):
"""
Base abstract controller class
"""
def __init__(self, controller_params):
"""
Initialization routine for the base controller
Args:
controller_params (dict): parameter set for the controller and the steps
"""
# check if a hook class was provided; if not, use the default class
controller_params['hook_class'] = controller_params.get('hook_class', hookclass.hooks)
self.__hooks = controller_params['hook_class']()
self.hooks.pre_setup(step=None, level_number=None)
self.params = _Pars(controller_params)
self.__setup_custom_logger(self.params.logger_level, self.params.log_to_file, self.params.fname)
self.logger = logging.getLogger('controller')
if self.params.use_iteration_estimator and self.params.all_to_done:
self.logger.warning('all_to_done and use_iteration_estimator set, will ignore all_to_done')
@staticmethod
def __setup_custom_logger(level=None, log_to_file=None, fname=None):
"""
Helper function to set main parameters for the logging facility
Args:
level (int): level of logging
log_to_file (bool): flag to turn on/off logging to file
fname (str): name of the log file
"""
assert type(level) is int
# specify formats and handlers
if log_to_file:
file_formatter = logging.Formatter(
fmt='%(asctime)s - %(name)s - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s: %(message)s')
if os.path.isfile(fname):
file_handler = logging.FileHandler(fname, mode='a')
else:
file_handler = logging.FileHandler(fname, mode='w')
file_handler.setFormatter(file_formatter)
else:
file_handler = None
std_formatter = logging.Formatter(fmt='%(name)s - %(levelname)s: %(message)s')
std_handler = logging.StreamHandler(sys.stdout)
std_handler.setFormatter(std_formatter)
# instantiate logger
logger = logging.getLogger('')
# remove handlers from previous calls to controller
for handler in logger.handlers[:]:
logger.removeHandler(handler)
logger.setLevel(level)
logger.addHandler(std_handler)
if log_to_file:
logger.addHandler(file_handler)
else:
pass
def dump_setup(self, step, controller_params, description):
"""
Helper function to dump the setup used for this controller
Args:
step (pySDC.Step.step): the step instance (will/should be the first one only)
controller_params (dict): controller parameters
description (dict): description of the problem
"""
out = 'Setup overview (--> user-defined) -- BEGIN'
self.logger.info(out)
out = '----------------------------------------------------------------------------------------------------\n\n'
out += 'Controller: %s\n' % self.__class__
for k, v in vars(self.params).items():
if not k.startswith('_'):
if k in controller_params:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '\nStep: %s\n' % step.__class__
for k, v in vars(step.params).items():
if not k.startswith('_'):
if k in description['step_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += ' Level: %s\n' % step.levels[0].__class__
for L in step.levels:
out += ' Level %2i\n' % L.level_index
for k, v in vars(L.params).items():
if not k.startswith('_'):
if k in description['level_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Problem: %s\n' % L.prob.__class__
for k, v in vars(L.prob.params).items():
if not k.startswith('_'):
if k in description['problem_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Data type u: %s\n' % L.prob.dtype_u
out += '--> Data type f: %s\n' % L.prob.dtype_f
out += '--> Sweeper: %s\n' % L.sweep.__class__
for k, v in vars(L.sweep.params).items():
if not k.startswith('_'):
if k in description['sweeper_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Collocation: %s\n' % L.sweep.coll.__class__
if len(step.levels) > 1:
if 'base_transfer_class' in description and description['base_transfer_class'] is not base_transfer:
out += '--> Base Transfer: %s\n' % step.base_transfer.__class__
else:
out += ' Base Transfer: %s\n' % step.base_transfer.__class__
for k, v in vars(step.base_transfer.params).items():
if not k.startswith('_'):
if k in description['base_transfer_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
out += '--> Space Transfer: %s\n' % step.base_transfer.space_transfer.__class__
for k, v in vars(step.base_transfer.space_transfer.params).items():
if not k.startswith('_'):
if k in description['space_transfer_params']:
out += '--> %s = %s\n' % (k, v)
else:
out += ' %s = %s\n' % (k, v)
self.logger.info(out)
out = '----------------------------------------------------------------------------------------------------'
self.logger.info(out)
out = 'Setup overview (--> user-defined) -- END\n'
self.logger.info(out)
@staticmethod
def check_convergence(S):
"""
Routine to determine whether to stop iterating (currently testing the residual + the max. number of iterations)
Args:
S (pySDC.Step.step): current step
Returns:
bool: converged, true or false
"""
# do all this on the finest level
L = S.levels[0]
# get residual and check against prescribed tolerance (plus check number of iterations)
res = L.status.residual
converged = S.status.iter >= S.params.maxiter or res <= L.params.restol or S.status.force_done
if converged is None:
converged = False
return converged
def run(self, u0, t0, Tend):
"""
Abstract interface to the run() method
Args:
u0: initial values
t0 (float): starting time
Tend (float): ending time
"""
raise NotImplementedError('ERROR: controller has to implement run(self, u0, t0, Tend)')
@property
def hooks(self):
"""
Getter for the hooks
Returns:
pySDC.Hooks.hooks: hooks
"""
return self.__hooks
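# Hedged usage sketch (not part of the original module): this base class is abstract, so a
# concrete controller such as controller_nonMPI is used in practice. The `description` and
# `uinit` objects below are assumed to be set up elsewhere for a specific problem/sweeper.
#
#   from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
#
#   controller_params = {'logger_level': 30}
#   controller = controller_nonMPI(num_procs=1, controller_params=controller_params,
#                                  description=description)
#   uend, stats = controller.run(u0=uinit, t0=0.0, Tend=1.0)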
|
the-stack_106_16640
|
import collections.abc
import re
from functools import wraps
from grainy.core import int_flags
from ctl.exceptions import PermissionDenied
class expose:
"""
Decorator to expose a ctl plugin's method - permissions will be checked before
method is executed
"""
def __init__(self, namespace, level=None, explicit=False):
"""
**Keyword Arguments**
- namespace (`str`): permissioning namespace, has the following formatting
variables available:
- `plugin`: the plugin instance
- `plugin_name`: name of the plugin instance as defined in config
- any arguments passed to the method by argument name
- level (`function`|`str`|`None`): permission level to check.
- If a function is passed it is expected to return a permission level
string. The function will be passed the plugin instance as an argument.
- If a string is passed it is expected to be a permission level string
- If None is passed, permission level will be obtained from the plugin's
config and default to `r`
- explicit (`bool`=`False`): If true will enable explicit namespace checking
"""
self.namespace = namespace
self.level = level
self.explicit = explicit
def __call__(self, fn):
level = self.level
namespace_ = self.namespace
explicit = self.explicit
@wraps(fn)
def wrapped(self, *args, **kwargs):
# format the namespace using the plugin instance
# and any arguments passed to the decorated method
namespace_args = {
"plugin": self,
"plugin_name": self.pluginmgr_config.get("name"),
}
namespace_args.update(**kwargs)
namespace = namespace_.format(**namespace_args)
# obtain required permission level
if level is None:
# level is not specified at all in the decorator,
# in which case we obtain from plugin config and default to 'r'
permissions = self.config.get("permissions", {}).get(fn.__name__, "r")
elif isinstance(level, collections.abc.Callable):
# level is specified and a function, call it and set from there
permissions = level(self)
else:
# level is specified and will be taken from the decorator directly
permissions = level
# check permissions
allowed = self.ctl.permissions.check(
namespace, int_flags(permissions), explicit=explicit
)
# raise on permission check failure
if not allowed:
raise PermissionDenied(namespace, permissions)
# execute method
return fn(self, *args, **kwargs)
doc = []
doc_indent = ""
if fn.__doc__:
doc.append(fn.__doc__)
m = re.search(r"(\s+)", fn.__doc__)
if m:
doc_indent = m.group(1)
doc.extend(
[
f'{doc_indent}!!! note "Exposed to CLI"',
f"{doc_indent} namespace: `{namespace_}`",
]
)
wrapped.__doc__ = "\n".join(doc)
wrapped.exposed = True
return wrapped
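# Hedged usage sketch (not part of the original module): the plugin class and namespace
# below are hypothetical and only illustrate the formatting variables documented above.
#
#   class MyPlugin(PluginBase):
#       @expose("ctl.{plugin_name}.deploy.{target}", level="r")
#       def deploy(self, target="prod"):
#           ...
#
# When `deploy` is called with `target` as a keyword argument, the namespace is rendered
# from the plugin instance and the keyword arguments, the required permission level is
# resolved (decorator argument, callable, or plugin config), and PermissionDenied is
# raised if the check against `self.ctl.permissions` fails.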
|
the-stack_106_16643
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-ajax-tables',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD 3-Clause License', # example license
description='Django app for easily creating tables from models',
long_description=README,
author='Piotr Szelag',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: X.Y', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD 3-Clause License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
# 'Django == 1.7',
],
)
|
the-stack_106_16644
|
# Bethesda Structs documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 29 15:54:27 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import alabaster
sys.path.insert(0, os.path.abspath("../.."))
sys.path.insert(0, os.path.join(os.path.abspath('.'), 'themes'))
from bethesda_structs import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.todo",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinxcontrib.mermaid"
]
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
# source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Bethesda Structs"
copyright = f"2018, {__version__.__author__}"
author = __version__.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__.__version__
# The full version, including alpha/beta/rc tags.
release = __version__.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ["themes"]
html_theme = "alabaster"
html_favicon = "_static/img/favicon.png"
html_sidebars = {
"**": ["about.html", "navigation.html", "relations.html", "authorprojects.html", "searchbox.html"]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"logo": "img/logo.png",
"description": __version__.__description__,
"github_user": "stephen-bunn",
"github_repo": "bethesda-structs",
"github_type": "star",
"analytics_id": "UA-110798724-2",
"page_width": "1000px",
"sidebar_width": "220px",
"sidebar_collapse": True,
# "slackin_button": True,
# "slackin_link": "https://bethesda-structs-slackin.herokuapp.com",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "BethesdaStructsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"BethesdaStructs.tex",
"Bethesda Structs Documentation",
author,
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "bethesdastructs", "Bethesda Structs Documentation", [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"BethesdaStructs",
"Bethesda Structs Documentation",
author,
"BethesdaStructs",
"One line description of project.",
"Miscellaneous",
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6/", None),
"construct": ("https://construct.readthedocs.io/en/latest/", None),
}
def setup(app):
app.add_stylesheet("css/custom.css")
|
the-stack_106_16646
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import os.path
import textwrap
from random import choice, randint
exercise = 'src.own_language'
function = "run"
class OwnLanguageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]):
cls.module = load_module(exercise, 'fi')
def test_0a_main_program_ok(self):
ok, line = check_source(self.module)
message = """The code for testng the functions should be placed inside
if __name__ == "__main__":
block. The following row should be moved:
"""
self.assertTrue(ok, message+line)
@points('7.own_programming_language-osa1')
def test1_function_exists(self):
try:
from src.own_language import run
except:
self.assertTrue(False, "Your program should contain function named as run")
@points('7.own_programming_language-part1')
def test2_no_loop(self):
tests = []
program1 = ["PRINT A","END"]
result1 = [0]
tests.append((program1,result1))
program2 = ["MOV A 5","PRINT A"]
result2 = [5]
tests.append((program2,result2))
program3 = ["MOV A 1","MOV B 1","ADD A B","ADD B A","ADD A B","ADD B A","PRINT A","PRINT B"]
result3 = [5,8]
tests.append((program3,result3))
program4 = ["MOV A 2","MUL A A","MUL A A","MUL A A","MUL A A","PRINT A"]
result4 = [65536]
tests.append((program4,result4))
program5 = ["MOV A 10","PRINT A","MOV B A","SUB B 8","PRINT B","SUB A B","PRINT A"]
result5 = [10,2,8]
tests.append((program5,result5))
for test in tests:
with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]):
reload_module(self.module)
run = load(exercise, function, 'en')
try:
result = run(test[0])
except:
self.assertFalse(True, "Program "+str(test[0])+" caused an error")
self.assertEqual(result, test[1], "Program "+str(test[0])+" returns an incorrect result "+str(result)+", the correct result would be "+str(test[1]))
@points('7.own_programming_language-part2')
def test3_all_commands(self):
tests = []
program1 = ["PRINT A","END"]
result1 = [0]
tests.append((program1,result1))
program2 = []
result2 = []
tests.append((program2,result2))
program3 = ["MOV A 10","start:","PRINT A","SUB A 1","IF A > 0 JUMP start","END"]
result3 = [10,9,8,7,6,5,4,3,2,1]
tests.append((program3,result3))
program4 = ["MOV A 1","MOV B 1","start:","MUL A 2","ADD B 1","IF B != 101 JUMP start","PRINT A"]
result4 = [1267650600228229401496703205376]
tests.append((program4,result4))
program5 = ["MOV A 1","MOV B 999","start:","ADD A 1","SUB B 1","ADD C 1","IF A == B JUMP end","JUMP start","end:","PRINT C"]
result5 = [499]
tests.append((program5,result5))
program6 = ["MOV N 100","PRINT 2","MOV A 3","start:","MOV B 2","MOV Z 0","test:","MOV C B","new:","IF C == A JUMP virhe","IF C > A JUMP pass_by","ADD C B","JUMP new","virhe:","MOV Z 1","JUMP pass_by2","pass_by:","ADD B 1","IF B < A JUMP test","pass_by2:","IF Z == 1 JUMP pass_by3","PRINT A","pass_by3:","ADD A 1","IF A <= N JUMP start"]
result6 = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97]
tests.append((program6,result6))
for test in tests:
with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]):
reload_module(self.module)
run = load(exercise, function, 'fi')
try:
result = run(test[0])
except:
self.assertFalse(True, "Program "+str(test[0])+" causes on error")
self.assertEqual(result, test[1], "Program "+str(test[0])+" returns an incorrect result "+str(result)+", the correct result would be "+str(test[1]))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_16650
|
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Tang J, Qu M, Wang M, et al. Line: Large-scale information network embedding[C]//Proceedings of the 24th International Conference on World Wide Web. International World Wide Web Conferences Steering Committee, 2015: 1067-1077.(https://arxiv.org/pdf/1503.03578.pdf)
"""
import math
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Embedding, Input, Lambda
from tensorflow.python.keras.models import Model
from ..alias import create_alias_table, alias_sample
from ..utils import preprocess_nxgraph
def line_loss(y_true, y_pred):
return -K.mean(K.log(K.sigmoid(y_true*y_pred)))
def create_model(numNodes, embedding_size, order='second'):
v_i = Input(shape=(1,))
v_j = Input(shape=(1,))
first_emb = Embedding(numNodes, embedding_size, name='first_emb')
second_emb = Embedding(numNodes, embedding_size, name='second_emb')
context_emb = Embedding(numNodes, embedding_size, name='context_emb')
v_i_emb = first_emb(v_i)
v_j_emb = first_emb(v_j)
v_i_emb_second = second_emb(v_i)
v_j_context_emb = context_emb(v_j)
first = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keepdims=False), name='first_order')([v_i_emb, v_j_emb])
second = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keepdims=False), name='second_order')([v_i_emb_second, v_j_context_emb])
if order == 'first':
output_list = [first]
elif order == 'second':
output_list = [second]
else:
output_list = [first, second]
model = Model(inputs=[v_i, v_j], outputs=output_list)
return model, {'first': first_emb, 'second': second_emb}
class LINE:
def __init__(self, graph, embedding_size=8, negative_ratio=5, order='second',):
"""
:param graph:
:param embedding_size:
:param negative_ratio:
:param order: 'first','second','all'
"""
if order not in ['first', 'second', 'all']:
raise ValueError("order must be 'first', 'second', or 'all'")
self.graph = graph
self.idx2node, self.node2idx = preprocess_nxgraph(graph)
self.use_alias = True
self.rep_size = embedding_size
self.order = order
self._embeddings = {}
self.negative_ratio = negative_ratio
self.order = order
self.node_size = graph.number_of_nodes()
self.edge_size = graph.number_of_edges()
self.samples_per_epoch = self.edge_size*(1+negative_ratio)
self._gen_sampling_table()
self.reset_model()
def reset_training_config(self, batch_size, times):
self.batch_size = batch_size
self.steps_per_epoch = (
(self.samples_per_epoch - 1) // self.batch_size + 1)*times
def reset_model(self, opt='adam'):
self.model, self.embedding_dict = create_model(
self.node_size, self.rep_size, self.order)
self.model.compile(opt, line_loss)
self.batch_it = self.batch_iter(self.node2idx)
def _gen_sampling_table(self):
# create sampling table for vertex
power = 0.75
numNodes = self.node_size
node_degree = np.zeros(numNodes) # out degree
node2idx = self.node2idx
for edge in self.graph.edges():
node_degree[node2idx[edge[0]]
] += self.graph[edge[0]][edge[1]].get('weight', 1.0)
total_sum = sum([math.pow(node_degree[i], power)
for i in range(numNodes)])
norm_prob = [float(math.pow(node_degree[j], power)) /
total_sum for j in range(numNodes)]
self.node_accept, self.node_alias = create_alias_table(norm_prob)
# create sampling table for edge
numEdges = self.graph.number_of_edges()
total_sum = sum([self.graph[edge[0]][edge[1]].get('weight', 1.0)
for edge in self.graph.edges()])
norm_prob = [self.graph[edge[0]][edge[1]].get('weight', 1.0) *
numEdges / total_sum for edge in self.graph.edges()]
self.edge_accept, self.edge_alias = create_alias_table(norm_prob)
def batch_iter(self, node2idx):
edges = [(node2idx[x[0]], node2idx[x[1]]) for x in self.graph.edges()]
data_size = self.graph.number_of_edges()
shuffle_indices = np.random.permutation(np.arange(data_size))
# positive or negative mod
mod = 0
mod_size = 1 + self.negative_ratio
h = []
t = []
sign = 0
count = 0
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
while True:
if mod == 0:
h = []
t = []
for i in range(start_index, end_index):
if random.random() >= self.edge_accept[shuffle_indices[i]]:
shuffle_indices[i] = self.edge_alias[shuffle_indices[i]]
cur_h = edges[shuffle_indices[i]][0]
cur_t = edges[shuffle_indices[i]][1]
h.append(cur_h)
t.append(cur_t)
sign = np.ones(len(h))
else:
sign = np.ones(len(h))*-1
t = []
for i in range(len(h)):
t.append(alias_sample(
self.node_accept, self.node_alias))
if self.order == 'all':
yield ([np.array(h), np.array(t)], [sign, sign])
else:
yield ([np.array(h), np.array(t)], [sign])
mod += 1
mod %= mod_size
if mod == 0:
start_index = end_index
end_index = min(start_index + self.batch_size, data_size)
if start_index >= data_size:
count += 1
mod = 0
h = []
shuffle_indices = np.random.permutation(np.arange(data_size))
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
def get_embeddings(self,):
self._embeddings = {}
if self.order == 'first':
embeddings = self.embedding_dict['first'].get_weights()[0]
elif self.order == 'second':
embeddings = self.embedding_dict['second'].get_weights()[0]
else:
embeddings = np.hstack((self.embedding_dict['first'].get_weights()[
0], self.embedding_dict['second'].get_weights()[0]))
idx2node = self.idx2node
for i, embedding in enumerate(embeddings):
self._embeddings[idx2node[i]] = embedding
return self._embeddings
def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1, times=1):
self.reset_training_config(batch_size, times)
hist = self.model.fit_generator(self.batch_it, epochs=epochs, initial_epoch=initial_epoch, steps_per_epoch=self.steps_per_epoch,
verbose=verbose)
return hist
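# Hedged usage sketch (not part of the original module): assumes a GraphEmbedding-style
# package layout exposing LINE at the top level and an edge-list file on disk; the file
# name and package alias below are placeholders.
#
#   import networkx as nx
#   from ge import LINE
#
#   G = nx.read_edgelist('wiki_edgelist.txt', create_using=nx.DiGraph(),
#                        nodetype=None, data=[('weight', int)])
#   model = LINE(G, embedding_size=128, order='second')
#   model.train(batch_size=1024, epochs=50, verbose=2)
#   embeddings = model.get_embeddings()  # dict: node -> np.ndarray of size embedding_size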
|
the-stack_106_16651
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import namedtuple
from struct import Struct
# binary output and reader version
VERSION = 100
SINGLE_BIN_FILE_TYPE = 1
COMBINE_BIN_FILE_TYPE = 2
# used to share header info between writer and reader, with 31 bytes of padding reserved for later use
header_struct = Struct("<4s b I Q I QQ QQ qq")
# binary file header
FileHeader = namedtuple(
"FileHeader",
[
"name", "file_type", "version", "item_count", "item_size", "meta_offset", "meta_size",
"data_offset", "data_size", "starttime", "endtime"
]
)
meta_item_format = "20s2s"
# mapping from meta info pack format string
dtype_pack_map = {
"i": "i",
"i4": "i",
"i2": "h",
"i8": "q",
"f": "f",
"d": "d"
}
dtype_convert_map = {
"i": int,
'i2': int,
'i4': int,
'i8': int,
'f': float,
'd': float
}
# merged file part
# row meta: item_count, key value
merged_row_meta_struct = Struct("<H Q")
merged_row_item_count_struct = Struct("< H")
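# Minimal round-trip sketch (not part of the original module): packs a sample header with
# the struct defined above and reads it back. The magic bytes and sizes are illustrative
# assumptions, not values the real writer necessarily uses.
if __name__ == "__main__":
    sample = FileHeader(
        name=b"MARO",                      # assumed 4-byte magic
        file_type=SINGLE_BIN_FILE_TYPE,
        version=VERSION,
        item_count=0,
        item_size=32,
        meta_offset=header_struct.size,
        meta_size=0,
        data_offset=header_struct.size,
        data_size=0,
        starttime=0,
        endtime=0,
    )
    packed = header_struct.pack(*sample)
    restored = FileHeader._make(header_struct.unpack(packed))
    assert restored == sample
    print(header_struct.size, restored)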
|
the-stack_106_16652
|
# Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import io
import json
import os
import os.path as osp
import shutil
import traceback
import uuid
from datetime import datetime
from distutils.util import strtobool
from tempfile import mkstemp, TemporaryDirectory
import cv2
from django.db.models.query import Prefetch
import django_rq
from django.apps import apps
from django.conf import settings
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.utils.decorators import method_decorator
from django_filters import rest_framework as filters
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg import openapi
from drf_yasg.inspectors import CoreAPICompatInspector, NotHandled, FieldInspector
from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import APIException, NotFound, ValidationError
from rest_framework.permissions import SAFE_METHODS, IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from sendfile import sendfile
import cvat.apps.dataset_manager as dm
import cvat.apps.dataset_manager.views # pylint: disable=unused-import
from cvat.apps.authentication import auth
from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials
from cvat.apps.dataset_manager.bindings import CvatImportError
from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.models import (
Job, StatusChoice, Task, Project, Review, Issue,
Comment, StorageMethodChoice, ReviewStatus, StorageChoice, Image,
CredentialsTypeChoice, CloudProviderChoice
)
from cvat.apps.engine.models import CloudStorage as CloudStorageModel
from cvat.apps.engine.serializers import (
AboutSerializer, AnnotationFileSerializer, BasicUserSerializer,
DataMetaSerializer, DataSerializer, ExceptionSerializer,
FileInfoSerializer, JobSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer,
RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer,
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer,
CloudStorageSerializer, BaseCloudStorageSerializer, TaskFileSerializer,)
from utils.dataset_manifest import ImageManifestManager
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.backup import import_task
from cvat.apps.authentication.models import User
from . import models, task
from .log import clogger, slogger
class ServerViewSet(viewsets.ViewSet):
serializer_class = None
# To get nice documentation about ServerViewSet actions it is necessary
# to implement the method. By default, ViewSet doesn't provide it.
def get_serializer(self, *args, **kwargs):
pass
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides basic CVAT information',
responses={'200': AboutSerializer})
@action(detail=False, methods=['GET'], serializer_class=AboutSerializer)
def about(request):
from cvat import __version__ as cvat_version
about = {
"name": "Computer Vision Annotation Tool",
"version": cvat_version,
"description": "CVAT is completely re-designed and re-implemented " +
"version of Video Annotation Tool from Irvine, California " +
"tool. It is free, online, interactive video and image annotation " +
"tool for computer vision. It is being used by our team to " +
"annotate million of objects with different properties. Many UI " +
"and UX decisions are based on feedbacks from professional data " +
"annotation team."
}
serializer = AboutSerializer(data=about)
if serializer.is_valid(raise_exception=True):
return Response(data=serializer.data)
@staticmethod
@swagger_auto_schema(method='post', request_body=ExceptionSerializer)
@action(detail=False, methods=['POST'], serializer_class=ExceptionSerializer)
def exception(request):
"""
Saves an exception from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = ExceptionSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
additional_info = {
"username": request.user.username,
"name": "Send exception",
}
message = JSONRenderer().render({**serializer.data, **additional_info}).decode('UTF-8')
jid = serializer.data.get("job_id")
tid = serializer.data.get("task_id")
if jid:
clogger.job[jid].error(message)
elif tid:
clogger.task[tid].error(message)
else:
clogger.glob.error(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(method='post', request_body=LogEventSerializer(many=True))
@action(detail=False, methods=['POST'], serializer_class=LogEventSerializer)
def logs(request):
"""
Saves logs from a client on the server
Sends logs to the ELK if it is connected
"""
serializer = LogEventSerializer(many=True, data=request.data)
if serializer.is_valid(raise_exception=True):
user = { "username": request.user.username }
for event in serializer.data:
message = JSONRenderer().render({**event, **user}).decode('UTF-8')
jid = event.get("job_id")
tid = event.get("task_id")
if jid:
clogger.job[jid].info(message)
elif tid:
clogger.task[tid].info(message)
else:
clogger.glob.info(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
@staticmethod
@swagger_auto_schema(
method='get', operation_summary='Returns all files and folders that are on the server along the specified path',
manual_parameters=[openapi.Parameter('directory', openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Directory to browse')],
responses={'200' : FileInfoSerializer(many=True)}
)
@action(detail=False, methods=['GET'], serializer_class=FileInfoSerializer)
def share(request):
param = request.query_params.get('directory', '/')
if param.startswith("/"):
param = param[1:]
directory = os.path.abspath(os.path.join(settings.SHARE_ROOT, param))
if directory.startswith(settings.SHARE_ROOT) and os.path.isdir(directory):
data = []
content = os.scandir(directory)
for entry in content:
entry_type = None
if entry.is_file():
entry_type = "REG"
elif entry.is_dir():
entry_type = "DIR"
if entry_type:
data.append({"name": entry.name, "type": entry_type})
serializer = FileInfoSerializer(many=True, data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
else:
return Response("{} is an invalid directory".format(param),
status=status.HTTP_400_BAD_REQUEST)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides the list of supported annotations formats',
responses={'200': DatasetFormatsSerializer()})
@action(detail=False, methods=['GET'], url_path='annotation/formats')
def annotation_formats(request):
data = dm.views.get_all_formats()
return Response(DatasetFormatsSerializer(data).data)
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides allowed plugins.',
responses={'200': PluginsSerializer()})
@action(detail=False, methods=['GET'], url_path='plugins', serializer_class=PluginsSerializer)
def plugins(request):
response = {
'GIT_INTEGRATION': apps.is_installed('cvat.apps.dataset_repo'),
'ANALYTICS': False,
'MODELS': False,
'PREDICT': apps.is_installed('cvat.apps.training')
}
if strtobool(os.environ.get("CVAT_ANALYTICS", '0')):
response['ANALYTICS'] = True
if strtobool(os.environ.get("CVAT_SERVERLESS", '0')):
response['MODELS'] = True
return Response(response)
class ProjectFilter(filters.FilterSet):
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
class Meta:
model = models.Project
fields = ("id", "name", "owner", "status")
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of projects according to query parameters (12 projects per page)',
manual_parameters=[
openapi.Parameter('id', openapi.IN_QUERY, description="A unique number value identifying this project",
type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all projects where name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all project where owner name contains a parameter value",
type=openapi.TYPE_STRING),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all projects with a specific status",
type=openapi.TYPE_STRING, enum=[str(i) for i in StatusChoice]),
openapi.Parameter('names_only', openapi.IN_QUERY, description="Returns only names and ids of projects.",
type=openapi.TYPE_BOOLEAN),
openapi.Parameter('without_tasks', openapi.IN_QUERY, description="Returns only project entities without related tasks",
type=openapi.TYPE_BOOLEAN)],))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new project'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific project'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific project'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Methods does a partial update of chosen fields in a project'))
class ProjectViewSet(auth.ProjectGetQuerySetMixin, viewsets.ModelViewSet):
queryset = models.Project.objects.all().order_by('-id')
search_fields = ("name", "owner__username", "status")
filterset_class = ProjectFilter
ordering_fields = ("id", "name", "owner", "status", "assignee")
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
def get_serializer_class(self):
if self.request.path.endswith('tasks'):
return TaskSerializer
if self.request.query_params and self.request.query_params.get("names_only") == "true":
return ProjectSearchSerializer
if self.request.query_params and self.request.query_params.get("without_tasks") == "true":
return ProjectWithoutTaskSerializer
else:
return ProjectSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.ProjectAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.ProjectCreatePermission)
elif http_method in ["PATCH"]:
permissions.append(auth.ProjectChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.ProjectDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def perform_create(self, serializer):
def validate_project_limit(owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['project_limit'] is not None and \
Project.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['project_limit']:
raise serializers.ValidationError('The user has the maximum number of projects')
owner = self.request.data.get('owner', None)
if owner:
validate_project_limit(owner)
serializer.save()
else:
validate_project_limit(self.request.user)
serializer.save(owner=self.request.user)
@swagger_auto_schema(method='get', operation_summary='Returns information of the tasks of the project with the selected id',
responses={'200': TaskSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=TaskSerializer)
def tasks(self, request, pk):
self.get_object() # force to call check_object_permissions
queryset = Task.objects.filter(project_id=pk).order_by('-id')
queryset = auth.filter_task_queryset(queryset, request.user)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True,
context={"request": request})
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True,
context={"request": request})
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export project as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/project/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@swagger_auto_schema(method='get', operation_summary='Method allows to download project annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
'401': openapi.Response(description='Format is not specified'),
}
)
@action(detail=True, methods=['GET'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/projects/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
return Response("Format is not specified",status=status.HTTP_400_BAD_REQUEST)
class TaskFilter(filters.FilterSet):
project = filters.CharFilter(field_name="project__name", lookup_expr="icontains")
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
owner = filters.CharFilter(field_name="owner__username", lookup_expr="icontains")
mode = filters.CharFilter(field_name="mode", lookup_expr="icontains")
status = filters.CharFilter(field_name="status", lookup_expr="icontains")
assignee = filters.CharFilter(field_name="assignee__username", lookup_expr="icontains")
class Meta:
model = Task
fields = ("id", "project_id", "project", "name", "owner", "mode", "status",
"assignee")
class DjangoFilterInspector(CoreAPICompatInspector):
def get_filter_parameters(self, filter_backend):
if isinstance(filter_backend, DjangoFilterBackend):
result = super(DjangoFilterInspector, self).get_filter_parameters(filter_backend)
res = result.copy()
for param in result:
if param.get('name') == 'project_id' or param.get('name') == 'project':
res.remove(param)
return res
return NotHandled
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of tasks according to query parameters (10 tasks per page)',
manual_parameters=[
openapi.Parameter('id',openapi.IN_QUERY,description="A unique number value identifying this task",type=openapi.TYPE_NUMBER),
openapi.Parameter('name', openapi.IN_QUERY, description="Find all tasks where name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="Find all tasks where owner name contains a parameter value", type=openapi.TYPE_STRING),
openapi.Parameter('mode', openapi.IN_QUERY, description="Find all tasks with a specific mode", type=openapi.TYPE_STRING, enum=['annotation', 'interpolation']),
openapi.Parameter('status', openapi.IN_QUERY, description="Find all tasks with a specific status", type=openapi.TYPE_STRING,enum=['annotation','validation','completed']),
openapi.Parameter('assignee', openapi.IN_QUERY, description="Find all tasks where assignee name contains a parameter value", type=openapi.TYPE_STRING)
],
filter_inspectors=[DjangoFilterInspector]))
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Method creates a new task in a database without any attached images and videos'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a specific task'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a task by id'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method deletes a specific task, all attached jobs, annotations, and data'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Methods does a partial update of chosen fields in a task'))
class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
queryset = Task.objects.all().prefetch_related(
"label_set__attributespec_set",
"segment_set__job_set",
).order_by('-id')
serializer_class = TaskSerializer
search_fields = ("name", "owner__username", "mode", "status")
filterset_class = TaskFilter
ordering_fields = ("id", "name", "owner", "status", "assignee")
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.TaskAccessPermission)
elif http_method in ["POST"]:
permissions.append(auth.TaskCreatePermission)
elif self.action == 'annotations' or http_method in ["PATCH", "PUT"]:
permissions.append(auth.TaskChangePermission)
elif http_method in ["DELETE"]:
permissions.append(auth.TaskDeletePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def _validate_task_limit(self, owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
raise serializers.ValidationError('The user has the maximum number of tasks')
def create(self, request):
action = self.request.query_params.get('action', None)
if action is None:
return super().create(request)
elif action == 'import':
self._validate_task_limit(owner=self.request.user)
if 'rq_id' in request.data:
rq_id = request.data['rq_id']
else:
rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4())
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = TaskFileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
task_file = serializer.validated_data['task_file']
fd, filename = mkstemp(prefix='cvat_')
with open(filename, 'wb+') as f:
for chunk in task_file.chunks():
f.write(chunk)
rq_job = queue.enqueue_call(
func=import_task,
args=(filename, request.user.id),
job_id=rq_id,
meta={
'tmp_file': filename,
'tmp_file_descriptor': fd,
},
)
else:
if rq_job.is_finished:
task_id = rq_job.return_value
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response({'id': task_id}, status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def retrieve(self, request, pk=None):
db_task = self.get_object() # force to call check_object_permissions
action = self.request.query_params.get('action', None)
if action is None:
return super().retrieve(request, pk)
elif action in ('export', 'download'):
queue = django_rq.get_queue("default")
rq_id = "/api/v1/tasks/{}/export".format(pk)
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_task_update_time = timezone.localtime(db_task.updated_date)
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_task_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_task_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = "task_{}_backup_{}{}".format(
db_task.name, timestamp,
osp.splitext(file_path)[1])
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
ttl = dm.views.TASK_CACHE_TTL.total_seconds()
queue.enqueue_call(
func=dm.views.backup_task,
args=(pk, 'task_dump.zip'),
job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def perform_create(self, serializer):
owner = self.request.data.get('owner', None)
if owner:
self._validate_task_limit(owner)
serializer.save()
else:
self._validate_task_limit(self.request.user)
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
task_dirname = instance.get_task_dirname()
super().perform_destroy(instance)
shutil.rmtree(task_dirname, ignore_errors=True)
if instance.data and not instance.data.tasks.all():
shutil.rmtree(instance.data.get_data_dirname(), ignore_errors=True)
instance.data.delete()
@swagger_auto_schema(method='get', operation_summary='Returns a list of jobs for a specific task',
responses={'200': JobSerializer(many=True)})
@action(detail=True, methods=['GET'], serializer_class=JobSerializer)
def jobs(self, request, pk):
self.get_object() # force to call check_object_permissions
queryset = Job.objects.filter(segment__task_id=pk)
serializer = JobSerializer(queryset, many=True,
context={"request": request})
return Response(serializer.data)
@swagger_auto_schema(method='post', operation_summary='Method permanently attaches images or video to a task',
request_body=DataSerializer,
)
@swagger_auto_schema(method='get', operation_summary='Method returns data for a specific task',
manual_parameters=[
openapi.Parameter('type', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['chunk', 'frame', 'preview', 'context_image'],
description="Specifies the type of the requested data"),
openapi.Parameter('quality', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
openapi.Parameter('number', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_NUMBER,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
]
)
@action(detail=True, methods=['POST', 'GET'])
def data(self, request, pk):
db_task = self.get_object() # call check_object_permissions as well
if request.method == 'POST':
if db_task.data:
return Response(data='Adding more data is not supported',
status=status.HTTP_400_BAD_REQUEST)
serializer = DataSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
db_data = serializer.save()
db_task.data = db_data
db_task.save()
data = {k:v for k, v in serializer.data.items()}
data['use_zip_chunks'] = serializer.validated_data['use_zip_chunks']
data['use_cache'] = serializer.validated_data['use_cache']
data['copy_data'] = serializer.validated_data['copy_data']
if data['use_cache']:
db_task.data.storage_method = StorageMethodChoice.CACHE
db_task.data.save(update_fields=['storage_method'])
if data['server_files'] and not data.get('copy_data'):
db_task.data.storage = StorageChoice.SHARE
db_task.data.save(update_fields=['storage'])
if db_data.cloud_storage:
db_task.data.storage = StorageChoice.CLOUD_STORAGE
db_task.data.save(update_fields=['storage'])
# if stop_frame is 0, the function cannot tell whether the user specified it
# or whether it is the default value from the database
if 'stop_frame' not in serializer.validated_data:
data['stop_frame'] = None
task.create(db_task.id, data)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
data_type = request.query_params.get('type', None)
data_id = request.query_params.get('number', None)
data_quality = request.query_params.get('quality', 'compressed')
possible_data_type_values = ('chunk', 'frame', 'preview', 'context_image')
possible_quality_values = ('compressed', 'original')
try:
if not data_type or data_type not in possible_data_type_values:
raise ValidationError(detail='Data type not specified or has wrong value')
elif data_type == 'chunk' or data_type == 'frame':
if not data_id:
raise ValidationError(detail='Number is not specified')
elif data_quality not in possible_quality_values:
raise ValidationError(detail='Wrong quality value')
db_data = db_task.data
if not db_data:
raise NotFound(detail='Cannot find requested data for the task')
frame_provider = FrameProvider(db_task.data, db_task.dimension)
if data_type == 'chunk':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
#TODO: av.FFmpegError processing
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
buff, mime_type = frame_provider.get_chunk(data_id, data_quality)
return HttpResponse(buff.getvalue(), content_type=mime_type)
# Follow symbolic links if the chunk is a link to a real image; otherwise
# mimetype detection inside sendfile will work incorrectly.
path = os.path.realpath(frame_provider.get_chunk(data_id, data_quality))
return sendfile(request, path)
elif data_type == 'frame':
data_id = int(data_id)
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
buf, mime = frame_provider.get_frame(data_id, data_quality)
return HttpResponse(buf.getvalue(), content_type=mime)
elif data_type == 'preview':
return sendfile(request, frame_provider.get_preview())
elif data_type == 'context_image':
data_id = int(data_id)
image = Image.objects.get(data_id=db_data.id, frame=data_id)
for i in image.related_files.all():
path = os.path.realpath(str(i.path))
image = cv2.imread(path)
success, result = cv2.imencode('.JPEG', image)
if not success:
raise Exception('Failed to encode image to ".jpeg" format')
return HttpResponse(io.BytesIO(result.tobytes()), content_type='image/jpeg')
return Response(data='No context image related to the frame',
status=status.HTTP_404_NOT_FOUND)
else:
return Response(data='unknown data type {}.'.format(data_type), status=status.HTTP_400_BAD_REQUEST)
except APIException as e:
return Response(data=e.get_full_details(), status=e.status_code)
except FileNotFoundError as ex:
msg = f"{ex.strerror} {ex.filename}"
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg, status=status.HTTP_404_NOT_FOUND)
except Exception as e:
msg = 'cannot get requested data type: {}, number: {}, quality: {}'.format(data_type, data_id, data_quality)
slogger.task[pk].error(msg, exc_info=True)
return Response(data=msg + '\n' + str(e), status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(method='get', operation_summary='Method allows to download task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='put', operation_summary='Method allows to upload task annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Input format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=False),
],
responses={
'202': openapi.Response(description='Uploading has been started'),
'201': openapi.Response(description='Uploading has finished'),
'405': openapi.Response(description='Format is not available'),
}
)
@swagger_auto_schema(method='patch', operation_summary='Method performs a partial update of annotations in a specific task',
manual_parameters=[openapi.Parameter('action', in_=openapi.IN_QUERY, required=True, type=openapi.TYPE_STRING,
enum=['create', 'update', 'delete'])])
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific task')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
if request.method == 'GET':
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
data = dm.task.get_task_data(pk)
serializer = LabeledDataSerializer(data=data)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
elif request.method == 'PUT':
format_name = request.query_params.get('format')
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/tasks/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_task_annotations,
pk=pk,
format_name=format_name,
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
data = dm.task.put_task_data(pk, serializer.data)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_task_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_task_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='When a task is being created, the method returns information about the status of the creation process')
@action(detail=True, methods=['GET'], serializer_class=RqStatusSerializer)
def status(self, request, pk):
self.get_object() # force to call check_object_permissions
response = self._get_rq_response(queue="default",
job_id="/api/{}/tasks/{}".format(request.version, pk))
serializer = RqStatusSerializer(data=response)
if serializer.is_valid(raise_exception=True):
return Response(serializer.data)
@staticmethod
def _get_rq_response(queue, job_id):
queue = django_rq.get_queue(queue)
job = queue.fetch_job(job_id)
response = {}
if job is None or job.is_finished:
response = { "state": "Finished" }
elif job.is_queued:
response = { "state": "Queued" }
elif job.is_failed:
response = { "state": "Failed", "message": job.exc_info }
else:
response = { "state": "Started" }
if 'status' in job.meta:
response['message'] = job.meta['status']
return response
@staticmethod
@swagger_auto_schema(method='get', operation_summary='Method provides meta information about media files related to the task',
responses={'200': DataMetaSerializer()})
@action(detail=True, methods=['GET'], serializer_class=DataMetaSerializer,
url_path='data/meta')
def data_info(request, pk):
db_task = models.Task.objects.prefetch_related(
Prefetch('data', queryset=models.Data.objects.select_related('video').prefetch_related(
Prefetch('images', queryset=models.Image.objects.prefetch_related('related_files').order_by('frame'))
))
).get(pk=pk)
if hasattr(db_task.data, 'video'):
media = [db_task.data.video]
else:
media = list(db_task.data.images.all())
frame_meta = [{
'width': item.width,
'height': item.height,
'name': item.path,
'has_related_context': hasattr(item, 'related_files') and item.related_files.exists()
} for item in media]
db_data = db_task.data
db_data.frames = frame_meta
serializer = DataMetaSerializer(db_data)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export task as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the downloading process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_task = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_task_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@method_decorator(name='retrieve', decorator=swagger_auto_schema(operation_summary='Method returns details of a job'))
@method_decorator(name='update', decorator=swagger_auto_schema(operation_summary='Method updates a job by id'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a job'))
class JobViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin):
queryset = Job.objects.all().order_by('id')
serializer_class = JobSerializer
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.JobAccessPermission)
elif http_method in ['PATCH', 'PUT', 'DELETE']:
permissions.append(auth.JobChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns annotations for a specific job')
@swagger_auto_schema(method='put', operation_summary='Method performs an update of all annotations in a specific job')
@swagger_auto_schema(method='patch', manual_parameters=[
openapi.Parameter('action', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True,
enum=['create', 'update', 'delete'])],
operation_summary='Method performs a partial update of annotations in a specific job')
@swagger_auto_schema(method='delete', operation_summary='Method deletes all annotations for a specific job')
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
self.get_object() # force to call check_object_permissions
if request.method == 'GET':
data = dm.task.get_job_data(pk)
return Response(data)
elif request.method == 'PUT':
format_name = request.query_params.get("format", "")
if format_name:
return _import_annotations(
request=request,
rq_id="{}@/api/v1/jobs/{}/annotations/upload".format(request.user, pk),
rq_func=dm.task.import_job_annotations,
pk=pk,
format_name=format_name
)
else:
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.put_job_data(pk, serializer.data)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
elif request.method == 'DELETE':
dm.task.delete_job_data(pk)
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == 'PATCH':
action = self.request.query_params.get("action", None)
if action not in dm.task.PatchAction.values():
raise serializers.ValidationError(
"Please specify a correct 'action' for the request")
serializer = LabeledDataSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
try:
data = dm.task.patch_job_data(pk, serializer.data, action)
except (AttributeError, IntegrityError) as e:
return Response(data=str(e), status=status.HTTP_400_BAD_REQUEST)
return Response(data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of reviews for the job',
responses={'200': ReviewSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=ReviewSerializer)
def reviews(self, request, pk):
db_job = self.get_object()
queryset = db_job.review_set
serializer = ReviewSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Method returns list of issues for the job',
responses={'200': CombinedIssueSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CombinedIssueSerializer)
def issues(self, request, pk):
db_job = self.get_object()
queryset = db_job.issue_set
serializer = CombinedIssueSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@method_decorator(name='create', decorator=swagger_auto_schema(operation_summary='Submit a review for a job'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a review from a job'))
class ReviewViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.CreateModelMixin):
queryset = Review.objects.all().order_by('id')
def get_serializer_class(self):
if self.request.method == 'POST':
return CombinedReviewSerializer
else:
return ReviewSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
if self.request.method == 'POST':
permissions.append(auth.JobReviewPermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def create(self, request, *args, **kwargs):
job_id = request.data['job']
db_job = get_object_or_404(Job, pk=job_id)
self.check_object_permissions(self.request, db_job)
if request.data['status'] == ReviewStatus.REVIEW_FURTHER:
if 'reviewer_id' not in request.data:
return Response('Must provide a new reviewer', status=status.HTTP_400_BAD_REQUEST)
reviewer_id = request.data['reviewer_id']
reviewer = get_object_or_404(User, pk=reviewer_id)
request.data.update({
'reviewer_id': request.user.id,
})
if db_job.assignee:
request.data.update({
'assignee_id': db_job.assignee.id,
})
issue_set = request.data['issue_set']
for issue in issue_set:
issue['job'] = db_job.id
issue['owner_id'] = request.user.id
comment_set = issue['comment_set']
for comment in comment_set:
comment['author_id'] = request.user.id
serializer = self.get_serializer(data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
if serializer.data['status'] == ReviewStatus.ACCEPTED:
db_job.status = StatusChoice.COMPLETED
db_job.save()
elif serializer.data['status'] == ReviewStatus.REJECTED:
db_job.status = StatusChoice.ANNOTATION
db_job.save()
else:
db_job.reviewer = reviewer
db_job.save()
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes an issue from a job'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates an issue. It is used to resolve/reopen an issue'))
class IssueViewSet(viewsets.GenericViewSet, mixins.DestroyModelMixin, mixins.UpdateModelMixin):
queryset = Issue.objects.all().order_by('id')
http_method_names = ['get', 'patch', 'delete', 'options']
def get_serializer_class(self):
return IssueSerializer
def partial_update(self, request, *args, **kwargs):
db_issue = self.get_object()
if 'resolver_id' in request.data and request.data['resolver_id'] and db_issue.resolver is None:
# resolve
db_issue.resolver = request.user
db_issue.resolved_date = datetime.now()
db_issue.save(update_fields=['resolver', 'resolved_date'])
elif 'resolver_id' in request.data and not request.data['resolver_id'] and db_issue.resolver is not None:
# reopen
db_issue.resolver = None
db_issue.resolved_date = None
db_issue.save(update_fields=['resolver', 'resolved_date'])
serializer = self.get_serializer(db_issue)
return Response(serializer.data)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.IssueAccessPermission)
elif http_method in ['DELETE']:
permissions.append(auth.IssueDestroyPermission)
elif http_method in ['PATCH']:
permissions.append(auth.IssueChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='The action returns all comments of a specific issue',
responses={'200': CommentSerializer(many=True)}
)
@action(detail=True, methods=['GET'], serializer_class=CommentSerializer)
def comments(self, request, pk):
db_issue = self.get_object()
queryset = db_issue.comment_set
serializer = CommentSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(operation_summary='Method updates comment in an issue'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(operation_summary='Method removes a comment from an issue'))
class CommentViewSet(viewsets.GenericViewSet,
mixins.DestroyModelMixin, mixins.UpdateModelMixin, mixins.CreateModelMixin):
queryset = Comment.objects.all().order_by('id')
serializer_class = CommentSerializer
http_method_names = ['get', 'post', 'patch', 'delete', 'options']
def create(self, request, *args, **kwargs):
request.data.update({
'author_id': request.user.id,
})
issue_id = request.data['issue']
db_issue = get_object_or_404(Issue, pk=issue_id)
self.check_object_permissions(self.request, db_issue.job)
return super().create(request, *args, **kwargs)
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in ['PATCH', 'DELETE']:
permissions.append(auth.CommentChangePermission)
elif http_method in ['POST']:
permissions.append(auth.CommentCreatePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
class UserFilter(filters.FilterSet):
class Meta:
model = User
fields = ("id", "is_active")
@method_decorator(name='list', decorator=swagger_auto_schema(
manual_parameters=[
openapi.Parameter('id',openapi.IN_QUERY,description="A unique number value identifying this user",type=openapi.TYPE_NUMBER),
openapi.Parameter('is_active',openapi.IN_QUERY,description="Returns only active users",type=openapi.TYPE_BOOLEAN),
],
operation_summary='Method provides a paginated list of users registered on the server'))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_summary='Method provides information of a specific user'))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method updates chosen fields of a user'))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific user from the server'))
class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin):
queryset = User.objects.prefetch_related('groups').all().order_by('id')
http_method_names = ['get', 'post', 'head', 'patch', 'delete']
search_fields = ('username', 'email')
filterset_class = UserFilter
def get_serializer_class(self):
user = self.request.user
if user.is_staff:
return UserSerializer
else:
is_self = int(self.kwargs.get("pk", 0)) == user.id or \
self.action == "self"
if is_self and self.request.method in SAFE_METHODS:
return UserSerializer
else:
return BasicUserSerializer
def get_permissions(self):
permissions = [IsAuthenticated]
user = self.request.user
if not self.request.method in SAFE_METHODS:
is_self = int(self.kwargs.get("pk", 0)) == user.id
if not is_self:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
@swagger_auto_schema(method='get', operation_summary='Method returns an instance of a user who is currently authorized')
@action(detail=False, methods=['GET'])
def self(self, request):
"""
Method returns an instance of a user who is currently authorized
"""
serializer_class = self.get_serializer_class()
serializer = serializer_class(request.user, context={ "request": request })
return Response(serializer.data)
class RedefineDescriptionField(FieldInspector):
# pylint: disable=no-self-use
def process_result(self, result, method_name, obj, **kwargs):
if isinstance(result, openapi.Schema):
if hasattr(result, 'title') and result.title == 'Specific attributes':
result.description = 'structure like key1=value1&key2=value2\n' \
'supported: range=aws_range'
return result
@method_decorator(
name='retrieve',
decorator=swagger_auto_schema(
operation_summary='Method returns details of a specific cloud storage',
responses={
'200': openapi.Response(description='Details of a storage'),
},
tags=['cloud storages']
)
)
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_summary='Returns a paginated list of storages according to query parameters',
manual_parameters=[
openapi.Parameter('provider_type', openapi.IN_QUERY, description="A supported provider of cloud storages",
type=openapi.TYPE_STRING, enum=CloudProviderChoice.list()),
openapi.Parameter('display_name', openapi.IN_QUERY, description="A display name of storage", type=openapi.TYPE_STRING),
openapi.Parameter('resource', openapi.IN_QUERY, description="A name of bucket or container", type=openapi.TYPE_STRING),
openapi.Parameter('owner', openapi.IN_QUERY, description="A resource owner", type=openapi.TYPE_STRING),
openapi.Parameter('credentials_type', openapi.IN_QUERY, description="A type of a granting access", type=openapi.TYPE_STRING, enum=CredentialsTypeChoice.list()),
],
responses={'200': BaseCloudStorageSerializer(many=True)},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_summary='Method deletes a specific cloud storage',
tags=['cloud storages']
)
)
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_summary='Method does a partial update of chosen fields in a cloud storage instance',
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField]
)
)
class CloudStorageViewSet(auth.CloudStorageGetQuerySetMixin, viewsets.ModelViewSet):
http_method_names = ['get', 'post', 'patch', 'delete']
queryset = CloudStorageModel.objects.all().prefetch_related('data').order_by('-id')
search_fields = ('provider_type', 'display_name', 'resource', 'owner__username')
filterset_fields = ['provider_type', 'display_name', 'resource', 'credentials_type']
def get_permissions(self):
http_method = self.request.method
permissions = [IsAuthenticated]
if http_method in SAFE_METHODS:
permissions.append(auth.CloudStorageAccessPermission)
elif http_method in ("POST", "PATCH", "DELETE"):
permissions.append(auth.CloudStorageChangePermission)
else:
permissions.append(auth.AdminRolePermission)
return [perm() for perm in permissions]
def get_serializer_class(self):
if self.request.method in ("POST", "PATCH"):
return CloudStorageSerializer
else:
return BaseCloudStorageSerializer
def get_queryset(self):
queryset = super().get_queryset()
provider_type = self.request.query_params.get('provider_type', None)
if provider_type:
if provider_type in CloudProviderChoice.list():
return queryset.filter(provider_type=provider_type)
raise ValidationError('Unsupported type of cloud provider')
return queryset
def perform_create(self, serializer):
# check that instance of cloud storage exists
provider_type = serializer.validated_data.get('provider_type')
credentials = Credentials(
session_token=serializer.validated_data.get('session_token', ''),
account_name=serializer.validated_data.get('account_name', ''),
key=serializer.validated_data.get('key', ''),
secret_key=serializer.validated_data.get('secret_key', '')
)
details = {
'resource': serializer.validated_data.get('resource'),
'credentials': credentials,
'specific_attributes': {
item.split('=')[0].strip(): item.split('=')[1].strip()
for item in serializer.validated_data.get('specific_attributes').split('&')
} if len(serializer.validated_data.get('specific_attributes', ''))
else dict()
}
storage = get_cloud_storage_instance(cloud_provider=provider_type, **details)
try:
storage.exists()
except Exception as ex:
message = str(ex)
slogger.glob.error(message)
raise
owner = self.request.data.get('owner')
if owner:
serializer.save()
else:
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
cloud_storage_dirname = instance.get_storage_dirname()
super().perform_destroy(instance)
shutil.rmtree(cloud_storage_dirname, ignore_errors=True)
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_summary='Method creates a cloud storage with the specified characteristics',
responses={
'201': openapi.Response(description='A storage has been created')
},
tags=['cloud storages'],
field_inspectors=[RedefineDescriptionField],
)
)
def create(self, request, *args, **kwargs):
try:
response = super().create(request, *args, **kwargs)
except IntegrityError:
response = HttpResponseBadRequest('Same storage already exists')
except ValidationError as exceptions:
msg_body = ""
for ex in exceptions.args:
for field, ex_msg in ex.items():
msg_body += ": ".join([field, str(ex_msg[0])])
msg_body += '\n'
return HttpResponseBadRequest(msg_body)
except APIException as ex:
return Response(data=ex.get_full_details(), status=ex.status_code)
except Exception as ex:
response = HttpResponseBadRequest(str(ex))
return response
@swagger_auto_schema(
method='get',
operation_summary='Method returns mapped names of available files from the storage and the manifest content',
manual_parameters=[
openapi.Parameter('manifest_path', openapi.IN_QUERY,
description="Path to the manifest file in a cloud storage",
type=openapi.TYPE_STRING)
],
responses={
'200': openapi.Response(description='Mapped names of available files from the storage and the manifest content'),
},
tags=['cloud storages']
)
@action(detail=True, methods=['GET'], url_path='content')
def content(self, request, pk):
try:
db_storage = CloudStorageModel.objects.get(pk=pk)
credentials = Credentials()
credentials.convert_from_db({
'type': db_storage.credentials_type,
'value': db_storage.credentials,
})
details = {
'resource': db_storage.resource,
'credentials': credentials,
'specific_attributes': db_storage.get_specific_attributes()
}
storage = get_cloud_storage_instance(cloud_provider=db_storage.provider_type, **details)
storage.initialize_content()
storage_files = storage.content
manifest_path = request.query_params.get('manifest_path', 'manifest.jsonl')
with TemporaryDirectory(suffix='manifest', prefix='cvat') as tmp_dir:
tmp_manifest_path = os.path.join(tmp_dir, 'manifest.jsonl')
storage.download_file(manifest_path, tmp_manifest_path)
manifest = ImageManifestManager(tmp_manifest_path)
manifest.init_index()
manifest_files = manifest.data
content = {f:[] for f in set(storage_files) | set(manifest_files)}
for key, _ in content.items():
if key in storage_files: content[key].append('s') # storage
if key in manifest_files: content[key].append('m') # manifest
data = json.dumps(content)
return Response(data=data, content_type="application/json")
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
return HttpResponseNotFound(message)
except Exception as ex:
return HttpResponseBadRequest(str(ex))
def rq_handler(job, exc_type, exc_value, tb):
job.exc_info = "".join(
traceback.format_exception_only(exc_type, exc_value))
job.save()
if "tasks" in job.id.split("/"):
return task.rq_handler(job, exc_type, exc_value, tb)
return True
# TODO: Method should be reimplemented as a separated view
# @swagger_auto_schema(method='put', manual_parameters=[openapi.Parameter('format', in_=openapi.IN_QUERY,
# description='A name of a loader\nYou can get annotation loaders from this API:\n/server/annotation/formats',
# required=True, type=openapi.TYPE_STRING)],
# operation_summary='Method allows to upload annotations',
# responses={'202': openapi.Response(description='Load of annotations has been started'),
# '201': openapi.Response(description='Annotations have been uploaded')},
# tags=['tasks'])
# @api_view(['PUT'])
def _import_annotations(request, rq_id, rq_func, pk, format_name):
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_import_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown input format '{}'".format(format_name))
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = AnnotationFileSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
anno_file = serializer.validated_data['annotation_file']
fd, filename = mkstemp(prefix='cvat_{}'.format(pk))
with open(filename, 'wb+') as f:
for chunk in anno_file.chunks():
f.write(chunk)
av_scan_paths(filename)
rq_job = queue.enqueue_call(
func=rq_func,
args=(pk, filename, format_name),
job_id=rq_id
)
rq_job.meta['tmp_file'] = filename
rq_job.meta['tmp_file_descriptor'] = fd
rq_job.save_meta()
else:
if rq_job.is_finished:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(status=status.HTTP_202_ACCEPTED)
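# --- Illustrative sketch (not part of the original module) -------------------
# How a client could drive the import flow implemented above. The host URL,
# credentials and format name are assumptions; the endpoint path and the
# "annotation_file" field name mirror _import_annotations, and the third-party
# `requests` package is imported lazily so this module itself does not need it.
def _example_import_client(host, task_id, format_name, annotations_path,
                           auth=('user', 'password')):
    import time
    import requests
    url = '{}/api/v1/tasks/{}/annotations'.format(host, task_id)
    with open(annotations_path, 'rb') as f:
        response = requests.put(url, params={'format': format_name},
                                files={'annotation_file': f}, auth=auth)
    # 202 means the upload was accepted and queued; repeat the request until
    # the server reports 201 (finished) or an error status.
    while response.status_code == status.HTTP_202_ACCEPTED:
        time.sleep(1)
        response = requests.put(url, params={'format': format_name}, auth=auth)
    return response.status_code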
def _export_annotations(db_instance, rq_id, request, format_name, action, callback, filename):
if action not in {"", "download"}:
raise serializers.ValidationError(
"Unexpected action specified for the request")
format_desc = {f.DISPLAY_NAME: f
for f in dm.views.get_export_formats()}.get(format_name)
if format_desc is None:
raise serializers.ValidationError(
"Unknown format specified for the request")
elif not format_desc.ENABLED:
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_instance_update_time = timezone.localtime(db_instance.updated_date)
if isinstance(db_instance, Project):
tasks_update = list(map(lambda db_task: timezone.localtime(db_task.updated_date), db_instance.tasks.all()))
last_instance_update_time = max(tasks_update + [last_instance_update_time])
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_instance_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_instance_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = filename or \
"{}_{}-{}-{}{}".format(
"project" if isinstance(db_instance, models.Project) else "task",
db_instance.name, timestamp,
format_name, osp.splitext(file_path)[1]
)
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
try:
if request.scheme:
server_address = request.scheme + '://'
server_address += request.get_host()
except Exception:
server_address = None
ttl = (dm.views.PROJECT_CACHE_TTL if isinstance(db_instance, Project) else dm.views.TASK_CACHE_TTL).total_seconds()
queue.enqueue_call(func=callback,
args=(db_instance.id, format_name, server_address), job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
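# --- Illustrative sketch (not part of the original module) -------------------
# How a client could drive the export flow implemented above: the first GET
# enqueues the export (202), later GETs report 201 once the file exists, and
# adding action=download streams the archive. Host, credentials, format name
# and output path are assumptions; `requests` is a lazy third-party import.
def _example_export_client(host, task_id, format_name, out_path,
                           auth=('user', 'password')):
    import time
    import requests
    url = '{}/api/v1/tasks/{}/dataset'.format(host, task_id)
    params = {'format': format_name}
    while True:
        response = requests.get(url, params=params, auth=auth)
        if response.status_code == status.HTTP_201_CREATED:
            break
        time.sleep(1)
    response = requests.get(url, params=dict(params, action='download'),
                            auth=auth, stream=True)
    with open(out_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    return out_path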
|
the-stack_106_16653
|
#
# Copyright (c) 2013 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from beritest_tools import attr
#
# Test cbez (capability branch if NULL)
#
class test_cp2_cbez(BaseBERITestCase):
@attr('capabilities')
def test_cp2_cbez_1(self):
'''Test that CBEZ branches if the cap is zero'''
self.assertRegisterEqual(self.MIPS.a0, 0,
"cbnz did not branch when the cap was zero")
@attr('capabilities')
def test_cp2_cbez_2(self):
'''Test that CBEZ does not branch if the cap is not zero'''
self.assertRegisterEqual(self.MIPS.a1, 1,
"cbez branched when cap was not zero")
@attr('capabilities')
def test_cp2_cbez_3(self):
'''Test that CBEZ executes the instruction in the branch delay slot'''
self.assertRegisterEqual(self.MIPS.a2, 1,
"cbez did not execute instruction in branch delay slot")
|
the-stack_106_16655
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import unittest
import json
from pkg_resources import resource_filename
from PIL import Image
import numpy as np
from ingestclient.core.config import Configuration
from ingestclient.plugins.multipage_tiff import load_tiff_multipage
class TestSingleMultipageTiff(unittest.TestCase):
def test_SingleTimeTiffPathProcessor_setup(self):
"""Test setting up the path processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
assert pp.parameters["z_0"] == os.path.join(resource_filename("ingestclient", "test/data"),
"test_multipage.tif")
assert pp.parameters["ingest_job"]["extent"]["x"] == [0, 512]
def test_SingleTimeTiffPathProcessor_process(self):
"""Test running the path processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
assert pp.process(0, 0, 0, 0) == os.path.join(resource_filename("ingestclient", "test/data"),
"test_multipage.tif")
def test_SingleTimeTiffPathProcessor_process_invalid(self):
"""Test running the path processor with invalid tile indices"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
with self.assertRaises(IndexError):
pp.process(1, 0, 0, 0)
with self.assertRaises(IndexError):
pp.process(0, 1, 0, 0)
with self.assertRaises(IndexError):
pp.process(0, 0, 1, 0)
with self.assertRaises(IndexError):
pp.process(0, 0, 0, 11)
def test_SingleTimeTiffTileProcessor_setup(self):
"""Test setting up the tile processor"""
tp = self.config.tile_processor_class
tp.setup(self.config.get_tile_processor_params())
assert tp.parameters["datatype"] == "uint16"
assert tp.parameters["ingest_job"]["extent"]["y"] == [0, 256]
def test_SingleTimeTiffTileProcessor_process(self):
"""Test running the tile processor"""
pp = self.config.path_processor_class
pp.setup(self.config.get_path_processor_params())
tp = self.config.tile_processor_class
tp.setup(self.config.get_tile_processor_params())
filename = pp.process(0, 0, 0, 0)
handle = tp.process(filename, 0, 0, 0, 3)
# Open handle as image file
test_img = Image.open(handle)
test_img = np.array(test_img, dtype="uint16")
# Open original data
truth_img = load_tiff_multipage(filename)
truth_img = np.array(truth_img, dtype="uint16")
truth_img = truth_img[3, :, :]
# Make sure the same
np.testing.assert_array_equal(truth_img, test_img)
@classmethod
def setUpClass(cls):
cls.config_file = os.path.join(resource_filename("ingestclient", "test/data"), "boss-v0.1-singleMultipageTiff.json")
with open(cls.config_file, 'rt') as example_file:
cls.example_config_data = json.load(example_file)
# inject the file path since we don't want to hardcode
cls.example_config_data["client"]["path_processor"]["params"]["z_0"] = os.path.join(resource_filename("ingestclient",
"test/data"),
"test_multipage.tif")
cls.config = Configuration(cls.example_config_data)
cls.config.load_plugins()
|
the-stack_106_16657
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import jsonschema
from jsonschema import compat
import netaddr
# NOTE(kiall): All of the below regular expressions are terminated with
# "\Z", rather than simply "$" to ensure a string with a
# trailing newline is NOT matched. See bug #1471158.
RE_ZONENAME = r'^(?!.{255,})(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-)\.)+\Z'
RE_HOSTNAME = r'^(?!.{255,})(?:(?:^\*|(?!\-)[A-Za-z0-9_\-]{1,63})(?<!\-)\.)+\Z'
RE_SRV_HOST_NAME = r'^(?:(?!\-)(?:\_[A-Za-z0-9_\-]{1,63}\.){2})(?!.{255,})' \
r'(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-)\.)+\Z'
# The TLD name will not end in a period.
RE_TLDNAME = r'^(?!.{255,})(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-))' \
r'(?:\.(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-)))*\Z'
RE_UUID = r'^(?:[0-9a-fA-F]){8}-(?:[0-9a-fA-F]){4}-(?:[0-9a-fA-F]){4}-' \
r'(?:[0-9a-fA-F]){4}-(?:[0-9a-fA-F]){12}\Z'
RE_IP_AND_PORT = r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}' \
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)' \
r'(?::(?:6553[0-5]|655[0-2]\d|65[0-4]\d\d|6[0-4]\d{3}' \
r'|[1-5]\d{4}|[1-9]\d{0,3}|0))?\Z'
RE_FIP_ID = r'^(?P<region>[A-Za-z0-9\.\-_]{1,100}):' \
r'(?P<id>[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' \
r'[0-9a-fA-F]{4}-[0-9a-fA-F]{12})\Z'
RE_SSHFP_FINGERPRINT = r'^([0-9A-Fa-f]{10,40}|[0-9A-Fa-f]{64})\Z'
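# Illustrative note (not part of the original module): in Python regular
# expressions "$" can match just before a trailing newline, while "\Z" only
# matches at the true end of the string, which is why the patterns above are
# terminated with "\Z" (see bug #1471158).
def _anchor_demo():
    assert re.match(r'^example$', 'example\n') is not None   # "$" tolerates "\n"
    assert re.match(r'^example\Z', 'example\n') is None      # "\Z" does not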
draft3_format_checker = jsonschema.draft3_format_checker
draft4_format_checker = jsonschema.draft4_format_checker
@draft3_format_checker.checks("ip-address")
@draft4_format_checker.checks("ipv4")
def is_ipv4(instance):
if not isinstance(instance, compat.str_types):
return True
try:
address = netaddr.IPAddress(instance, version=4)
# netaddr happily accepts, and expands "127.0" into "127.0.0.0"
if str(address) != instance:
return False
except Exception:
return False
if instance == '0.0.0.0': # RFC5735
return False
return True
@draft3_format_checker.checks("ipv6")
@draft4_format_checker.checks("ipv6")
def is_ipv6(instance):
if not isinstance(instance, compat.str_types):
return True
try:
netaddr.IPAddress(instance, version=6)
except Exception:
return False
return True
@draft3_format_checker.checks("host-name")
@draft4_format_checker.checks("hostname")
def is_hostname(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_HOSTNAME, instance):
return False
return True
@draft4_format_checker.checks("ns-hostname")
def is_ns_hostname(instance):
if not isinstance(instance, compat.str_types):
return True
# BIND doesn't like *.host.com. see bug #1533299
if not re.match(RE_ZONENAME, instance):
return False
return True
@draft3_format_checker.checks("ip-or-host")
@draft4_format_checker.checks("ip-or-host")
def is_ip_or_host(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_ZONENAME, instance)\
and not is_ipv4(instance)\
and not is_ipv6(instance):
return False
return True
@draft3_format_checker.checks("domain-name")
@draft4_format_checker.checks("domainname")
@draft3_format_checker.checks("zone-name")
@draft4_format_checker.checks("zonename")
def is_zonename(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_ZONENAME, instance):
return False
return True
@draft4_format_checker.checks("srv-hostname")
def is_srv_hostname(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_SRV_HOST_NAME, instance):
return False
return True
@draft4_format_checker.checks("txt-data")
def is_txt_data(instance):
if not isinstance(instance, compat.str_types):
return True
if instance.endswith('\\'):
return False
return True
@draft3_format_checker.checks("tld-name")
@draft4_format_checker.checks("tldname")
def is_tldname(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_TLDNAME, instance):
return False
return True
@draft3_format_checker.checks("email")
@draft4_format_checker.checks("email")
def is_email(instance):
if not isinstance(instance, compat.str_types):
return True
# A valid email address. We use the RFC1035 version of "valid".
if instance.count('@') != 1:
return False
rname = instance.replace('@', '.', 1)
if not re.match(RE_ZONENAME, "%s." % rname):
return False
return True
@draft4_format_checker.checks("sshfp")
def is_sshfp_fingerprint(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_SSHFP_FINGERPRINT, instance):
return False
return True
@draft3_format_checker.checks("uuid")
@draft4_format_checker.checks("uuid")
def is_uuid(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_UUID, instance):
return False
return True
@draft3_format_checker.checks("floating-ip-id")
@draft4_format_checker.checks("floating-ip-id")
def is_floating_ip_id(instance):
# TODO(kiall): Apparently, this is used in exactly zero places outside the
# tests. Determine if we should remove this code...
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_FIP_ID, instance):
return False
return True
@draft3_format_checker.checks("ip-and-port")
@draft4_format_checker.checks("ipandport")
def is_ip_and_port(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_IP_AND_PORT, instance):
return False
return True
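# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical example of how these checkers are consumed: pass the format
# checker to jsonschema so a schema's "format" keyword is enforced by the
# functions above (here the "zonename" format registered earlier).
def _example_validate_zone_name(name):
    schema = {'type': 'string', 'format': 'zonename'}
    jsonschema.validate(name, schema, format_checker=draft4_format_checker)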
|
the-stack_106_16658
|
import sys
import piecash
if sys.version_info.major == 3:
def run_file(fname):
with open(fname) as f:
code = compile(f.read(), fname, "exec")
exec(code, {})
else:
def run_file(fname):
return execfile(fname, {})
if len(sys.argv) == 1:
print("Specify as argument the path to the script to run")
sys.exit()
file = sys.argv.pop(1)
run_file(file)
|
the-stack_106_16659
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A unified and split coordinator for distributed TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import threading
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.training import server_lib
class _TaskType(object):
PS = "ps"
WORKER = "worker"
CHIEF = "chief"
EVALUATOR = "evaluator"
_coordinator_context = threading.local()
def get_current_coordinator_context():
"""Returns the current coordinator context."""
try:
return _coordinator_context.current
except AttributeError:
return None
class _Barrier(object):
"""A reusable barrier class for worker synchronization."""
def __init__(self, num_participants):
"""Initializes the barrier object.
Args:
num_participants: an integer which is the expected number of calls to
`wait` that pass through this barrier.
"""
self._num_participants = num_participants
self._counter = 0
self._flag = False
self._local_sense = threading.local()
self._lock = threading.Lock()
self._condition = threading.Condition()
def wait(self):
"""Waits until all other callers reach the same wait call."""
if not hasattr(self._local_sense, "value"):
self._local_sense.value = False
self._local_sense.value = not self._flag
with self._lock:
self._counter += 1
if self._counter == self._num_participants:
self._counter = 0
self._flag = self._local_sense.value
with self._condition:
while self._flag != self._local_sense.value:
self._condition.wait()
self._condition.notify_all()
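# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage of _Barrier: both threads block in wait() until the expected
# number of participants has arrived, then both are released together.
def _barrier_demo():
  barrier = _Barrier(num_participants=2)
  order = []

  def _participant(idx):
    order.append(("arrived", idx))
    barrier.wait()        # blocks until both participants have called wait()
    order.append(("released", idx))

  threads = [threading.Thread(target=_participant, args=(i,)) for i in range(2)]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  # Every "released" entry is appended only after both "arrived" entries exist.
  return order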
def _get_num_workers(cluster_spec):
"""Gets number of workers including chief."""
if not cluster_spec:
return 0
return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(
cluster_spec.as_dict().get(_TaskType.CHIEF, []))
class _CoordinatorContext(object):
"""The coordinator context class.
This context object provides configuration information for each task. One
context manager with a coordinator context object will be created per
invocation to the `worker_fn` where `get_current_coordinator_context` can be
called to access the coordinator context object.
"""
def __init__(self,
cluster_spec,
task_type,
task_id,
between_graph=False,
rpc_layer="grpc",
worker_barrier=None):
"""Initialize the coordinator context object.
Args:
cluster_spec: a ClusterSpec object. It can be empty or None in the local
training case.
task_type: a string indicating the role of the corresponding task, such as
"worker" or "ps". It can be None if it is local training or
`between_graph` is False.
task_id: an integer indicating id of the corresponding task. It can be
None if it is local training or `between_graph` is False.
between_graph: whether it is between-graph replication or not.
rpc_layer: optional string specifying the RPC protocol for communication
with worker masters. If None or empty, hosts in the `cluster_spec` will
be used directly.
worker_barrier: optional, the barrier object for worker synchronization.
Raises:
ValueError: if task_type or task_id is None or empty and it is distributed
between-graph replicated training.
"""
if cluster_spec and between_graph:
if not task_type or task_id is None:
raise ValueError("`task_type` and `task_id` must be set in the "
"distributed between-graph replicated training.")
if task_type not in cluster_spec.jobs:
raise ValueError("`task_type` %r not found in the `cluster_spec` %r" %
(task_type, cluster_spec))
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._between_graph = between_graph
self._worker_barrier = worker_barrier
self._rpc_layer = rpc_layer
self._master_target = self._get_master_target()
self._num_workers = _get_num_workers(cluster_spec)
self._is_chief_node = self._is_chief()
def __enter__(self):
old_context = get_current_coordinator_context()
if old_context:
raise ValueError(
"You cannot run distribute coordinator in a `worker_fn`.")
_coordinator_context.current = self
def __exit__(self, unused_exception_type, unused_exception_value,
unused_traceback):
_coordinator_context.current = None
def _get_master_target(self):
"""Return the master target for a task."""
# If cluster_spec is None or empty, we use local master.
if not self._cluster_spec:
return "local"
# If task_type is None, then it is in-graph replicated training. In this
# case we use the chief or first worker's master target.
if not self._task_type:
if _TaskType.CHIEF in self._cluster_spec.jobs:
assert not self._between_graph
task_type = _TaskType.CHIEF
task_id = 0
else:
assert _TaskType.WORKER in self._cluster_spec.jobs
task_type = _TaskType.WORKER
task_id = 0
else:
task_type = self._task_type
task_id = self._task_id
prefix = ""
if self._rpc_layer:
prefix = self._rpc_layer + "://"
return prefix + self._cluster_spec.job_tasks(task_type)[task_id or 0]
def _is_chief(self):
"""Return whether the task is the chief worker."""
if (not self._cluster_spec or self._task_type in [_TaskType.CHIEF, None]):
return True
# If not local and chief not in the cluster_spec, use the first worker as
# chief.
if (_TaskType.CHIEF not in self._cluster_spec.jobs and
self._task_type == _TaskType.WORKER and self._task_id == 0):
return True
return False
def wait_for_other_workers(self):
"""Waits for other workers to reach the same call to this method.
Raises:
ValueError: if `worker_barrier` is not passed to the __init__ method.
"""
if not self._worker_barrier:
raise ValueError(
"`worker_barrier is not set in the coordinator context.`")
self._worker_barrier.wait()
@property
def distributed_mode(self):
"""Whether it is distributed training or not."""
return bool(self._cluster_spec)
@property
def cluster_spec(self):
"""Returns a copy of the cluster_spec object."""
return copy.deepcopy(self._cluster_spec)
@property
def task_type(self):
"""Returns the role of the corresponing task."""
return self._task_type
@property
def task_id(self):
"""Returns the id or index of the corresponing task."""
return self._task_id
@property
def master_target(self):
"""Returns the session master for the corresponding task to connect to."""
return self._master_target
@property
def is_chief(self):
"""Returns whether the task is a chief node."""
return self._is_chief_node
@property
def num_workers(self):
"""Returns number of workers in the cluster, including chief."""
return self._num_workers
def _run(worker_fn, cluster_spec, task_type, task_id, between_graph, rpc_layer,
worker_barrier):
with _CoordinatorContext(cluster_spec, task_type, task_id, between_graph,
rpc_layer, worker_barrier):
worker_fn()
def run_distribute_coordinator(worker_fn,
cluster_spec=None,
between_graph=False,
rpc_layer=None):
"""Run the coordinator for distributed TensorFlow.
This function runs a unified and split coordinator for distributed TensorFlow.
Given a `cluster_spec` specifying server addresses and their roles in a
cluster, this coordinator will figure out how to set them up, give the
underlying function the right targets for master sessions and coordinate their
training.
In addition to be the distribute coordinator, this is also the source of
configurations for each job in the distributed training. As there are multiple
ways to configure a distributed TensorFlow cluster, its context object
provides these configurations so that users or higher-level APIs don't have to
figure out the configuration for each job by themselves.
In the between-graph replicated training, this coordinator will create
multiple threads and each calls the `worker_fn` which is supposed to create
its own graph and connect to one worker master given by its coordinator
context. In the in-graph replicated training, it has only one thread calling
this `worker_fn`.
The `worker_fn` defines the training logic and is called under its own
coordinator context which can be accessed to via
`get_current_coordinator_context`. A coordinator context provides access to
configurations for each task, e.g. the task_type, task_id, master target and
so on. Since `worker_fn` will be called in a thread and possibly multiple
times, caller should be careful when it accesses global data. For example, it
is unsafe to define flags in a `worker_fn` or to define different environment
variables for different `worker_fn`s.
The `worker_fn` for the between-graph replication is defined as if there are
only one worker corresponding to the `worker_fn` and possibly ps jobs. It
assigns variables to parameter servers and all other operations to that
worker. In the in-graph replication case, the `worker_fn` has to define
operations for all worker jobs. Using a distribution strategy can simplify the
`worker_fn` by not having to worry about the replication and device assignment
of variables and operations.
This method is intended to be invoked by high-level APIs so that users don't
have to explicitly call it to run this coordinator. For those who don't use
high-level APIs, to change a program to use this coordinator, wrap everything
in the program after global data definitions such as command-line flag
definition into the `worker_fn` and get task-specific configurations from
the coordinator context.
The `cluster_spec` can be either passed by the argument or parsed from the
"TF_CONFIG" envrionment variable. Example of a TF_CONFIG:
```
cluster = {'chief': ['host0:2222'],
'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster})
```
If `cluster_spec` is not given in any format, it becomes local training and
this coordinator will connect to a local session.
For evaluation, if "evaluator" exist in the cluster_spec, a separate thread
will be created with its `task_type` set to "evaluator". If "evaluator" is not
set in the cluster_spec, it entirely depends on the `worker_fn` for how to do
evaluation.
Args:
worker_fn: the function to be called and given the access to a coordinator
context object.
cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles
in a cluster. If not set or empty, fall back to local training.
between_graph: a boolean. It is only useful when `cluster_spec` is set and
not empty. If true, it will use between-graph replicated training;
otherwise it will use in-graph replicated training.
rpc_layer: optional string, the protocol for RPC, e.g. "grpc".
Raises:
ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or
a ClusterSpec.
"""
if not cluster_spec:
tf_config = json.loads(os.environ.get("TF_CONFIG", "{}"))
cluster_spec = tf_config.get("cluster", {})
if cluster_spec:
if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):
cluster_spec = server_lib.ClusterSpec(cluster_spec)
elif not isinstance(cluster_spec, server_lib.ClusterSpec):
raise ValueError(
"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
"`tf.train.ClusterDef` object")
# TODO(yuefengz): validate cluster_spec.
threads = []
if cluster_spec and _TaskType.EVALUATOR in cluster_spec.jobs:
t = threading.Thread(
target=_run,
args=(worker_fn, cluster_spec, _TaskType.EVALUATOR, 0, between_graph,
rpc_layer, None))
t.start()
threads.append(t)
if cluster_spec and between_graph:
worker_barrier = _Barrier(_get_num_workers(cluster_spec))
for task_type in [_TaskType.CHIEF, _TaskType.WORKER]:
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
t = threading.Thread(
target=_run,
args=(worker_fn, cluster_spec, task_type, task_id, between_graph,
rpc_layer, worker_barrier))
t.start()
threads.append(t)
else:
# Local or in-graph replicated training.
_run(worker_fn, cluster_spec, None, None, between_graph, rpc_layer, None)
# TODO(yuefengz): wrap threads into a thread coordinator?
for t in threads:
t.join()
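# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hypothetical worker_fn: it only reads its task configuration from
# the coordinator context. The cluster layout in the comment is an assumption.
def _example_worker_fn():
  context = get_current_coordinator_context()
  print("task_type=%s task_id=%s master=%s is_chief=%s" %
        (context.task_type, context.task_id, context.master_target,
         context.is_chief))

# Example invocation (kept as a comment so importing this module stays
# side-effect free):
#   cluster = {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
#   run_distribute_coordinator(_example_worker_fn, cluster_spec=cluster,
#                              between_graph=True, rpc_layer="grpc")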
|
the-stack_106_16660
|
#import modules
import heterocl as hcl
from PIL import Image
import math
import os
import numpy as np
import imageio
import time
#need to initialize hcl
hcl.init(init_dtype=hcl.Float())
#path to input image
path = 'lane_fixed.png'
image = imageio.imread(path)
npimage = np.asarray(image)
imgdata = hcl.asarray(npimage)
#specify width and height of the input image
height, width, dummy = image.shape
#need to define placeholders to define kernel and create schedule
data = hcl.placeholder((height, width, dummy), "data", dtype=hcl.Float())
lowthresh = hcl.placeholder((1,), "lowthresh", dtype=hcl.Float())
highthresh = hcl.placeholder((1,), "highthresh", dtype=hcl.Float())
#define kernel for all computations done in hcl
def kernel(data, lowthresh, highthresh):
newdata = hcl.compute((height, width), lambda x,y: data[x][y][0] + data[x][y][1] + data[x][y][2], "newdata", dtype=hcl.Float())
def compute_out(data, x, y, lowthresh, highthresh):
with hcl.if_(data[x][y] < lowthresh[0]):
hcl.return_(0)
with hcl.elif_(data[x][y] >= highthresh[0]):
hcl.return_(255)
with hcl.else_():
hcl.return_(25)
return hcl.compute((height,width), lambda x,y: compute_out(newdata,x,y, lowthresh, highthresh))
#create schedule and function
sched = hcl.create_schedule([data, lowthresh, highthresh], kernel)
func = hcl.build(sched)
#need to define input/output array as an hcl array
high = 0.09 * (int(npimage[..., 0].max()) + int(npimage[..., 1].max()) + int(npimage[..., 2].max()))
low = 0.05 * high
print(height)
print(width)
result = hcl.asarray(np.zeros((height, width)) ,dtype=hcl.Float())
hclhigh = hcl.asarray(np.array([high]))
hcllow = hcl.asarray(np.array([low]))
#run the function
func(imgdata, hcllow, hclhigh, result)
print(result)
#change the type of output back to numpy array
newresult = result.asnumpy().astype(int)
print(newresult)
#define array for image
newimgarry = np.zeros((height, width, 3))
#assign the single-channel result to all three colour channels of each pixel
for x in range (0, height):
for y in range (0, width):
for z in range (0, 3):
newimgarry[x,y,z] = newresult[x,y]
#create an image with the array
imageio.imsave('lane_fixedmore.png', newimgarry)
print(time.process_time())
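#the same double-threshold step expressed with plain NumPy, as an illustrative
#cross-check for the HeteroCL kernel above (not part of the original script)
def numpy_double_threshold(img, low_value, high_value):
    summed = img[..., 0].astype(np.float64) + img[..., 1] + img[..., 2]
    out = np.full(summed.shape, 25.0)
    out[summed < low_value] = 0.0
    out[summed >= high_value] = 255.0
    return out
#e.g. numpy_double_threshold(npimage, low, high) should match newresult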
|
the-stack_106_16662
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink14.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks. This example has writes a url in a range."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format = workbook.add_format({'align': 'center'})
worksheet.merge_range('C4:E5', '', format,)
worksheet.write_url('C4', 'http://www.perl.org/', format, 'Perl Home')
workbook.close()
self.assertExcelEqual()
|
the-stack_106_16664
|
import abc
import collections
import copy
import confu.schema
import datetime
import logging
import os
import munge
from future.utils import with_metaclass
import vaping.io
from vaping.config import parse_interval
class PluginConfigSchema(confu.schema.Schema):
"""
Configuration Schema for [PluginBase](#pluginbase)
When creating new configuration schemas for extended plugins
extend this.
"""
name = confu.schema.Str("name", help="Plugin name")
type = confu.schema.Str("type", help="Plugin type")
class PluginBase(vaping.io.Thread):
"""
Base plugin interface
# Class Attributes
    - lazy_start (`bool`=`False`): if `True` the plugin will not be
      started on vaping start, but at a later point (usually
      when it starts emitting). Note that the plugin itself will
      need to call `self.start()` somewhere explicitly when this is `True`.
# Instanced Attributes
- config (`dict`): plugin config
- vaping: reference to the main vaping object
    Calls `self.init()` pre-fork while loading all modules; init() should
    not do anything active, as any files opened may be closed when it forks.
Plugins should prefer `init()` to `__init__()` to ensure the class is
completely done initializing.
Calls `self.on_start()` and `self.on_stop()` before and after running in
case any connections need to be created or cleaned up.
"""
lazy_start = False
ConfigSchema = PluginConfigSchema
ConfigSchema.help = "Base plugin config schema"
@property
def groups(self):
"""
`dict` - group configurations keyed by name
"""
group_config = {}
        # legacy way of treating any dict as a potential
# group config (pre #44 implementation)
# supported until vaping 2.0
for k, v in list(self.config.items()):
if isinstance(v, collections.Mapping):
group_config[k] = v
# explicit groups object (#44 implementation)
for _group_config in self.config.get("groups", []):
group_config[_group_config["name"]] = _group_config
return group_config
def init(self):
"""
called after the plugin is initialized, plugin may define this for any
other initialization code
"""
pass
def on_start(self):
"""
called when the daemon is starting
"""
pass
def on_stop(self):
"""
called when the daemon is stopping
"""
pass
def new_message(self):
"""
creates and returns new message `dict`, setting `type`, `source`, `ts`, `data`
`data` is initialized to an empty array
**Returns**
message (`dict`)
"""
msg = {}
msg["data"] = []
msg["type"] = self.plugin_type
msg["source"] = self.name
msg["ts"] = (
datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
).total_seconds()
return msg
def popen(self, args, **kwargs):
"""
creates a subprocess with passed args
**Returns**
Popen instance
"""
self.log.debug("popen %s", " ".join(args))
return vaping.io.subprocess.Popen(args, **kwargs)
@property
def log(self):
"""
logger instance for plugin type
"""
if not self._logger:
self._logger = logging.getLogger("vaping.plugins." + self.plugin_type)
return self._logger
def __init__(self, config, ctx):
"""
**Arguments**
- config (`dict`)
- ctx: vaping context
"""
# FIXME: figure out what from this we want to keep
if hasattr(self, "default_config"):
self.config = munge.util.recursive_update(
copy.deepcopy(self.default_config), copy.deepcopy(config)
)
else:
self.config = config
if hasattr(self, "ConfigSchema"):
confu.schema.apply_defaults(self.ConfigSchema(), config)
# set for pluginmgr
self.pluginmgr_config = self.config
self.vaping = ctx
self.name = self.config.get("name")
self._logger = None
self.lazy_start = False
self.started = False
super().__init__()
self.init()
async def _run(self):
self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for probe plugin, used for getting data
expects method probe() to be defined
"""
def init(self):
pass
@abc.abstractmethod
def probe(self):
"""
probe for data, return a list of dicts
"""
def __init__(self, config, ctx, emit=None):
if emit:
self._emit = [emit]
else:
self._emit = []
self._emit_queue = vaping.io.Queue()
super().__init__(config, ctx)
async def _run(self):
        # PluginBase._run() is a coroutine and already calls on_start()
        await super()._run()
self.run_level = 1
while self.run_level:
            await self.send_emission()
msg = self.probe()
if msg:
await self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
await vaping.io.sleep(0.1)
async def queue_emission(self, msg):
"""
queue an emission of a message for all output plugins
**Arguments**
- msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
"""
if not msg:
return
for _emitter in self._emit:
if not hasattr(_emitter, "emit"):
continue
async def emit(emitter=_emitter):
self.log.debug(f"emit to {emitter.name}")
emitter.emit(msg)
self.log.debug(
"queue emission to {} ({})".format(
_emitter.name, self._emit_queue.qsize()
)
)
await self._emit_queue.put(emit)
async def send_emission(self):
"""
emit and remove the first emission in the queue
"""
if self._emit_queue.empty():
return
emit = self._emit_queue.get_nowait()
await emit()
async def emit_all(self):
"""
emit and remove all emissions in the queue
"""
while not self._emit_queue.empty():
await self.send_emission()
class TimedProbeSchema(PluginConfigSchema):
interval = confu.schema.Str()
class TimedProbe(ProbeBase):
"""
Probe class that calls probe every config defined interval
"""
ConfigSchema = TimedProbeSchema
def __init__(self, config, ctx, emit=None):
super().__init__(config, ctx, emit)
if "interval" not in self.pluginmgr_config:
raise ValueError("interval not set in config")
self.interval = parse_interval(self.pluginmgr_config["interval"])
self.run_level = 0
async def _run(self):
self.run_level = 1
while self.run_level:
start = datetime.datetime.now()
# since the TimedProbe will sleep between cycles
# we need to emit all queued emissions each cycle
await self.emit_all()
msg = self.probe()
if msg:
await self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
done = datetime.datetime.now()
elapsed = done - start
if elapsed.total_seconds() > self.interval:
self.log.warning("probe time exceeded interval")
else:
sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
await vaping.io.sleep(sleeptime.total_seconds())
class FileProbeSchema(PluginConfigSchema):
path = confu.schema.Str()
backlog = confu.schema.Int(default=10)
max_lines = confu.schema.Int(default=1000)
class FileProbe(ProbeBase):
"""
    Probes a file and emits every time a new line is read
# Config
- path (`str`): path to file
- backlog (`int=0`): number of bytes to read from backlog
- max_lines (`int=1000`): maximum number of lines to read during probe
# Instanced Attributes
- path (`str`): path to file
- backlog (`int`): number of bytes to read from backlog
    - max_lines (`int`): maximum number of lines to read during probe
- fh (`filehandler`): file handler for opened file (only available if `path` is set)
"""
ConfigSchema = FileProbeSchema
def __init__(self, config, ctx, emit=None):
super().__init__(config, ctx, emit)
self.path = self.pluginmgr_config.get("path")
self.run_level = 0
self.backlog = int(self.pluginmgr_config.get("backlog", 0))
self.max_lines = int(self.pluginmgr_config.get("max_lines", 1000))
if self.path:
self.fh = open(self.path)
self.fh.seek(0, 2)
if self.backlog:
try:
self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
except ValueError as exc:
if str(exc).find("negative seek position") > -1:
self.fh.seek(0)
else:
raise
async def _run(self):
self.run_level = 1
while self.run_level:
            await self.send_emission()
for msg in self.probe():
await self.queue_emission(msg)
await vaping.io.sleep(0.1)
def validate_file_handler(self):
"""
Here we validate that our filehandler is pointing
to an existing file.
        If it doesn't, because the file has been deleted, we close
        the filehandler and try to reopen it
"""
if self.fh.closed:
try:
self.fh = open(self.path)
self.fh.seek(0, 2)
except OSError as err:
logging.error(f"Could not reopen file: {err}")
return False
open_stat = os.fstat(self.fh.fileno())
try:
file_stat = os.stat(self.path)
except OSError as err:
logging.error(f"Could not stat file: {err}")
return False
if open_stat != file_stat:
            self.log.debug("file stat changed for %s, reopening", self.path)
self.fh.close()
return False
return True
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
        # (e.g. file stat hasn't changed, file exists etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path": self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def process_line(self, line, data):
""" override this - parse your line in here """
return data
def process_probe(self, data):
""" override this - assign your data values here """
return data
def process_messages(self, messages):
"""
override this - process your messages before they
are emitted
"""
return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for emit plugins, used for sending data
expects method emit() to be defined
"""
def __init__(self, config, ctx):
super().__init__(config, ctx)
@abc.abstractmethod
def emit(self, message):
""" accept message to emit """
class TimeSeriesDBSchema(PluginConfigSchema):
filename = confu.schema.Str(help="database file name template")
field = confu.schema.Str(help="field name to read the value from")
class TimeSeriesDB(EmitBase):
"""
Base interface for timeseries db storage plugins
# Config
- filename (`str`): database file name template
- field (`str`): field name to read the value from
# Instanced Attributes
- filename (`str`): database file name template
    - field (`str`): field name to read the value from
"""
ConfigSchema = TimeSeriesDBSchema
def __init__(self, config, ctx):
super().__init__(config, ctx)
# filename template
self.filename = self.config.get("filename")
# field name to read the value from
self.field = self.config.get("field")
if not self.filename:
raise ValueError("No filename specified")
if not self.field:
raise ValueError(
"No field specified, field should specify which value to store in the database"
)
def create(self, filename):
"""
Create database
**Arguments**
- filename (`str`): database filename
"""
raise NotImplementedError()
def update(self, filename, time, value):
"""
Update database
**Arguments**
- filename (`str`): database filename
- time (`int`): epoch timestamp
- value (`mixed`)
"""
raise NotImplementedError()
def get(self, filename, from_time, to_time):
"""
Retrieve data from database for the specified
timespan
**Arguments**
- filename (`str`): database filename
- from_time (`int`): epoch timestamp start
- to_time (`int`): epoch timestamp end
"""
raise NotImplementedError()
def filename_formatters(self, data, row):
"""
Returns a dict containing the various filename formatter values
Values are gotten from the vaping data message as well as the
currently processed row in the message
**Arguments**
- data (`dict`): vaping message
- row (`dict`): vaping message data row
**Returns**
formatter variables (`dict`)
"""
r = {
"source": data.get("source"),
"field": self.field,
"type": data.get("type"),
}
r.update(**row)
return r
def format_filename(self, data, row):
"""
Returns a formatted filename using the template stored
in self.filename
**Arguments**
- data (`dict`): vaping message
- row (`dict`): vaping message data row
**Returns**
formatted version of self.filename (`str`)
"""
return self.filename.format(**self.filename_formatters(data, row))
def emit(self, message):
"""
emit to database
**Arguments**
- message (`dict`): vaping message dict
"""
# handle vaping data that arrives in a list
if isinstance(message.get("data"), list):
for row in message.get("data"):
# format filename from data
filename = self.format_filename(message, row)
# create database file if it does not exist yet
if not os.path.exists(filename):
self.create(filename)
# update database
self.log.debug(
"storing time:%d, %s:%s in %s"
% (
message.get("ts"),
self.field,
row.get(self.field, "-"),
filename,
)
)
self.update(filename, message.get("ts"), row.get(self.field))
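# Illustrative sketch (not part of the vaping source): a minimal example of
# how the probe base classes above are intended to be extended. TimedProbe
# requires an "interval" key in the plugin config, and probe() builds its
# message with new_message(). The class name and the emitted field below are
# hypothetical.
class ExampleConstantProbe(TimedProbe):
    """
    Emits a constant value every configured interval (illustration only).
    """

    def probe(self):
        msg = self.new_message()
        msg["data"] = [{"value": 42}]
        return msg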
|
the-stack_106_16665
|
import nose.tools as nt
from .test_embed_kernel import setup, teardown, setup_kernel
TIMEOUT = 15
def test_ipython_start_kernel_userns():
cmd = ('from IPython import start_kernel\n'
'ns = {"tre": 123}\n'
'start_kernel(user_ns=ns)')
with setup_kernel(cmd) as client:
msg_id = client.object_info('tre')
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
content = msg['content']
assert content['found']
nt.assert_equal(content['string_form'], '123')
# user_module should be an instance of DummyMod
msg_id = client.execute("usermod = get_ipython().user_module")
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
content = msg['content']
nt.assert_equal(content['status'], 'ok')
msg_id = client.object_info('usermod')
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
content = msg['content']
assert content['found']
nt.assert_in('DummyMod', content['string_form'])
def test_ipython_start_kernel_no_userns():
# Issue #4188 - user_ns should be passed to shell as None, not {}
cmd = ('from IPython import start_kernel\n'
'start_kernel()')
with setup_kernel(cmd) as client:
# user_module should not be an instance of DummyMod
msg_id = client.execute("usermod = get_ipython().user_module")
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
content = msg['content']
nt.assert_equal(content['status'], 'ok')
msg_id = client.object_info('usermod')
msg = client.get_shell_msg(block=True, timeout=TIMEOUT)
content = msg['content']
assert content['found']
nt.assert_not_in('DummyMod', content['string_form'])
|
the-stack_106_16667
|
"""
Misc tools for implementing data structures
"""
import re
import collections
import numbers
import codecs
import csv
import types
from datetime import datetime, timedelta
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas as pd
import pandas.algos as algos
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import StringIO, BytesIO, range, long, u, zip, map
from pandas.core.config import get_option
from pandas.core import array as pa
class PandasError(Exception):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8',
'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
'm8[ns]', '<m8[ns]', '>m8[ns]']])
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, None) in comp
dct = dict(__instancecheck__=_check,
__subclasscheck__=_check)
meta = type("ABCBase", (type,), dct)
return meta(name, tuple(), dct)
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not compat.PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
def __init__(cls, name, bases, attrs):
pass
class CategoricalDtype(object):
__meta__ = CategoricalDtypeType
"""
A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
name = 'category'
names = None
type = CategoricalDtypeType
subdtype = None
kind = 'O'
str = '|O08'
num = 100
shape = tuple()
itemsize = 8
base = np.dtype('O')
isbuiltin = 0
isnative = 0
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, CategoricalDtype)
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
        Array or bool indicating whether an object is null or, if an array is
        given, which of its elements are null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
"""
return _isnull(obj)
def _isnull_new(obj):
if lib.isscalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if lib.isscalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=_isnull_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isnull = _isnull_new
def _use_inf_as_null(key):
"""Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
flag = get_option(key)
if flag:
globals()['_isnull'] = _isnull_old
else:
globals()['_isnull'] = _isnull_new
def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[...] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notnull(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
arr : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
isnulled : array-like of bool or bool
        Array or bool indicating whether an object is *not* null or, if an array
        is given, which of its elements are *not* null.
See also
--------
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if np.isscalar(res):
return not res
return ~res
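# Illustrative sketch (not part of pandas): how isnull/notnull above behave on
# scalars and on array-likes; wrapped in a function so nothing runs at import.
def _example_isnull_notnull():
    assert isnull(np.nan) and isnull(None) and not isnull(1.0)
    # array input yields an elementwise boolean mask
    assert isnull(np.array([1.0, np.nan, 3.0])).tolist() == [False, True, False]
    # notnull is the boolean inverse
    assert notnull(np.array([1.0, np.nan])).tolist() == [True, False]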
def _is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is pd.NaT or other is None:
return True
elif np.isscalar(other):
# a timedelta
if hasattr(other,'dtype'):
return other.view('i8') == tslib.iNaT
elif is_integer(other) and other == tslib.iNaT:
return True
return isnull(other)
return False
def array_equivalent(left, right):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs in
corresponding locations. False otherwise. It is assumed that left and right
are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(np.array([1, 2, nan]), np.array([1, 2, nan]))
True
>>> array_equivalent(np.array([1, nan, 2]), np.array([1, 2, nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
if left.shape != right.shape: return False
# NaNs occur only in object arrays, float or complex arrays.
if issubclass(left.dtype.type, np.object_):
return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all()
if issubclass(left.dtype.type, (np.floating, np.complexfloating)):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
return np.array_equal(left, right)
def _iterable_not_string(x):
return (isinstance(x, collections.Iterable) and
not isinstance(x, compat.string_types))
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
    This doesn't consider string sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
if not isinstance(values_to_mask, (list, np.ndarray)):
values_to_mask = [values_to_mask]
try:
values_to_mask = np.array(values_to_mask, dtype=arr.dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isnull(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isnull(arr)
else:
mask |= isnull(arr)
return mask
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
# All datetimes should be stored as M8[ns]. When unpickling with
# numpy1.6, it will read these as M8[us]. So this ensures all
# datetime64 types are read as MS[ns]
if is_datetime64_dtype(arr):
arr = arr.view(_NS_DTYPE)
return arr
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_1d_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
_take_nd_generic(arr, indexer, out, axis=axis,
fill_value=fill_value, mask_info=mask_info)
return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
common._maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
            (indexer == -1, (indexer == -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype,
axis=axis, mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
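# Illustrative sketch (not part of pandas): take_nd with a -1 entry in the
# indexer. With allow_fill=True (the default) the integer input is promoted to
# float64 so the -1 position can hold the NaN fill value.
def _example_take_nd():
    arr = np.array([10, 20, 30], dtype=np.int64)
    out = take_nd(arr, np.array([2, -1, 0]))
    # out -> array([30., nan, 10.]), dtype float64
    assert out.dtype == np.float64 and np.isnan(out[1])
    assert out[0] == 30 and out[2] == 10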
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = _ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = _ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_generic(arr, indexer, out,
fill_value=fill_value, mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
""" difference of n between self,
        analogous to s - s.shift(n) """
n = int(n)
dtype = arr.dtype
na = np.nan
if is_timedelta64_dtype(arr) or is_datetime64_dtype(arr):
dtype = 'timedelta64[ns]'
arr = arr.view('i8')
na = tslib.iNaT
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if dtype == 'timedelta64[ns]':
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
return out_arr
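# Illustrative sketch (not part of pandas): diff() on an integer array upcasts
# to float64 so the leading n positions can hold NaN; the result matches
# s - s.shift(n).
def _example_diff():
    out = diff(np.array([1, 3, 6, 10], dtype=np.int64), 1)
    # out -> array([nan, 2., 3., 4.])
    assert np.isnan(out[0]) and out[1:].tolist() == [2.0, 3.0, 4.0]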
def _coerce_to_dtypes(result, dtypes):
""" given a dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isnull(r):
pass
elif dtype == _NS_DTYPE:
r = lib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0,1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def _infer_dtype_from_scalar(val):
""" interpret the dtype from a scalar, upcast floats and ints
return the new value and the dtype """
dtype = np.object_
# a 1-element ndarray
if isinstance(val, pa.Array):
if val.ndim != 0:
raise ValueError(
"invalid ndarray passed to _infer_dtype_from_scalar")
dtype = val.dtype
val = val.item()
elif isinstance(val, compat.string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
        # coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)) and getattr(val,'tz',None) is None:
val = lib.Timestamp(val).value
dtype = np.dtype('M8[ns]')
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.convert_to_timedelta(val,'ns')
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
    # provide implicit upcast on scalars
elif is_integer(val):
dtype = np.int64
elif is_float(val):
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
return dtype, val
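# Illustrative sketch (not part of pandas): _infer_dtype_from_scalar maps plain
# Python scalars to the numpy dtype pandas would use to hold them, upcasting
# ints and floats to their 64-bit forms.
def _example_infer_dtype_from_scalar():
    assert _infer_dtype_from_scalar(3)[0] is np.int64
    assert _infer_dtype_from_scalar(3.5)[0] is np.float64
    assert _infer_dtype_from_scalar(True)[0] is np.bool_
    assert _infer_dtype_from_scalar("a")[0] is np.object_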
def _maybe_cast_scalar(dtype, value):
""" if we a scalar value and are casting to a dtype that needs nan -> NaT
conversion
"""
if np.isscalar(value) and dtype in _DATELIKE_DTYPES and isnull(value):
return tslib.iNaT
return value
def _maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = tslib.iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isnull(fill_value):
fill_value = tslib.iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = lib.Timestamp(fill_value).value
except:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = tslib.iNaT
else:
fill_value = tslib.iNaT
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
else:
dtype = np.object_
# in case we have a string that looked like a number
if issubclass(np.dtype(dtype).type, compat.string_types):
dtype = np.object_
return dtype, fill_value
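# Illustrative sketch (not part of pandas): _maybe_promote answers "which dtype
# can hold both the existing values and this fill_value?". NaN forces integer
# dtypes up to float64, and a fill_value that does not fit falls back to object.
def _example_maybe_promote():
    dtype, fill = _maybe_promote(np.dtype('int64'), np.nan)
    assert dtype is np.float64 and np.isnan(fill)
    dtype, fill = _maybe_promote(np.dtype('bool'), 1)
    assert dtype is np.object_ and fill == 1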
def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None):
""" a safe version of put mask that (potentially upcasts the result
return the result
if change is not None, then MUTATE the change (and change the dtype)
return a changed flag
"""
if mask.any():
other = _maybe_cast_scalar(result.dtype, other)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_other = result.values.copy()
new_other[mask] = om_at
result[:] = new_other
return result, False
except:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, fill_value = _maybe_upcast(
result, fill_value=other, dtype=dtype, copy=True)
np.putmask(r, mask, other)
# we need to actually change the dtype here
if change is not None:
# if we are trying to do something unsafe
# like put a bigger dtype in a smaller one, use the smaller one
# pragma: no cover
if change.dtype.itemsize < r.dtype.itemsize:
raise AssertionError(
"cannot change dtype of input to smaller size")
change.dtype = r.dtype
change[:] = r
return r, True
# we want to decide whether putmask will work
# if we have nans in the False portion of our mask then we need to
        # upcast (possibly); otherwise we DON'T want to upcast (e.g. if we
        # have values, say integers, in the success portion then it's ok to not
        # upcast)
new_dtype, fill_value = _maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (np.isscalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isnull(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isnull(other[mask]).any():
return changeit()
try:
np.putmask(result, mask, other)
except:
return changeit()
return result, False
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = _maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def _possibly_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
def _possibly_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if np.isscalar(result):
return result
trans = lambda x: x
if isinstance(dtype, compat.string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
trans = lambda x: x.round()
else:
dtype = 'object'
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):
return result
if issubclass(dtype.type, np.floating):
return result.astype(dtype)
elif dtype == np.bool_ or issubclass(dtype.type, np.integer):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if not np.allclose(arr, trans(arr).astype(dtype)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notnull(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result):
return new_result
except:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
# a datetimelike
elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:
try:
result = result.astype(dtype)
except:
pass
except:
pass
return result
def _lcd_dtypes(a_dtype, b_dtype):
""" return the lcd dtype to hold these types """
if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
return _NS_DTYPE
elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
return _TD_DTYPE
elif is_complex_dtype(a_dtype):
if is_complex_dtype(b_dtype):
return a_dtype
return np.float64
elif is_integer_dtype(a_dtype):
if is_integer_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
return np.int64
return np.float64
elif is_float_dtype(a_dtype):
if is_float_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
else:
return np.float64
elif is_integer(b_dtype):
return np.float64
return np.object
def _fill_zeros(result, x, y, name, fill):
"""
if this is a reversed op, then flip x,y
if we have an integer value (or array in y)
and we have 0's, fill them with the fill,
return the result
mask the nan's from x
"""
if fill is not None:
if name.startswith('r'):
x,y = y,x
if not isinstance(y, np.ndarray):
dtype, value = _infer_dtype_from_scalar(y)
y = pa.empty(result.shape, dtype=dtype)
y.fill(value)
if is_integer_dtype(y):
if (y.ravel() == 0).any():
shape = result.shape
result = result.ravel().astype('float64')
# GH 7325, mask and nans must be broadcastable
signs = np.sign(result)
mask = ((y == 0) & ~np.isnan(x)).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it
# correctly
# GH 6178
if np.isinf(fill):
np.putmask(result,(signs<0) & mask, -fill)
result = result.reshape(shape)
return result
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
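# Illustrative sketch (not part of pandas): pad_1d forward-fills NaNs in place
# and backfill_1d fills them backwards; both return the (mutated) values array.
def _example_pad_backfill():
    vals = np.array([1.0, np.nan, np.nan, 4.0])
    assert pad_1d(vals.copy()).tolist() == [1.0, 1.0, 1.0, 4.0]
    assert backfill_1d(vals.copy()).tolist() == [1.0, 4.0, 4.0, 4.0]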
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def _clean_interp_method(method, order=None, **kwargs):
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
'pchip', 'spline']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
fill_value=None, bounds_error=False, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
    take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isnull(yvalues)
valid = ~invalid
valid_y = yvalues[valid]
valid_x = xvalues[valid]
new_x = xvalues[invalid]
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
def _interp_limit(invalid, limit):
"""mask off values that won't be filled since they exceed the limit"""
all_nans = np.where(invalid)[0]
violate = [invalid[x:x + limit + 1] for x in all_nans]
violate = np.array([x.all() & (x.size > limit) for x in violate])
return all_nans[violate] + limit
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
if limit:
violate_limit = _interp_limit(invalid, limit)
if valid.any():
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
result = yvalues.copy()
if valid.all():
return yvalues
else:
# have to call np.array(xvalues) since xvalues could be an Index
# which cant be mutated
result = np.empty_like(np.array(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(pa.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
inds = inds[firstIndex:]
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
yvalues[firstIndex:][valid])
if limit:
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'piecewise_polynomial', 'pchip']
if method in sp_methods:
new_x = new_x[firstIndex:]
xvalues = xvalues[firstIndex:]
result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
valid_x, valid_y, new_x, method=method, fill_value=fill_value,
bounds_error=bounds_error, **kwargs)
if limit:
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method
"""
try:
from scipy import interpolate
from pandas import DatetimeIndex
except ImportError:
raise ImportError('{0} interpolation requires Scipy'.format(method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x.values.astype('i8'), new_x.astype('i8')
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
if method == 'pchip':
raise ImportError("Your version of scipy does not support "
"PCHIP interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
terp = interpolate.UnivariateSpline(x, y, k=order)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x)
return new_y
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = _clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def _get_fill_func(method):
method = _clean_fill_method(method)
return _fill_methods[method]
#----------------------------------------------------------------------
# Lots of little utilities
def _validate_date_like_dtype(dtype):
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('%s' % e)
if typ != 'generic' and typ != 'ns':
raise ValueError('%r is too specific of a frequency, try passing %r'
% (dtype.name, dtype.type.__name__))
def _invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for ``DataFrame.select_dtypes()``."""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def _get_dtype_from_object(dtype):
"""Get a numpy dtype.type-style object.
Notes
-----
If nothing can be found, returns ``object``.
"""
# type object from a dtype
if isinstance(dtype, type) and issubclass(dtype, np.generic):
return dtype
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# should still pass if we don't have a datelike
pass
return dtype.type
elif isinstance(dtype, compat.string_types):
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
elif dtype == 'category':
return CategoricalDtypeType
try:
return _get_dtype_from_object(getattr(np, dtype))
except AttributeError:
# handles cases like _get_dtype(int)
# i.e., python objects that are valid dtypes (unlike user-defined
# types, in general)
pass
return _get_dtype_from_object(np.dtype(dtype))
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, np.datetime64):
value = tslib.Timestamp(value)
elif isinstance(value, np.timedelta64):
pass
return value
_values_from_object = lib.values_from_object
def _possibly_convert_objects(values, convert_dates=True,
convert_numeric=True,
convert_timedeltas=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(
values, 'M8[ns]', coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
new_values = _possibly_cast_to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
return values
def _possibly_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M' or kind == 'm':
return arr.dtype in _DATELIKE_DTYPES
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def _possibly_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(values)
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, 'values'):
values = values.values
values = lib.maybe_convert_objects(values)
return values
def _possibly_cast_to_datetime(value, dtype, coerce=False):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_timedelta64:
# force the dtype if needed
if is_datetime64 and dtype != _NS_DTYPE:
if dtype.name == 'datetime64[ns]':
dtype = _NS_DTYPE
else:
raise TypeError(
"cannot convert datetimelike to dtype [%s]" % dtype)
elif is_timedelta64 and dtype != _TD_DTYPE:
if dtype.name == 'timedelta64[ns]':
dtype = _TD_DTYPE
else:
raise TypeError(
"cannot convert timedeltalike to dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
value = tslib.iNaT
else:
value = np.array(value,copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) and value.dtype != dtype:
try:
if is_datetime64:
from pandas.tseries.tools import to_datetime
value = to_datetime(value, coerce=coerce).values
elif is_timedelta64:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat', dtype=dtype)
except:
pass
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if (is_array and value.dtype.kind in ['M','m']):
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat')
# only do this if we have an array and the dtype of the array is not
# already set up; if we are an integer/object dtype, don't bother with
# this conversion
elif (is_array and not (
issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
pass
# try to infer if we have a datetimelike here
# otherwise pass thru
else:
value = _possibly_infer_to_datetimelike(value)
return value
def _possibly_infer_to_datetimelike(value):
# we might have an array (or a single object) that is datetime-like,
# and no dtype was passed; don't change the value unless we find a
# datetime/timedelta set
# this is pretty strict in that a datetime/timedelta is REQUIRED
# in addition to possible nulls/string-likes
# ONLY strings are NOT datetimelike
v = value
if not is_list_like(v):
v = [v]
v = np.array(v,copy=False)
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if len(v):
def _try_datetime(v):
# safe coerce to datetime64
try:
return tslib.array_to_datetime(v, raise_=True).reshape(shape)
except:
return v
def _try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas.tseries.timedeltas import to_timedelta
try:
return to_timedelta(v).values.reshape(shape)
except:
# this is for compat with numpy < 1.7
# but string-likes will fail here
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
try:
return _possibly_cast_to_timedelta(v, coerce='compat').reshape(shape)
except:
return v
# do a quick inference for perf
sample = v[:min(3,len(v))]
inferred_type = lib.infer_dtype(sample)
if inferred_type in ['datetime', 'datetime64']:
value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
value = _try_timedelta(v)
# its possible to have nulls intermixed within the datetime or timedelta
# these will in general have an inferred_type of 'mixed', so have to try
# both datetime and timedelta
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
elif inferred_type in ['mixed']:
if lib.is_possible_datetimelike_array(_ensure_object(v)):
value = _try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = _try_datetime(v)
return value
def _is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import Int64Index
values = np.arange(n, dtype=np.int64)
result = values.view(Int64Index)
result.name = None
result.is_unique = True
return result
def ensure_float(arr):
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def _count_not_none(*args):
return sum(x is not None for x in args)
#------------------------------------------------------------------------------
# miscellaneous python tools
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
return ''.join(Random().sample(string.ascii_letters + string.digits, n))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def iterpairs(seq):
"""
Parameters
----------
seq: sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> iterpairs([1, 2, 3, 4])
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
if not val: # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
"""
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by key;
however, it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
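# Illustrative usage sketch for the groupby helper above (hypothetical data,
# kept as comments so importing this module is unaffected):
#
#     grouped = groupby(['apple', 'ant', 'bee'], key=lambda word: word[0])
#     # grouped == {'a': ['apple', 'ant'], 'b': ['bee']}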
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple))
or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
def _get_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
return arr_or_dtype.dtype
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
return arr_or_dtype.dtype.type
def _is_any_int_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
def is_integer_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def _is_int_or_datetime_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.datetime64)
def is_datetime64_ns_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype)
return tipo == _NS_DTYPE
def is_timedelta64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
def is_timedelta64_ns_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype)
return tipo == _TD_DTYPE
def _is_datetime_or_timedelta_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, (np.datetime64, np.timedelta64))
needs_i8_conversion = _is_datetime_or_timedelta_dtype
def is_numeric_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, (np.number, np.bool_))
and not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_float_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def _is_floating_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def is_bool_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.bool_)
def is_categorical_dtype(arr_or_dtype):
if hasattr(arr_or_dtype,'dtype'):
arr_or_dtype = arr_or_dtype.dtype
if isinstance(arr_or_dtype, CategoricalDtype):
return True
try:
return arr_or_dtype == 'category'
except:
return False
def is_complex_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
def is_object_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
def is_re(obj):
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(arg):
return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
def _is_sequence(x):
try:
iter(x)
len(x) # it has a length
return not isinstance(x, compat.string_and_binary_types)
except (TypeError, AttributeError):
return False
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
_ensure_int64 = algos.ensure_int64
_ensure_int32 = algos.ensure_int32
_ensure_int16 = algos.ensure_int16
_ensure_int8 = algos.ensure_int8
_ensure_platform_int = algos.ensure_platform_int
_ensure_object = algos.ensure_object
def _astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not compat.PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
mask = isnull(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if np.isnan(arr).any():
raise ValueError('Cannot convert NA to integer')
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
elif issubclass(dtype.type, compat.text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, compat.string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
if copy:
return arr.astype(dtype)
return arr.view(dtype)
def _clean_fill_method(method):
if method is None:
return None
method = method.lower()
if method == 'ffill':
method = 'pad'
if method == 'bfill':
method = 'backfill'
if method not in ['pad', 'backfill']:
msg = ('Invalid fill method. Expecting pad (ffill) or backfill '
'(bfill). Got %s' % method)
raise ValueError(msg)
return method
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode('utf-8')
def readline(self):
return self.reader.readline().encode('utf-8')
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
NOTE: Under Python 3.2, getting a compressed file handle means reading in
the entire file, decompressing it and decoding it to ``str`` all at once
and then wrapping it in a StringIO.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3_2:
# gzip and bz2 don't work with TextIOWrapper in 3.2
encoding = encoding or get_option('display.encoding')
f = StringIO(f.read().decode(encoding))
elif compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
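# Illustrative sketch (hypothetical file names, kept as comments): callers
# typically obtain handles like
#
#     fh = _get_handle('data.csv', 'r', encoding='utf-8')
#     gz = _get_handle('data.csv.gz', 'rb', compression='gzip')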
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (above) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def _concat_compat(to_concat, axis=0):
# filter empty arrays
nonempty = [x for x in to_concat if x.shape[axis] > 0]
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatenate which has them both implemented is compiled.
if nonempty:
is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
if all(is_datetime64):
# work around NumPy 1.6 bug
new_values = np.concatenate([x.view(np.int64) for x in nonempty],
axis=axis)
return new_values.view(_NS_DTYPE)
elif any(is_datetime64):
to_concat = [_to_pydatetime(x) for x in nonempty]
return np.concatenate(to_concat, axis=axis)
def _to_pydatetime(x):
if x.dtype == _NS_DTYPE:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
return x
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
import pandas.tslib as tslib
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main()
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython()
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in option "display.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
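#
# Illustrative sketch (not executed; kept as comments so module behavior is
# unchanged) of the three entry points described above:
#
#     text = pprint_thing(u'\u03c3')           # unicode text, safe internally
#     raw = pprint_thing_encoded(u'\u03c3')    # bytes, utf-8 unless overridden
#     shown = console_encode(u'\u03c3')        # bytes in "display.encoding"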
def _pprint_seq(seq, _nest_lvl=0, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("set([%s])")
else:
fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)")
nitems = get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt % body
def _pprint_dict(seq, _nest_lvl=0, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
nitems = get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, **kwds),
pprint_thing(v, _nest_lvl + 1, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs)
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with _pprint_seq; this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine; else we try to decode using utf-8 and 'replace'.
# If that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t',
'\n': r'\n',
'\r': r'\r',
}
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True)
elif _is_sequence(thing) and _nest_lvl < \
get_option("display.pprint_nest_depth"):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = "'%s'"
else:
fmt = "u'%s'"
result = fmt % as_escaped_unicode(thing)
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
def console_encode(object, **kwds):
"""
this is the sanctioned way to prepare something for
sending *to the console*. It delegates to pprint_thing() to get
a unicode representation of the object and relies on the global
encoding set in display.encoding. Use this everywhere
you output to the console.
"""
return pprint_thing_encoded(object,
get_option("display.encoding"))
def load(path): # TODO remove in 0.13
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file
"""
import warnings
warnings.warn("load is deprecated, use read_pickle", FutureWarning)
from pandas.io.pickle import read_pickle
return read_pickle(path)
def save(obj, path): # TODO remove in 0.13
"""
Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path
"""
import warnings
warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning)
from pandas.io.pickle import to_pickle
return to_pickle(obj, path)
def _maybe_match_name(a, b):
a_name = getattr(a, 'name', None)
b_name = getattr(b, 'name', None)
if a_name == b_name:
return a_name
return None
|
the-stack_106_16670
|
# -*- coding: utf-8 -*-
"""
log
~~~
Implements color logger
:author: Feei <[email protected]>
:homepage: https://github.com/WhaleShark-Team/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2018 Feei. All rights reserved
"""
import os
import sys
import re
import subprocess
import logging
import cloghandler
# stream handle
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
logger = logging.getLogger('CobraLog')
log_path = 'logs'
if not os.path.isdir(log_path):
os.mkdir(log_path, 0o755)
logfile = os.path.join(log_path, 'cobra.log')
fh_format = logging.Formatter("[%(asctime)s] %(levelname)s [%(lineno)s] %(message)s")
sh_format = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
UNICODE_ENCODING = "utf8"
try:
mswindows = subprocess.mswindows
except AttributeError as e:
mswindows = False
def single_time_warn_message(message): # Cross-linked function
sys.stdout.write(message)
sys.stdout.write("\n")
sys.stdout.flush()
def stdout_encode(data):
try:
data = data or ""
# Reference: http://bugs.python.org/issue1602
if mswindows:
output = data.encode(sys.stdout.encoding, "replace")
if '?' in output and '?' not in data:
warn = "cannot properly display Unicode characters "
warn += "inside Windows OS command prompt "
warn += "(http://bugs.python.org/issue1602). All "
warn += "unhandled occurances will result in "
warn += "replacement with '?' character. Please, find "
warn += "proper character representation inside "
warn += "corresponding output files. "
single_time_warn_message(warn)
ret = output
else:
ret = data.encode(sys.stdout.encoding)
except Exception as e:
ret = data.encode(UNICODE_ENCODING) if isinstance(data, unicode) else data
return ret
if mswindows:
import ctypes
import ctypes.wintypes
# Reference: https://gist.github.com/vsajip/758430
# https://github.com/ipython/ipython/issues/4252
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms686047%28v=vs.85%29.aspx
ctypes.windll.kernel32.SetConsoleTextAttribute.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD]
ctypes.windll.kernel32.SetConsoleTextAttribute.restype = ctypes.wintypes.BOOL
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'green', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
}
csi = '\x1b['
reset = '\x1b[0m'
disable_coloring = False
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty() and not self.disable_coloring
def emit(self, record):
try:
message = stdout_encode(self.format(record))
stream = self.stream
if not self.is_tty:
if message and message[0] == "\r":
message = message[1:]
if sys.version > '3':
message = message.decode()
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except IOError:
pass
except Exception as e:
self.handleError(record)
if not mswindows:
def output_colorized(self, message):
self.stream.write(message.decode('utf-8'))
else:
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message):
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)
def colorize(self, message, record):
if record.levelno in self.level_map and self.is_tty:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params and message:
if message.lstrip() != message:
prefix = re.search(r"\s+", message).group(0)
message = message[len(prefix):]
else:
prefix = ""
message = "%s%s" % (prefix, ''.join((self.csi, ';'.join(params),
'm', message, self.reset)))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
return self.colorize(message, record)
try:
sh = ColorizingStreamHandler(sys.stdout)
except ImportError:
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(sh_format)
logger.addHandler(sh)
# file handle
fh = cloghandler.ConcurrentRotatingFileHandler(logfile, maxBytes=(1048576 * 5), backupCount=7)
fh.setFormatter(fh_format)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
|
the-stack_106_16671
|
import os
def solve():
filepath = os.path.join(os.path.dirname(__file__), '042_words.txt')
with open(filepath) as f:
word_value_list = (sum([(ord(i) - 96) for i in word]) for word in f.read().replace('"', '').lower().split(','))
triangle_numbers = [n * (n + 1) / 2 for n in range(1, 1000)]
return sum(wv in triangle_numbers for wv in word_value_list)
if __name__ == '__main__':
print(solve())
|
the-stack_106_16672
|
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. Remove the master from the links in
the new models of the README:
(https://huggingface.co/transformers/master/model_doc/ -> https://huggingface.co/transformers/model_doc/)
then run `make fix-copies` to fix the index of the documentation.
2. Unpin specific versions from setup.py that use a git install.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
6. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggests using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
7. Upload the final version to actual pypi:
twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
9. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh
10. Update README.md to redirect to correct documentation.
"""
import os
import shutil
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
extras = {}
extras["ja"] = ["fugashi>=1.0", "ipadic>=1.0.0,<2.0", "unidic_lite>=1.0.7", "unidic>=1.0.2"]
extras["sklearn"] = ["scikit-learn"]
# keras2onnx and onnxconverter-common versions are pinned to a specific commit until 1.7.0 lands on pypi
extras["tf"] = [
"tensorflow>=2.0",
"onnxconverter-common",
"keras2onnx"
# "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common",
# "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx",
]
extras["tf-cpu"] = [
"tensorflow-cpu>=2.0",
"onnxconverter-common",
"keras2onnx"
# "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common",
# "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx",
]
extras["torch"] = ["torch>=1.0"]
if os.name == "nt": # windows
extras["retrieval"] = ["datasets"] # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = ["faiss-cpu", "datasets"]
extras["flax"] = ["jaxlib==0.1.55", "jax>=0.2.0", "flax==0.2.2"]
extras["tokenizers"] = ["tokenizers==0.9.2"]
extras["onnxruntime"] = ["onnxruntime>=1.4.0", "onnxruntime-tools>=1.4.2"]
extras["serving"] = ["pydantic", "uvicorn", "fastapi", "starlette"]
extras["sentencepiece"] = ["sentencepiece==0.1.91"]
extras["retrieval"] = ["faiss-cpu", "datasets"]
extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil"] + extras["retrieval"]
# sphinx-rtd-theme==0.5.0 introduced big changes in the style.
extras["docs"] = ["recommonmark", "sphinx==3.2.1", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"]
extras["quality"] = ["black >= 20.8b1", "isort >= 5.5.4", "flake8 >= 3.8.3"]
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
extras["dev"] = extras["all"] + extras["testing"] + extras["quality"] + extras["ja"] + extras["docs"] + extras["sklearn"]
setup(
name="transformers",
version="3.4.0",
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="[email protected]",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
install_requires=[
"numpy",
"tokenizers == 0.9.3",
# dataclasses for Python versions that don't have it
"dataclasses;python_version<'3.7'",
# utilities from PyPA to e.g. compare versions
"packaging",
# filesystem locks e.g. to prevent parallel downloads
"filelock",
# for downloading models over HTTPS
"requests",
# progress bars in model download and training scripts
"tqdm >= 4.27",
# for OpenAI GPT
"regex != 2019.12.17",
# for SentencePiece models
"sentencepiece == 0.1.91",
"protobuf",
# for XLM
"sacremoses",
],
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
the-stack_106_16673
|
import glob
import os
import shutil
from conans import ConanFile, CMake, tools
class CjsonConan(ConanFile):
name = "cjson"
description = "Ultralightweight JSON parser in ANSI C."
license = "MIT"
topics = ("conan", "cjson", "json", "parser")
homepage = "https://github.com/DaveGamble/cJSON"
url = "https://github.com/conan-io/conan-center-index"
exports_sources = "CMakeLists.txt"
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"use_locales": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"use_locales": True
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("cJSON-" + self.version, self._source_subfolder)
def build(self):
cmake = self._configure_cmake()
cmake.build()
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["ENABLE_SANITIZERS"] = False
self._cmake.definitions["ENABLE_SAFE_STACK"] = False
self._cmake.definitions["ENABLE_PUBLIC_SYMBOLS"] = True
self._cmake.definitions["ENABLE_HIDDEN_SYMBOLS"] = False
self._cmake.definitions["ENABLE_TARGET_EXPORT"] = False
self._cmake.definitions["BUILD_SHARED_AND_STATIC_LIBS"] = False
self._cmake.definitions["CJSON_OVERRIDE_BUILD_SHARED_LIBS"] = False
self._cmake.definitions["ENABLE_CJSON_UTILS"] = False
self._cmake.definitions["ENABLE_CJSON_TEST"] = False
self._cmake.definitions["ENABLE_LOCALES"] = self.options.use_locales
self._cmake.definitions["ENABLE_FUZZING"] = False
# Disable Custom Compiler Flags for MinGW on Windows, because it uses -fstack-protector-strong
self._cmake.definitions["ENABLE_CUSTOM_COMPILER_FLAGS"] = not (self.settings.os == "Windows" and self.settings.compiler == "gcc")
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
self._move_dll_to_bin_folder()
def _move_dll_to_bin_folder(self):
if self.settings.os == "Windows" and self.options.shared:
bin_dir = os.path.join(self.package_folder, "bin")
if not os.path.exists(bin_dir):
os.mkdir(bin_dir)
for dll_file in glob.glob(os.path.join(self.package_folder, "lib", "*.dll")):
shutil.move(dll_file, bin_dir)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Linux":
self.cpp_info.system_libs.append("m")
|
the-stack_106_16675
|
import comet_ml
import torch
import torch.nn as NN
import torch.nn.functional as F
import torch.utils.data as data_utils
import deepracing_models.data_loading.proto_datasets as PD
from tqdm import tqdm as tqdm
import deepracing_models.nn_models.LossFunctions as loss_functions
import deepracing_models.nn_models.Models
import numpy as np
import torch.optim as optim
import pickle
from datetime import datetime
import os
import string
import argparse
import torchvision.transforms as transforms
import yaml
import shutil
import skimage
import skimage.io
import deepracing
from deepracing import trackNames
import deepracing.backend
import imageio
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import deepracing_models.math_utils.bezier
import socket
import json
from comet_ml.api import API, APIExperiment
import cv2
import torchvision, torchvision.transforms as T
from deepracing_models.data_loading.image_transforms import GaussianBlur
#torch.backends.cudnn.enabled = False
def run_epoch(experiment, network, optimizer, dataloader, ego_agent_loss, config, use_tqdm = False, debug=False):
cum_loss = 0.0
cum_param_loss = 0.0
cum_position_loss = 0.0
cum_velocity_loss = 0.0
num_samples=0.0
if use_tqdm:
t = tqdm(enumerate(dataloader), total=len(dataloader))
else:
t = enumerate(dataloader)
network.train() # This is important to call before training!
dataloaderlen = len(dataloader)
dev = next(network.parameters()).device # we are only doing single-device training for now, so this works fine.
loss_weights = config["loss_weights"]
bezier_order = network.bezier_order
for (i, scandict) in t:
scans = scandict["scans"].double().to(device=dev)
ego_current_pose = scandict["ego_current_pose"].double().to(device=dev)
ego_positions = scandict["ego_positions"].double().to(device=dev)
ego_velocities = scandict["ego_velocities"].double().to(device=dev)
session_times = scandict["session_times"].double().to(device=dev)
raceline = scandict["raceline"].double().to(device=dev)
batch_size = scans.shape[0]
predictions = network(scans)
# dt = session_times[:,-1]-session_times[:,0]
# s_torch_cur = (session_times - session_times[:,0,None])/dt[:,None]
s_torch_cur = torch.linspace(0.0, 1.0, steps=session_times.shape[1], dtype=torch.float64, device=dev).expand(batch_size,-1)
Mpos = deepracing_models.math_utils.bezierM(s_torch_cur, bezier_order)
# print(predictions.shape)
# print(s_torch_cur.shape)
# print(Mpos.shape)
pred_points = torch.matmul(Mpos, predictions)
# Mvel, pred_vel_s = deepracing_models.math_utils.bezier.bezierDerivative(predictions, t = s_torch_cur, order=1)
# pred_vel_t = pred_vel_s/dt[:,None,None]
# loss = ego_agent_loss(pred_points, ego_positions)
loss = ego_agent_loss(pred_points, raceline)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if debug and False:
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
images_np = np.round(255.0*input_images[0].detach().cpu().numpy().copy().transpose(0,2,3,1)).astype(np.uint8)
#image_np_transpose=skimage.util.img_as_ubyte(images_np[-1].transpose(1,2,0))
# oap = other_agent_positions[other_agent_positions==other_agent_positions].view(1,-1,60,2)
# print(oap)
ims = []
for i in range(images_np.shape[0]):
ims.append([ax1.imshow(images_np[i])])
ani = animation.ArtistAnimation(fig, ims, interval=250, blit=True, repeat=True)
_, controlpoints_fit = deepracing_models.math_utils.bezier.bezierLsqfit(ego_positions, bezier_order, M = Mpos)
fit_points = torch.matmul(Mpos, controlpoints_fit)
gt_points_np = ego_positions[0].detach().cpu().numpy().copy()
pred_points_np = pred_points[0].detach().cpu().numpy().copy()
pred_control_points_np = predictions[0].detach().cpu().numpy().copy()
fit_points_np = fit_points[0].cpu().numpy().copy()
fit_control_points_np = controlpoints_fit[0].cpu().numpy().copy()
ymin = np.min(np.hstack([gt_points_np[:,1], fit_points_np[:,1], pred_points_np[:,1] ]))-0.05
ymax = np.max(np.hstack([gt_points_np[:,1], fit_points_np[:,1], pred_points_np[:,1] ]))+0.05
xmin = np.min(np.hstack([gt_points_np[:,0], fit_points_np[:,0] ])) - 0.05
xmax = np.max(np.hstack([gt_points_np[:,0], fit_points_np[:,0] ]))
ax2.set_xlim(ymax,ymin)
ax2.set_ylim(xmin,xmax)
ax2.plot(gt_points_np[:,1],gt_points_np[:,0],'g+', label="Ground Truth Waypoints")
ax2.plot(fit_points_np[:,1],fit_points_np[:,0],'b-', label="Best-fit Bézier Curve")
ax2.plot(pred_points_np[:,1],pred_points_np[:,0],'r-', label="Predicted Bézier Curve")
plt.legend()
plt.show()
if not debug:
experiment.log_metric("current_position_loss", loss)
if use_tqdm:
t.set_postfix({"current_position_loss" : float(loss.item())})
def go():
parser = argparse.ArgumentParser(description="Train AdmiralNet Pose Predictor")
parser.add_argument("dataset_config_file", type=str, help="Dataset Configuration file to load")
parser.add_argument("model_config_file", type=str, help="Model Configuration file to load")
parser.add_argument("output_directory", type=str, help="Where to put the resulting model files")
parser.add_argument("--debug", action="store_true", help="Display images upon each iteration of the training loop")
parser.add_argument("--model_load", type=str, default=None, help="Load this model file prior to running. usually in conjunction with debug")
parser.add_argument("--models_to_disk", action="store_true", help="Save the model files to disk in addition to comet.ml")
parser.add_argument("--tqdm", action="store_true", help="Display tqdm progress bar on each epoch")
parser.add_argument("--gpu", type=int, default=None, help="Override the GPU index specified in the config file")
args = parser.parse_args()
dataset_config_file = args.dataset_config_file
debug = args.debug
model_load = args.model_load
models_to_disk = args.models_to_disk
use_tqdm = args.tqdm
with open(dataset_config_file) as f:
dataset_config = yaml.load(f, Loader = yaml.SafeLoader)
config_file = args.model_config_file
with open(config_file) as f:
config = yaml.load(f, Loader = yaml.SafeLoader)
print(dataset_config)
context_length = config["context_length"]
bezier_order = config["bezier_order"]
batch_size = config["batch_size"]
learning_rate = config["learning_rate"]
momentum = config["momentum"]
dampening = config["dampening"]
nesterov = config["nesterov"]
project_name = config["project_name"]
if args.gpu is not None:
gpu = args.gpu
config["gpu"] = gpu
else:
gpu = config["gpu"]
torch.cuda.set_device(gpu)
num_epochs = config["num_epochs"]
num_workers = config["num_workers"]
hidden_dim = config["hidden_dimension"]
input_features = config["input_features"]
num_recurrent_layers = config.get("num_recurrent_layers",1)
config["hostname"] = socket.gethostname()
print("Using config:\n%s" % (str(config)))
net = deepracing_models.nn_models.Models.LinearRecursionCurvePredictor(input_features, hidden_dimension=hidden_dim, bezier_order=bezier_order)
print("net:\n%s" % (str(net)))
ego_agent_loss = deepracing_models.nn_models.LossFunctions.SquaredLpNormLoss()
print("casting stuff to double")
net = net.double()
ego_agent_loss = ego_agent_loss.double()
if model_load is not None:
net.load_state_dict(torch.load(model_load, map_location=torch.device("cpu")))
if gpu>=0:
print("moving stuff to GPU")
device = torch.device("cuda:%d" % gpu)
net = net.cuda(gpu)
ego_agent_loss = ego_agent_loss.cuda(gpu)
else:
device = torch.device("cpu")
optimizer = optim.SGD(net.parameters(), lr = learning_rate, momentum = momentum, dampening=dampening, nesterov=nesterov)
#image_wrapper = deepracing.backend.ImageFolderWrapper(os.path.dirname(image_db))
dsets=[]
dsetfolders = []
alltags = set(dataset_config.get("tags",[]))
dset_output_lengths=[]
return_other_agents = bool(dataset_config.get("other_agents",False))
for dataset in dataset_config["datasets"]:
dlocal : dict = {k: dataset_config[k] for k in dataset_config.keys() if (not (k in ["datasets"]))}
dlocal.update(dataset)
print("Parsing database config: %s" %(str(dlocal)))
root_folder = dlocal["root_folder"]
position_indices = dlocal["position_indices"]
dataset_tags = dlocal.get("tags", [])
alltags = alltags.union(set(dataset_tags))
dsetfolders.append(root_folder)
scan_folder = os.path.join(root_folder,"laser_scans")
label_folder = os.path.join(root_folder,"laser_scan_labels")
key_file = os.path.join(label_folder,"goodkeys.txt")
with open(key_file,"r") as f:
keys = [l.replace("\n","") for l in f.readlines()]
keys = [k for k in keys if not k==""]
numkeys = len(keys)
scan_wrapper = deepracing.backend.LaserScanLMDBWrapper()
scan_wrapper.openDatabase( os.path.join(scan_folder,"lmdb"), mapsize=38000*numkeys )
label_wrapper = deepracing.backend.MultiAgentLabelLMDBWrapper()
label_wrapper.openDatabase(os.path.join(label_folder,"lmdb"), mapsize=17000*numkeys )
current_dset = PD.LaserScanDataset(scan_wrapper, label_wrapper, keys, context_length, position_indices, return_other_agents=return_other_agents)
dsets.append(current_dset)
print("\n")
if len(dsets)==1:
dset = dsets[0]
else:
dset = torch.utils.data.ConcatDataset(dsets)
dataloader = data_utils.DataLoader(dset, batch_size=batch_size,
shuffle=True, num_workers=num_workers, pin_memory=gpu>=0)
print("Dataloader of of length %d" %(len(dataloader)))
main_dir = args.output_directory
if debug:
output_directory = os.path.join(main_dir, "debug")
os.makedirs(output_directory, exist_ok=True)
experiment = None
else:
experiment = comet_ml.Experiment(workspace="electric-turtle", project_name=project_name)
output_directory = os.path.join(main_dir, experiment.get_key())
if os.path.isdir(output_directory) :
raise FileExistsError("%s already exists, this should not happen." %(output_directory) )
os.makedirs(output_directory)
experiment.log_parameters(config)
experiment.log_parameters(dataset_config)
dsetsjson = json.dumps(dataset_config, indent=1)
experiment.log_parameter("datasets",dsetsjson)
experiment.log_text(dsetsjson)
experiment.add_tag("bezierpredictor")
if len(alltags)>0:
experiment.add_tags(list(alltags))
experiment_config = {"experiment_key": experiment.get_key()}
yaml.dump(experiment_config, stream=open(os.path.join(output_directory,"experiment_config.yaml"),"w"), Dumper=yaml.SafeDumper)
yaml.dump(dataset_config, stream=open(os.path.join(output_directory,"dataset_config.yaml"), "w"), Dumper = yaml.SafeDumper)
yaml.dump(config, stream=open(os.path.join(output_directory,"model_config.yaml"), "w"), Dumper = yaml.SafeDumper)
experiment.log_asset(os.path.join(output_directory,"dataset_config.yaml"),file_name="datasets.yaml")
experiment.log_asset(os.path.join(output_directory,"experiment_config.yaml"),file_name="experiment_config.yaml")
experiment.log_asset(os.path.join(output_directory,"model_config.yaml"),file_name="model_config.yaml")
i = 0
#def run_epoch(experiment, net, optimizer, dataloader, raceline_loss, other_agent_loss, config)
if debug:
run_epoch(experiment, net, optimizer, dataloader, ego_agent_loss, config, debug=True, use_tqdm=True)
else:
netpostfix = "epoch_%d_params.pt"
optimizerpostfix = "epoch_%d_optimizer.pt"
with experiment.train():
while i < num_epochs:
time.sleep(2.0)
postfix = i + 1
if models_to_disk:
modelfile = netpostfix % (postfix-1)
optimizerfile = optimizerpostfix % (postfix-1)
else:
modelfile = "params.pt"
optimizerfile = "optimizer.pt"
print("Running Epoch Number %d" %(postfix))
#dset.clearReaders()
try:
tick = time.time()
run_epoch(experiment, net, optimizer, dataloader, ego_agent_loss, config, use_tqdm=use_tqdm)
tock = time.time()
print("Finished epoch %d in %f seconds." % ( postfix , tock-tick ) )
experiment.log_epoch_end(postfix)
except FileExistsError as e:
raise e
except Exception as e:
print("Restarting epoch %d because %s"%(postfix, str(e)))
modelin = os.path.join(output_directory, modelfile)
optimizerin = os.path.join(output_directory,optimizerfile)
net.load_state_dict(torch.load(modelin))
optimizer.load_state_dict(torch.load(optimizerin))
continue
modelout = os.path.join(output_directory,modelfile)
with open(modelout,'wb') as f:
torch.save(net.state_dict(), f)
with open(modelout,'rb') as f:
experiment.log_asset(f,file_name=netpostfix %(postfix,) )
optimizerout = os.path.join(output_directory, optimizerfile)
with open(optimizerout,'wb') as f:
torch.save(optimizer.state_dict(), f)
with open(optimizerout,'rb') as f:
experiment.log_asset(f,file_name=optimizerpostfix %(postfix,) )
i = i + 1
import logging
if __name__ == '__main__':
logging.basicConfig()
go()
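# Hedged illustration of the model-config YAML consumed above. Only the key names are
# taken from this script (context_length, bezier_order, batch_size, learning_rate,
# momentum, dampening, nesterov, project_name, gpu, num_epochs, num_workers,
# hidden_dimension, input_features, optional num_recurrent_layers); the values below
# are invented placeholders, not settings from a real experiment:
#
#   context_length: 5
#   bezier_order: 7
#   batch_size: 64
#   learning_rate: 0.001
#   momentum: 0.9
#   dampening: 0.0
#   nesterov: true
#   project_name: admiralnet-pose
#   gpu: 0
#   num_epochs: 100
#   num_workers: 4
#   hidden_dimension: 100
#   input_features: 20
#   num_recurrent_layers: 1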
|
the-stack_106_16676
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PlazaRoute documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 24 12:26:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_autodoc_typehints']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'plazaroute'
copyright = '2017, Jonas Matter, Robin Suter'
author = 'Jonas Matter, Robin Suter'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PlazaRoutedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PlazaRoute.tex', 'PlazaRoute Documentation',
'Jonas Matter, Robin Suter', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'plazaroute', 'PlazaRoute Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PlazaRoute', 'PlazaRoute Documentation',
author, 'PlazaRoute', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
the-stack_106_16678
|
"""Indent new lines automatically when Enter is pressed."""
from __future__ import annotations
import dataclasses
import logging
import re
import tkinter
from functools import partial
from typing import Optional, Tuple
from porcupine import get_tab_manager, tabs
# without this, pressing enter twice would strip all trailing whitespace
# from the blank line above the cursor, and then after_enter() wouldn't
# do anything
setup_before = ["rstrip"]
log = logging.getLogger(__name__)
ALT_FLAG = 0b1000
def leading_whitespace(string: str) -> str:
r"""Return leading whitespace characters. Ignores trailing '\n'.
>>> leading_whitespace('\t \t lel')
'\t \t '
>>> leading_whitespace(' \n')
' '
"""
count = len(string) - len(string.lstrip())
return string[:count].rstrip("\n")
@dataclasses.dataclass
class AutoIndentRegexes:
indent: Optional[str] = None
dedent: Optional[str] = None
def get_regexes(tab: tabs.FileTab) -> Tuple[str, str]:
config = tab.settings.get("autoindent_regexes", Optional[AutoIndentRegexes])
if config is None:
config = AutoIndentRegexes(None, None)
assert isinstance(config, AutoIndentRegexes)
if config.indent is not None:
try:
re.compile(config.indent)
except re.error:
log.warning(f"invalid indent regex: {config.indent}")
config.indent = None
if config.dedent is not None:
try:
re.compile(config.dedent)
except re.error:
log.warning(f"invalid dedent regex: {config.dedent}")
config.dedent = None
return (
config.indent or r"this regex matches nothing^",
config.dedent or r"this regex matches nothing^",
)
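# Hedged illustration (not from Porcupine's shipped configuration): a filetype plugin
# could set the "autoindent_regexes" option so that, for example, shell-style block
# openers indent and closers dedent. The regex values are invented for this sketch;
# only the option name and the AutoIndentRegexes dataclass come from this module.
#
#     >>> config = AutoIndentRegexes(indent=r".*\b(then|do)", dedent=r"(fi|done)")
#     >>> bool(re.fullmatch(config.indent, "if [ -f foo ]; then"))
#     True
#     >>> bool(re.fullmatch(config.dedent, "done"))
#     True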
def after_enter(tab: tabs.FileTab, alt_pressed: bool) -> None:
lineno = int(tab.textwidget.index("insert").split(".")[0])
prevline = tab.textwidget.get(f"{lineno}.0 - 1 line", f"{lineno}.0")
# we can't strip trailing whitespace before this because then
# pressing enter twice would get rid of all indentation
tab.textwidget.insert("insert", leading_whitespace(prevline))
comment_prefix = tab.settings.get("comment_prefix", Optional[str])
if comment_prefix is None:
prevline = prevline.strip()
else:
# Not perfect, but should work fine
prevline = prevline.split(comment_prefix)[0].strip()
indent_regex, dedent_regex = get_regexes(tab)
if (
prevline.endswith(("(", "[", "{")) or re.fullmatch(indent_regex, prevline)
) and not alt_pressed:
tab.textwidget.indent("insert")
elif re.fullmatch(dedent_regex, prevline):
# must be end of a block
tab.textwidget.dedent("insert")
def on_enter_press(tab: tabs.FileTab, event: tkinter.Event[tkinter.Text]) -> None:
assert isinstance(event.state, int)
alt_pressed = bool(event.state & ALT_FLAG)
tab.textwidget.after_idle(after_enter, tab, alt_pressed)
def on_closing_brace(tab: tabs.FileTab, event: tkinter.Event[tkinter.Text]) -> None:
# Don't dedent when there's some garbage before cursor, other than comment
# prefix. It's handy to have autodedent working inside big comments with
# example code in them.
before_cursor = tab.textwidget.get("insert linestart", "insert")
before_cursor = before_cursor.replace(
tab.settings.get("comment_prefix", Optional[str]) or "", ""
)
if before_cursor.strip():
return
# Don't dedent when after_enter() has already dedented
if leading_whitespace(tab.textwidget.get("insert - 1 line", "insert - 1 line lineend")):
return
tab.textwidget.dedent("insert")
def on_new_tab(tab: tabs.Tab) -> None:
if isinstance(tab, tabs.FileTab):
tab.settings.add_option("autoindent_regexes", None, Optional[AutoIndentRegexes])
tab.textwidget.bind("<Return>", partial(on_enter_press, tab), add=True)
tab.textwidget.bind("<parenright>", partial(on_closing_brace, tab), add=True)
tab.textwidget.bind("<bracketright>", partial(on_closing_brace, tab), add=True)
tab.textwidget.bind("<braceright>", partial(on_closing_brace, tab), add=True)
def setup() -> None:
get_tab_manager().add_tab_callback(on_new_tab)
|
the-stack_106_16680
|
import os
# rlaunch = 'rlaunch --cpu=4 --memory=4096 --gpu=1 --preemptible=no '
datasets = ['cifar-10']
depths = [20]
gpu_id = '0'
batchsize = 256
epoch = 150
exp_dir = '/data/ouyangzhihao/Exp/ICNN/LearnableMask/tb_dir/learnable_mask_baseline/TwoStep_Algorithm2_Sigmoid10_Lmax'
# exp_dir = '/data/ouyangzhihao/Exp/ICNN/LearnableMask/tb_dir/learnable_mask_baseline/Debug'
res = exp_dir + 'res.txt'
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
print('run ', exp_dir.split('/')[-1])
# os.system('rm -r ' + exp_dir)
for data in datasets:
for depth in depths:
# cmd = rlaunch + '-- python3 ./train.py --dataset %s --depth %d --res %s --gpu-ids %s --batch_size %d --epoch %d --exp_dir %s' \
# %(data,depth,res,gpu_id,batchsize,epoch,exp_dir)
cmd = 'python3 ./train.py --dataset %s --depth %d --res %s --gpu-ids %s --batch_size %d --epoch %d --exp_dir %s' \
% (data, depth, res, gpu_id, batchsize, epoch, exp_dir)
os.system(cmd)
|
the-stack_106_16682
|
import pytest
from adventofcode2020.solutions.day08 import Day08PartB
class TestDay08PartB:
instruction_1 = "\n".join(
[
"nop +0",
"acc +1",
"jmp +4",
"acc +3",
"jmp -3",
"acc -99",
"acc +1",
"jmp -4",
"acc +6",
]
)
@pytest.mark.parametrize(("input_data", "expected_result"), [(instruction_1, 8)])
def test_day08b_solve(self, input_data, expected_result):
solution = Day08PartB()
result = solution.solve(input_data)
assert result == expected_result
def test_day08b_data(self):
"""Result we got when we did the real solution"""
solution = Day08PartB()
res = solution("day_08/day08.txt")
assert res == 2477
|
the-stack_106_16684
|
#!/usr/bin/env python3
import os
import sys
sys.path.append('./_model')
from env import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--distribution',
help='Policy Distribution',
type=str,
default='Normal',
required=False)
parser.add_argument(
'--maxExperiences',
help='Number of experiences to collect.',
type=int,
default=1e6,
required=False)
print("Running Mountaincart example with arguments:")
args = parser.parse_args()
print(args)
####### Defining Korali Problem
import korali
k = korali.Engine()
e = korali.Experiment()
### Defining the Mountain Cart problem's configuration
e["Problem"]["Type"] = "Reinforcement Learning / Continuous"
e["Problem"]["Environment Function"] = env
e["Problem"]["Actions Between Policy Updates"] = 1
e["Variables"][0]["Name"] = "Cart Position X"
e["Variables"][0]["Type"] = "State"
e["Variables"][1]["Name"] = "Cart Position Y"
e["Variables"][1]["Type"] = "State"
e["Variables"][2]["Name"] = "Cart Velocity X"
e["Variables"][2]["Type"] = "State"
e["Variables"][3]["Name"] = "Cart Velocity Y"
e["Variables"][3]["Type"] = "State"
e["Variables"][4]["Name"] = "Cart Acceleration X"
e["Variables"][4]["Type"] = "State"
e["Variables"][5]["Name"] = "Cart Acceleration Y"
e["Variables"][5]["Type"] = "State"
e["Variables"][6]["Name"] = "Force"
e["Variables"][6]["Type"] = "Action"
e["Variables"][6]["Lower Bound"] = -1.0
e["Variables"][6]["Upper Bound"] = +1.0
e["Variables"][6]["Initial Exploration Noise"] = 0.3
### Defining Agent Configuration
e["Solver"]["Type"] = "Agent / Continuous / VRACER"
e["Solver"]["Mode"] = "Training"
e["Solver"]["Experiences Between Policy Updates"] = 1
e["Solver"]["Episodes Per Generation"] = 1
e["Solver"]["Experience Replay"]["Start Size"] = 4096
e["Solver"]["Experience Replay"]["Maximum Size"] = 262144
e["Solver"]["Discount Factor"] = 0.995
e["Solver"]["Learning Rate"] = 0.0001
e["Solver"]["Mini Batch"]["Size"] = 256
e["Solver"]["State Rescaling"]["Enabled"] = True
e["Solver"]["Reward"]["Rescaling"]["Enabled"] = True
e["Solver"]["L2 Regularization"]["Enabled"] = False
### Configuring the neural network and its hidden layers
e["Solver"]["Policy"]["Distribution"] = args.distribution
e["Solver"]["Learning Rate"] = 1e-4
e["Solver"]["Neural Network"]["Engine"] = "OneDNN"
e["Solver"]["Neural Network"]["Optimizer"] = "Adam"
e["Solver"]["Neural Network"]["Hidden Layers"][0]["Type"] = "Layer/Linear"
e["Solver"]["Neural Network"]["Hidden Layers"][0]["Output Channels"] = 128
e["Solver"]["Neural Network"]["Hidden Layers"][1]["Type"] = "Layer/Activation"
e["Solver"]["Neural Network"]["Hidden Layers"][1]["Function"] = "Elementwise/Tanh"
e["Solver"]["Neural Network"]["Hidden Layers"][2]["Type"] = "Layer/Linear"
e["Solver"]["Neural Network"]["Hidden Layers"][2]["Output Channels"] = 128
e["Solver"]["Neural Network"]["Hidden Layers"][3]["Type"] = "Layer/Activation"
e["Solver"]["Neural Network"]["Hidden Layers"][3]["Function"] = "Elementwise/Tanh"
### Defining Termination Criteria
e["Solver"]["Termination Criteria"]["Max Experiences"] = args.maxExperiences
### Setting file output configuration
e["File Output"]["Enabled"] = True
e["File Output"]["Frequency"] = 500
e["File Output"]["Path"] = '_korali_results_{}'.format(e["Solver"]["Policy"]["Distribution"].replace(' ','_'))
### Running Experiment
k.run(e)
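### Hedged note (not part of this example script): after training finishes, the same
### experiment can typically be re-run in evaluation mode by switching the solver mode,
### e.g.
###
###   e["Solver"]["Mode"] = "Testing"
###   k.run(e)
###
### The exact reload/testing workflow depends on the Korali version and is an
### assumption here, not something defined by this file.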
|
the-stack_106_16686
|
import os
import cv2
import torch.utils.data
from PIL import Image
import pandas as pd
import numpy as np
import json
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.config import cfg
class SYSUDataset(torch.utils.data.Dataset):
CLASSES = ("__background__ ", 'person') ###
def __init__(self, root, ann_file, split, transforms=None):
self.gallery_size = cfg.TEST.GALLERY_SIZE
self.root = root
self.anno = ann_file
self.split = split
self.transforms = transforms
self.anno_dir = 'data/sysu/annotations'
self.train_DF = 'data/sysu/SIPN_annotation/trainAllDF.csv'
self.test_DF = 'data/sysu/SIPN_annotation/testAllDF.csv'
self.query_DF = 'data/sysu/SIPN_annotation/queryDF.csv'
self.gallery = 'data/sysu/SIPN_annotation/q_to_g{}DF.csv'.format(self.gallery_size)
self.demo = False
if self.demo:
self.pid = 'pid_0.csv'
self.pid_file = os.path.join(self.anno_dir, 'pids', self.pid)
query_box = pd.read_csv(self.pid_file)
imname = query_box['imname']
self.ids = np.array(imname.squeeze()).tolist() # 's15533.jpg'
else:
with open(self.anno) as json_anno:
anno_dict = json.load(json_anno)
self.ids = [img['file_name'] for img in anno_dict['images']]
self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
cls = SYSUDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
if self.split == 'train' or self.split == 'val':
self.all_boxes = pd.read_csv(self.train_DF)
if self.split == 'test':
self.all_boxes = pd.read_csv(self.test_DF)
if self.split == 'query':
self.all_boxes = pd.read_csv(self.query_DF)
# as you would do normally
def __getitem__(self, index):
# load the image as a PIL Image
img_id = self.ids[index]
im_path = os.path.join(self.root, img_id)
img = Image.open(im_path).convert("RGB")
target = self.get_groundtruth(index)
target = target.clip_to_image(remove_empty=True)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target, index
def __len__(self):
return len(self.ids)
def get_groundtruth(self, index):
img_id = self.ids[index]
img_num = img_id.split('.')[0][1:]
if self.split == 'query':
boxes_df = self.all_boxes.query('pid==@index')
else:
boxes_df = self.all_boxes.query('imname==@img_id')
boxes = boxes_df.loc[:, 'x1': 'pid'].copy()
#boxes = boxes_df.copy()
boxes.loc[:, 'del_x'] += boxes.loc[:, 'x1']
boxes.loc[:, 'del_y'] += boxes.loc[:, 'y1']
boxes = boxes.values.astype(np.float32)
boxes = boxes.tolist()
anno = self._preprocess_annotation(img_num, boxes)
orig_shape = self.get_img_info(index)
(width, height) = orig_shape['width'], orig_shape['height']
target = BoxList(anno["boxes"], (width, height), mode="xyxy")
target.add_field("img", anno["img_name"])
target.add_field("labels", anno["labels"])
target.add_field("pid", anno["pid"])
target.add_field("difficult", anno["difficult"])
return target
def _preprocess_annotation(self, img, target):
boxes = []
gt_classes = []
pid = []
img_name = []
name = 'person'
difficult = False
difficult_boxes = []
for obj in target:
bndbox = tuple(
map(
int,
[
obj[0],
obj[1],
obj[2],
obj[3],
],
)
)
boxes.append(bndbox)
gt_classes.append(self.class_to_ind[name])
pid.append(int(obj[-1]))
img_name.append(int(img)) ###
difficult_boxes.append(difficult)
res = {
"img_name": torch.tensor(img_name, dtype=torch.int32),
"boxes": torch.tensor(boxes, dtype=torch.float32),
"labels": torch.tensor(gt_classes),
"pid": torch.tensor(pid, dtype=torch.int32),
"difficult": torch.tensor(difficult_boxes),
#"im_info": im_info,
}
return res
def get_img_info(self, index):
# get img_height and img_width. This is used if
# we want to split the batches according to the aspect ratio
# of the image, as it can be more efficient than loading the
# image from disk
img_id = self.ids[index]
im_path = os.path.join(self.root, img_id)
img = cv2.imread(im_path).astype(np.float32)
orig_shape = img.shape
return {"height": orig_shape[0], "width": orig_shape[1]}
def map_class_id_to_class_name(self, class_id):
return SYSUDataset.CLASSES[class_id]
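# Hedged usage sketch (the image root, annotation file, and split name are illustrative
# assumptions, not values taken from this repository):
#
#     >>> dset = SYSUDataset(root="data/sysu/Image/SSM",
#     ...                    ann_file="data/sysu/annotations/train.json",
#     ...                    split="train")
#     >>> img, target, idx = dset[0]   # PIL image, BoxList target, sample index
#     >>> target.bbox.shape[1], target.get_field("pid").dtype
#     (4, torch.int32)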
|
the-stack_106_16687
|
"""A WeatherController Module."""
from masonite.request import Request
from masonite.view import View
from masonite.controllers import Controller
from app.City import City
import requests
class WeatherController(Controller):
"""WeatherController Controller Class."""
def __init__(self, request: Request):
"""WeatherController Initializer
Arguments:
request {masonite.request.Request} -- The Masonite Request class.
"""
self.request = request
def show(self, view: View):
cities = City.all()
API_KEY = '83c2a4b1cd7f54c707c77b0aa0ad102d'
url = 'https://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid={}'
weather_data = []
for city in cities:
city_weather = requests.get(url.format(city.name,API_KEY)).json()
# request the weather data as JSON and convert it to a Python dict
weather = {
'id':city.id,
'city' : city.name,
'temperature' : city_weather['main']['temp'],
'description' : city_weather['weather'][0]['description'],
'icon' : city_weather['weather'][0]['icon']
}
weather_data.append(weather)
return view.render('weather', {'weather_data':weather_data })
def store(self, request: Request, view : View):
# Before saving the city in our database, ask the API whether the city exists
API_KEY = '83c2a4b1cd7f54c707c77b0aa0ad102d'
url = 'https://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid={}'
city = request.input('name')
city_weather = requests.get(url.format(city,API_KEY)).json()
if city_weather['cod'] == '404':
return request.redirect('/')
else:
City.create(
name = request.input("name")
)
return request.redirect('/')
def delete(self, request: Request):
city = City.find(request.param('id'))
city.delete()
return request.redirect('/')
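# Hedged sketch (an assumption, not copied from this project's routes/web.py): in a
# Masonite app these controller methods would typically be wired to routes roughly like
#
#     Get('/', 'WeatherController@show'),
#     Post('/add', 'WeatherController@store'),
#     Get('/delete/@id', 'WeatherController@delete'),
#
# with the exact route helpers depending on the Masonite version in use.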
|
the-stack_106_16688
|
"""1332_removing_non_null_constraints_on_entity
Revision ID: b2749f31f268
Revises: 4a305e1c8c69
Create Date: 2019-09-24 14:48:54.760496
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b2749f31f268'
down_revision = '4a305e1c8c69'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('entity', 'business_number',
existing_type=sa.VARCHAR(length=100),
nullable=True)
op.alter_column('entity', 'name',
existing_type=sa.VARCHAR(length=250),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('entity', 'name',
existing_type=sa.VARCHAR(length=250),
nullable=False)
op.alter_column('entity', 'business_number',
existing_type=sa.VARCHAR(length=100),
nullable=False)
# ### end Alembic commands ###
|
the-stack_106_16689
|
from __future__ import annotations
import math
from typing import NamedTuple, Optional
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from chorus import metadata
from chorus.config import DATA_FOLDER
from chorus.geo import Presence
AUDIO_FOLDER = DATA_FOLDER / "xeno-canto" / "numpy"
class SongDataset(torch.utils.data.Dataset):
"""
Create a torch.utils.data.Dataset from the given dataframe and optional
augmenting dataframe.
"""
def __init__(
self,
df: pd.DataFrame,
aug_df: pd.DataFrame | None,
targets: list[str],
train_samples: int,
):
"""
Inputs
------
df : pd.DataFrame
Dataframe that contains the columns 'id', 'en', and 'also'.
Likely the result of calling xeno_canto_meta()
aug_df : pd.DataFrame
Same format as `df`. If provided, we assume we are creating
a training dataset and will also randomly add noise and shuffling.
The audio in this dataframe will sometimes be added
to the original audio.
targets : list[str]
names of each target in output
train_samples : int
num samples per example
"""
super().__init__()
self.df = df
self.aug_df = aug_df
self.is_train = aug_df is not None
self.targets = targets
self.train_samples = train_samples
self.background_files = list((DATA_FOLDER / "background").glob("*"))
self.np_rng = np.random.RandomState(seed=20200313)
@staticmethod
def _row_to_labels(row):
return [row["scientific-name"]] + [x for x in row["also"] if x]
def load(self, xc_id: int, target_length: int) -> np.ndarray:
x = np.load(AUDIO_FOLDER / f"{xc_id}.npy", mmap_mode="r")
if x.size < target_length * 2:
# repeat signal to have length >= target_length * 2
x = np.tile(x, math.ceil(target_length * 2 / x.size))
start = self.np_rng.randint(0, max(x.size - target_length, 1))
x = x[start : start + target_length].copy()
return x
def augment(self, x, y_names):
if self.np_rng.random() < 1 / 32:
aug_index = self.np_rng.randint(self.aug_df.shape[0])
aug_row = self.aug_df.iloc[aug_index]
y_names = y_names + self._row_to_labels(aug_row)
smoothing = self.np_rng.random() * 0.75
x += self.load(aug_row["id"], self.train_samples) * smoothing
if self.np_rng.random() < 1 / 32:
x += self.np_rng.normal(
scale=x.std() * self.np_rng.random() * 0.25,
size=x.size,
)
if self.np_rng.random() < 1 / 32:
background = np.load(
self.np_rng.choice(self.background_files), mmap_mode="r"
) * (self.np_rng.random() / 2 + 0.05)
if background.size < x.size:
# Add fade in / fade out
background[: background.size // 10] *= np.linspace(
0, 1, background.size // 10, endpoint=False
)
background[-background.size // 10 :] *= np.linspace(
1, 0, background.size // 10, endpoint=False
)
start = self.np_rng.randint(x.size - background.size)
x[start : start + background.size] += background
else:
start = self.np_rng.randint(background.size - x.size)
x += background[start : start + x.size]
return x, y_names
def __getitem__(self, index):
"""
Inputs
------
index : int
Returns
-------
(input, expected_output)
"""
row = self.df.iloc[index]
xc_id = row["id"]
x = self.load(xc_id, self.train_samples)
y_names = self._row_to_labels(row)
if self.is_train:
x, y_names = self.augment(x, y_names)
y_names = set(y_names)
y = [1.0 if name in y_names else 0.0 for name in self.targets]
weights = [
1.0 if name == row["scientific-name"] else 0.2
for name in self.targets
]
return torch.as_tensor(x), torch.as_tensor(y), torch.as_tensor(weights)
def __len__(self):
return self.df.shape[0]
class Data(NamedTuple):
train: SongDataset
test: SongDataset
def model_data(
train_samples: int, targets: Optional[list[str]] = None
) -> tuple[list[str], Data]:
"""
Get the training and testing data to be used for the model.
Inputs
------
train_sample : int
How many samples per example
targets : Optional[list[str]]
If specified, this is the list of target species.
If not specified, this is inferred from the data itself.
Returns
-------
targets, Data
"""
df = metadata.xeno_canto()
observed_ids = [f.stem for f in AUDIO_FOLDER.glob("*")]
aug_df = df.loc[df["id"].isin(observed_ids)].copy()
aug_df = aug_df.loc[
(aug_df["length-seconds"] >= 5) & (aug_df["length-seconds"] <= 60)
]
names_in_geo = Presence().scientific_names
# Filter the data
df = df.loc[
df["q"].isin(["A", "B", "C"]) # "high quality" only
& (df["id"].isin(observed_ids)) # ignore data that failed to download
& (df["length-seconds"] >= 5) # not too short...
& (df["length-seconds"] <= 60) # or too long
& (df["scientific-name"].isin(names_in_geo)) # align with geo presence
& (df[["lat", "lng", "week"]].notnull().all(axis=1)) # geo query-able
]
if targets is None:
targets = sorted(
df["scientific-name"]
.value_counts()[lambda x: x >= 50]
.index.tolist()
)
df = df.loc[df["scientific-name"].isin(targets)]
train_df, test_df = train_test_split(
df, test_size=0.3, stratify=df["scientific-name"], random_state=2020310
)
aug_df.drop(test_df.index, inplace=True)
return targets, Data(
train=SongDataset(train_df, aug_df, targets, train_samples),
test=SongDataset(test_df, None, targets, train_samples),
)
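# Hedged usage sketch, not part of the original module: wrapping the datasets returned
# by model_data() in PyTorch DataLoaders. The sample count (16000, i.e. one second at an
# assumed 16 kHz) and the batch size are illustrative placeholders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    targets, data = model_data(train_samples=16_000)
    train_loader = DataLoader(data.train, batch_size=32, shuffle=True, num_workers=4)
    test_loader = DataLoader(data.test, batch_size=32, shuffle=False)
    x, y, w = next(iter(train_loader))
    # x: [32, 16000] waveforms; y, w: [32, len(targets)] labels and per-target weights
    print(len(targets), x.shape, y.shape, w.shape)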
|
the-stack_106_16690
|
#!/usr/bin/env python3
"""William Jenkins
Scripps Institution of Oceanography, UC San Diego
wjenkins [at] ucsd [dot] edu
May 2021
Contains functions, routines, and data recording for DEC model
initialization, training, validation, and inference.
"""
from datetime import datetime
import fnmatch
import os
import pickle
import shutil
import threading
import time
import numpy as np
import numpy.matlib  # needed for np.matlib.repmat() in cluster_metrics()
try:
import cupy
from cuml import KMeans, TSNE
from cuml.metrics.cluster.silhouette_score \
import cython_silhouette_samples as silhouette_samples
except:
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.metrics import silhouette_samples
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.metrics.pairwise import linear_kernel
from sklearn.mixture import GaussianMixture
import torch
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from RISCluster import plotting, utils
def batch_eval(dataloader, model, device, mute=True):
'''Run DEC model in batch_inference mode.
Parameters
----------
dataloader : PyTorch dataloader instance
Loads data from disk into memory.
model : PyTorch model instance
Model with trained parameters
device : PyTorch device object ('cpu' or 'gpu')
mute : Boolean (default=True)
If True, suppress the progress bar.
Returns
-------
z_array : array (M,D)
Latent space data (m_samples, d_features). If the model has a
clustering layer (an ``n_clusters`` attribute), the function instead
returns ``(q_array, labels, z_array)``.
'''
model.eval()
bsz = dataloader.batch_size
z_array = np.zeros(
(len(dataloader.dataset),
model.encoder.encoder[11].out_features),
dtype=np.float32
)
# If the model has the "n_clusters" attribute, then the correct
# outputs must be collected. In pre-training mode, there is no
# clustering layer, hence the model only returns 2 quantities; for
# other modes, the model yields 3 quantities.
if hasattr(model, 'n_clusters'):
q_array = np.zeros(
(len(dataloader.dataset),
model.n_clusters),
dtype=np.float32
)
for b, batch in enumerate(tqdm(dataloader, disable=mute)):
_, batch = batch
x = batch.to(device)
q, _, z = model(x)
q_array[b * bsz:(b*bsz) + x.size(0), :] = q.detach().cpu().numpy()
z_array[b * bsz:(b*bsz) + x.size(0), :] = z.detach().cpu().numpy()
labels = np.argmax(q_array.data, axis=1)
return np.round(q_array, 5), labels, z_array
else:
for b, batch in enumerate(tqdm(dataloader, disable=mute)):
_, batch = batch
x = batch.to(device)
_, z = model(x)
z_array[b * bsz:(b*bsz) + x.size(0), :] = z.detach().cpu().numpy()
return z_array
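# Hedged call sketch for batch_eval() above (variable names are placeholders): a plain
# autoencoder yields only the latent array, while a model carrying an ``n_clusters``
# attribute (i.e. with a clustering layer) also yields soft assignments and hard labels.
#
#     >>> z = batch_eval(loader, aec_model, device)              # pretraining / AEC
#     >>> q, labels, z = batch_eval(loader, dec_model, device)   # DEC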
def batch_training(model, dataloader, optimizer, metric, device):
'''Run DEC model in batch_training mode.
Parameters
----------
model : PyTorch model instance
Model with untrained parameters.
dataloader : PyTorch dataloader instance
Loads data from disk into memory.
optimizer : PyTorch optimizer instance
metric : PyTorch metric instance
Measures the loss function.
device : PyTorch device object ('cpu' or 'gpu')
Returns
-------
model : Pytorch model instance
Model with trained parameters.
epoch_loss : float
Loss function value for the given epoch.
'''
model.train()
running_loss = 0.0
running_size = 0
pbar = tqdm(
dataloader,
leave=True,
desc=" Training",
unit="batch",
postfix={str(metric)[:-2]: "%.6f" % 0.0},
bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
)
for batch in pbar:
_, batch = batch
x = batch.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(True):
x_rec, _ = model(x)
loss = metric(x_rec, x)
loss.backward()
optimizer.step()
running_loss += loss.cpu().detach().numpy() * x.size(0)
running_size += x.size(0)
pbar.set_postfix(
{str(metric)[:-2]: f"{(running_loss / running_size):.4e}"}
)
epoch_loss = running_loss / len(dataloader.dataset)
return model, epoch_loss
def batch_validation(model, dataloader, metrics, config):
'''Run DEC model in batch_validation mode.
Parameters
----------
model : PyTorch model instance
Model with trained parameters.
dataloader : PyTorch dataloader instance
Loads data from disk into memory.
metrics : list
List of PyTorch metric instances
config : Configuration object
Object containing information about the experiment configuration
Returns
-------
epoch_loss : float
Loss function value for the given epoch.
'''
if not isinstance(metrics, list):
metrics = [metrics]
model.eval()
running_loss = np.zeros((len(metrics),), dtype=float)
running_size = np.zeros((len(metrics),), dtype=int)
pbar = tqdm(
dataloader,
leave=True,
desc="Validation",
unit="batch",
postfix={str(metric)[:-2]: "%.6f" % 0.0 for metric in metrics},
bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
)
for batch in pbar:
_, batch = batch
x = batch.to(config.device)
loss = torch.zeros((len(metrics),))
with torch.no_grad():
if config.model == 'AEC':
x_rec, _ = model(x)
elif config.model == 'DEC':
_, x_rec, _ = model(x)
for i, metric in enumerate(metrics):
loss[i] = metric(x_rec, x)
running_loss += loss.cpu().detach().numpy() * x.size(0)
running_size += x.size(0)
pbar.set_postfix(
{
str(metric)[:-2]: f"{(running_loss[i] / running_size[i]):.4e}" \
for i, metric in enumerate(metrics)
}
)
epoch_loss = running_loss / len(dataloader.dataset)
return epoch_loss
def cluster_metrics(path, labels, x, z, save=True):
'''Calculates various metrics for clustering performance analysis.
Parameters
----------
path : str
Path to which clustering metrics results are saved.
labels : array (M,)
Sample-wise cluster assignment
x : array (M, D_)
Data space, i.e., spectrogram data (M samples, D_ features)
z : array (M,D)
Latent space data (M samples, D features)
save : bool (Default: True)
Save results to file or not.
Returns
-------
M : array (K,)
Number of data samples assigned to each class (K labels)
X_ip_avg : array (K,)
Inner product between the data space points and their mean.
X_MSE : array (K, D_)
Mean squared error between the data space points and their mean.
(K labels, D_ features)
X_MSE_avg : array (K,)
Class-averaged mean squared error between the data space points
and their mean.
X_MAE : array (K, D_)
Mean absolute error between the data space points and their
mean (K labels, D_ features).
X_MAE_avg : array (K,)
Class-averaged mean absolute error between the data space points
and their mean.
silh_scores_Z : array (K,)
Class-averaged silhouette scores of the latent space.
silh_scores_X : array (K,)
Class-averaged silhouette scores of the data space.
df : Pandas DataFrame
Dataframe containing all metrics results
'''
label_list = np.unique(labels)
n_clusters = len(label_list)
if torch.cuda.is_available():
silh_scores_Z = silhouette_samples(z, labels, chunksize=20000)
silh_scores_Z = cupy.asnumpy(silh_scores_Z)
else:
silh_scores_Z = silhouette_samples(z, labels)
silh_scores_X, _ = silhouette_samples_X(x, labels, RF=3)
silh_scores_avg_Z = np.mean(silh_scores_Z)
silh_scores_avg_X = np.mean(silh_scores_X)
_, _, n, o = x.shape
M = np.zeros((n_clusters,), dtype=int)
X_ip_avg = np.zeros((n_clusters,))
X_MSE = np.zeros((n_clusters, n*o))
X_MAE = np.zeros((n_clusters, n*o))
X_MSE_avg = np.zeros((n_clusters,))
X_MAE_avg = np.zeros((n_clusters,))
class_silh_scores_Z = np.zeros((n_clusters,))
class_silh_scores_X = np.zeros((n_clusters,))
for j in range(n_clusters):
# Data Space Metrics:
x_j = np.reshape(x[labels==j], (-1, 8700))
M[j] = len(x_j)
x_mean = np.mean(x_j, axis=0).reshape((1,-1))
x_mean = np.matlib.repmat(x_mean, M[j], 1)
# Inner Product
X_ip = linear_kernel(x_j, x_mean[0].reshape(1, -1)).flatten()
X_ip_avg[j] = np.mean(X_ip)
# MSE
X_MSE[j] = mean_squared_error(x_mean, x_j, multioutput='raw_values')
X_MSE_avg[j] = np.mean(X_MSE)
# MAE
X_MAE[j] = mean_absolute_error(x_mean, x_j, multioutput='raw_values')
X_MAE_avg[j] = np.mean(X_MAE)
# Silhouette Score - Latent Space
class_silh_scores_Z[j] = np.mean(silh_scores_Z[labels==j])
# Silhouette Score - Data Space
class_silh_scores_X[j] = np.mean(silh_scores_X[labels==j])
if save:
np.save(os.path.join(path, 'X_ip'), X_ip_avg)
np.save(os.path.join(path, 'X_MSE'), X_MSE)
np.save(os.path.join(path, 'X_MSE_avg'), X_MSE_avg)
np.save(os.path.join(path, 'X_MAE'), X_MAE)
np.save(os.path.join(path, 'X_MAE_avg'), X_MAE_avg)
np.save(os.path.join(path, 'silh_scores_Z'), silh_scores_Z)
np.save(os.path.join(path, 'silh_scores_X'), silh_scores_X)
df = pd.DataFrame(
data={
'class': label_list,
'N': M,
'inner_product': X_ip_avg,
'MSE_avg': X_MSE_avg,
'MAE_avg': X_MAE_avg,
'silh_score_Z': class_silh_scores_Z,
'silh_score_X': class_silh_scores_X
}
)
df.loc['mean'] = df.mean()
df.loc['mean', ['class', 'N']] = None
df.loc['mean', 'silh_score_Z'] = silh_scores_avg_Z
df.loc['mean', 'silh_score_X'] = silh_scores_avg_X
df.to_csv(os.path.join(path, 'cluster_performance.csv'))
return M, X_ip_avg, X_MSE, X_MSE_avg, X_MAE, X_MAE_avg, silh_scores_Z, silh_scores_X, df
def gmm(z_array, n_clusters):
'''Initialize clusters using Gaussian mixtures model algorithm.
Parameters
----------
z_array : array (M,D)
Latent space data (m_samples, d_features)
n_clusters : int
Number of clusters.
Returns
-------
labels : array (M,)
Sample-wise cluster assignment
centroids : array (n_clusters, n_features)
Cluster centroids
'''
M = z_array.shape[0]
# Initialize w/ K-Means
km = KMeans(
n_clusters=n_clusters,
max_iter=1000,
n_init=100,
random_state=2009
)
km.fit_predict(z_array)
labels = km.labels_
centroids = km.cluster_centers_
labels, counts = np.unique(labels, return_counts=True)
# Perform EM
gmm_weights = np.empty(len(labels))
for i in range(len(labels)):
gmm_weights[i] = counts[i] / M
GMM = GaussianMixture(
n_components=n_clusters,
max_iter=1000,
n_init=1,
weights_init=gmm_weights,
means_init=centroids
)
np.seterr(under='ignore')
labels = GMM.fit_predict(z_array)
centroids = GMM.means_
return labels, centroids
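# Hedged usage sketch for gmm() above, on synthetic latent vectors (the array sizes and
# cluster count are invented for illustration only):
#
#     >>> rng = np.random.RandomState(0)
#     >>> z = rng.rand(500, 9).astype(np.float32)
#     >>> labels, centroids = gmm(z, n_clusters=4)
#     >>> labels.shape, centroids.shape
#     ((500,), (4, 9))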
def gmm_fit(config, z_array, n_clusters):
'''Perform GMM clustering and save results.
Parameters
----------
config : Configuration object
Object containing information about the experiment configuration
z_array : array (M,D)
Latent space data (m_samples, d_features)
n_clusters : int
Number of clusters.
Returns
-------
None. The labels, centroids, clustering metrics, and figures are saved
to ``config.savepath_run``.
'''
tic = datetime.now()
print('Performing GMM...', end="", flush=True)
labels, centroids = gmm(z_array, n_clusters)
print('complete.')
print('Saving data......', end="", flush=True)
M = z_array.shape[0]
A = [{'idx': i, 'label': labels[i]} for i in np.arange(M)]
utils.save_labels(A, config.savepath_run)
np.save(os.path.join(config.savepath_run, 'labels'), labels)
np.save(os.path.join(config.savepath_run, 'centroids'), centroids)
print('complete.')
print('Performing clustering metrics...', end='', flush=True)
x = np.load(config.fname_dataset + '.npy')
_, _, _, _, _, _, silh_scores_Z, silh_scores_X, _ = cluster_metrics(
config.savepath_run,
labels,
x,
z_array
)
fig1 = plotting.view_silhscore(
silh_scores_Z,
labels,
n_clusters,
config.model,
config.show
)
fig1.savefig(
os.path.join(config.savepath_run, 'silh_score_Z.png'),
dpi=300,
facecolor='w'
)
fig2 = plotting.view_silhscore(
silh_scores_X,
labels,
n_clusters,
config.model,
config.show
)
fig2.savefig(
os.path.join(config.savepath_run, 'silh_score_X.png'),
dpi=300,
facecolor='w'
)
tsne_results = tsne(z_array)
fig3 = plotting.view_TSNE(tsne_results, labels, 'GMM', config.show)
fig3.savefig(
os.path.join(config.savepath_run, 't-SNE.png'),
dpi=300,
facecolor='w'
)
print('complete.')
toc = datetime.now()
print(f'GMM complete at {toc}; time elapsed = {toc-tic}.')
def initialize_clusters(model, dataloader, config, n_clusters=None):
'''Function selects and performs cluster initialization.
Parameters
----------
model : PyTorch model instance
Model with trained parameters.
dataloader : PyTorch dataloader instance
Loads data from disk into memory.
config : Configuration object
Object containing information about the experiment configuration
n_clusters : int
Number of clusters.
Returns
-------
labels : array (M,)
Sample-wise cluster assignment
centroids : array (n_clusters, n_features)
Cluster centroids
'''
if config.init == 'load':
print('Loading cluster initialization...', end='', flush=True)
path = os.path.abspath(os.path.join(config.saved_weights, os.pardir))
path = os.path.join(path, 'GMM', f'n_clusters={n_clusters}')
labels = np.load(os.path.join(path, 'labels.npy'))[config.index_tra]
centroids = np.load(os.path.join(path, 'centroids.npy'))
if config.init == "rand": # Random Initialization (for testing)
print('Initiating clusters with random points...', end='', flush=True)
labels, centroids = np.random.randint(0, n_clusters, (100)), np.random.uniform(size=(n_clusters,9))
else:
_, _, z_array = batch_eval(dataloader, model, config.device)
if config.init == "kmeans":
print('Initiating clusters with k-means...', end="", flush=True)
labels, centroids = kmeans(z_array, model.n_clusters)
elif config.init == "gmm": # GMM Initialization:
print('Initiating clusters with GMM...', end="", flush=True)
labels, centroids = gmm(z_array, model.n_clusters)
return labels, centroids
def kmeans(z_array, n_clusters):
'''Initiate clusters using k-means algorithm.
Parameters
----------
z_array : array (M,D)
Latent space data (m_samples, d_features)
n_clusters : int
Number of clusters.
Returns
-------
labels : array (M,)
Sample-wise cluster assignment
centroids : array (n_clusters,)
Cluster centroids
'''
km = KMeans(
n_clusters=n_clusters,
max_iter=1000,
n_init=100,
random_state=2009
)
km.fit_predict(z_array)
labels = km.labels_
centroids = km.cluster_centers_
return labels, centroids
def model_prediction(
config,
model,
dataloader,
metrics,
):
'''Primary machinery function for using AEC or DEC model in
prediction (evaluation) mode.
Parameters
----------
config : Configuration object
Object containing information about the experiment configuration
model : PyTorch model instance
Model with trained parameters.
dataloader : PyTorch dataloader instance
Loads data from disk into memory.
metrics : list
List of PyTorch metric instances
'''
print(f'Evaluating data using {config.model} model...')
device = config.device
n_clusters = config.n_clusters
savepath = config.savepath_exp
model.load_state_dict(torch.load(config.saved_weights, map_location=device))
model.eval()
bsz = dataloader.batch_size
z_array = np.zeros((len(dataloader.dataset), model.encoder.encoder[11].out_features), dtype=np.float32)
xr_array = np.zeros((len(dataloader.dataset), 1, 87, 100), dtype=np.float32)
pbar = tqdm(
dataloader,
leave=True,
desc="Loading",
unit="batch",
bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
)
if config.model == 'DEC':
q_array = np.zeros((len(dataloader.dataset), n_clusters),dtype=np.float32)
for b, batch in enumerate(pbar):
_, batch = batch
x = batch.to(device)
q, xr, z = model(x)
q_array[b * bsz:(b*bsz) + x.size(0), :] = q.detach().cpu().numpy()
z_array[b * bsz:(b*bsz) + x.size(0), :] = z.detach().cpu().numpy()
xr_array[b * bsz:(b*bsz) + x.size(0), :] = xr.detach().cpu().numpy()
labels = np.argmax(q_array.data, axis=1)
centroids = model.clustering.weights.detach().cpu().numpy()
del batch, x, q, xr, z
time.sleep(1)
print('Saving data...', end="", flush=True)
M = q_array.shape[0]
A = [{'idx': i, 'label': labels[i]} for i in np.arange(M)]
utils.save_labels(A, savepath)
np.save(os.path.join(savepath, 'q_DEC'), q_array)
np.save(os.path.join(savepath, 'Z_DEC'), z_array)
np.save(os.path.join(savepath, 'Xr_DEC'), xr_array)
np.save(os.path.join(savepath, 'labels_DEC'), labels)
np.save(os.path.join(savepath, 'centroids_DEC'), centroids)
print('complete.')
print('Performing clustering metrics...', end='', flush=True)
x = np.load(config.fname_dataset + '.npy')
_, _, _, _, _, _, silh_scores_Z, silh_scores_X, _ = cluster_metrics(savepath, labels, x, z_array)
fig = plotting.view_silhscore(silh_scores_Z, labels, n_clusters, config.model, config.show)
fig.savefig(os.path.join(savepath, 'silh_score_Z.png'), dpi=300, facecolor='w')
fig = plotting.view_silhscore(silh_scores_X, labels, n_clusters, config.model, config.show)
fig.savefig(os.path.join(savepath, 'silh_score_X.png'), dpi=300, facecolor='w')
print('complete.')
print('Creating figures...')
AEC_configpath = os.path.abspath(os.path.join(savepath, os.pardir, os.pardir))
AEC_configname = fnmatch.filter([f for f in os.listdir(AEC_configpath) if os.path.isfile(os.path.join(AEC_configpath, f))], '*.pkl')[0]
AEC_configpath = pickle.load(open(os.path.join(AEC_configpath, AEC_configname), 'rb'))['saved_weights']
fignames = [
'T-SNE',
'Gallery',
'LatentSpace',
'CDF',
'PDF'
]
figpaths = [os.path.join(savepath, name) for name in fignames]
[os.makedirs(path, exist_ok=True) for path in figpaths]
AEC_loadpath = os.path.abspath(os.path.join(AEC_configpath, os.pardir))
z_array_AEC = np.load(os.path.join(AEC_loadpath, 'Prediction', 'Z_AEC.npy'))
labels_GMM = np.load(os.path.join(AEC_loadpath, 'GMM', f'n_clusters={n_clusters}', 'labels.npy'))
centroids_GMM = np.load(os.path.join(AEC_loadpath, 'GMM', f'n_clusters={n_clusters}', 'centroids.npy'))
tsne_results = tsne(z_array)
plotargs = (
fignames,
figpaths,
model,
dataloader,
device,
config.fname_dataset,
z_array_AEC,
z_array,
labels_GMM,
labels,
centroids_GMM,
centroids,
tsne_results,
0,
config.show
)
plot_process = threading.Thread(
target=plotting.plotter_mp,
args=plotargs
)
plot_process.start()
print('complete.')
elif config.model == 'AEC':
running_loss = 0.
running_size = 0
for b, batch in enumerate(pbar):
_, batch = batch
x = batch.to(device)
xr, z = model(x)
loss = metrics[0](xr, x)
z_array[b * bsz:(b*bsz) + x.size(0), :] = z.detach().cpu().numpy()
xr_array[b * bsz:(b*bsz) + x.size(0), :] = xr.detach().cpu().numpy()
running_loss += loss.cpu().detach().numpy() * x.size(0)
running_size += x.size(0)
pbar.set_postfix(
{str(metrics[0])[:-2]: f"{(running_loss / running_size):.4e}"}
)
total_loss = running_loss / len(dataloader.dataset)
print(f'Dataset MSE = {total_loss:.4e}')
print('Saving data...', end="", flush=True)
with open(os.path.join(savepath, 'MSE.txt'), 'w') as f:
f.write(f'MSE = {total_loss:.4e}')
np.save(os.path.join(savepath, 'Loss_AEC'), total_loss)
np.save(os.path.join(savepath, 'Z_AEC'), z_array)
np.save(os.path.join(savepath, 'Xr_AEC'), xr_array)
print('complete.')
def model_training(config, model, dataloaders, metrics, optimizer, **hpkwargs):
'''Primary machinery function for using AEC or DEC model in
training mode.
Parameters
----------
config : Configuration object
Object containing information about the experiment configuration
model : PyTorch model instance
Model with trained parameters.
dataloaders : list
List of PyTorch dataloader instances that load data from disk
into memory.
metrics : list
List of PyTorch metric instances
optimizer : PyTorch optimizer instance
hpkwargs : dict
Dictionary of hyperparameter values.
'''
def AEC_training(config, model, dataloaders, metrics, optimizer, tb, **hpkwargs):
'''Subroutine for AEC training.
Parameters
----------
config : Configuration object
Object containing information about the experiment configuration
model : PyTorch model instance
Model with trained parameters.
dataloaders : list
List of PyTorch dataloader instances that load data from disk
into memory.
metrics : list
List of PyTorch metric instances
optimizer : PyTorch optimizer instance
tb : Tensorboard instance
For writing results to Tensorboard.
hpkwargs : dict
Dictionary of hyperparameter values.
'''
batch_size = hpkwargs.get('batch_size')
lr = hpkwargs.get('lr')
device = config.device
savepath_run = config.savepath_run
savepath_chkpnt = config.savepath_chkpnt
tra_loader = dataloaders[0]
val_loader = dataloaders[1]
if config.early_stopping:
best_val_loss = 10000
epochs = list()
tra_losses = list()
val_losses = list()
finished = False
n_epochs = config.n_epochs
fig = plotting.compare_images(
model,
0,
config,
savepath=savepath_run
)
tb.add_figure(
'TrainingProgress',
fig,
global_step=0,
close=True
)
del fig
for epoch in range(n_epochs):
print('-' * 100)
print(
f'Epoch [{epoch+1}/{n_epochs}] | '
f'Batch Size = {batch_size} | LR = {lr}'
)
# ==== Training Loop: =============================================
model, epoch_tra_mse = batch_training(model, tra_loader, optimizer, metrics[0], device)
tb.add_scalar('Training MSE', epoch_tra_mse, epoch+1)
if ((epoch + 1) % 5) == 0 and not (epoch == 0):
fig = plotting.compare_images(
model,
epoch + 1,
config,
savepath=savepath_run
)
tb.add_figure(
'TrainingProgress',
fig,
global_step=epoch+1,
close=True
)
del fig
# ==== Validation Loop: ===========================================
epoch_val_mse = batch_validation(model, val_loader, metrics, config)[0]
tb.add_scalar('Validation MSE', epoch_val_mse, epoch+1)
epochs, tra_losses, val_losses = utils.add_to_history(
[epochs, tra_losses, val_losses],
[epoch+1, epoch_tra_mse, epoch_val_mse]
)
if config.early_stopping:
if epoch_val_mse < best_val_loss:
strikes = 0
best_val_loss = epoch_val_mse
torch.save(
model.state_dict(),
os.path.join(savepath_chkpnt, 'AEC_Best_Weights.pt')
)
else:
if epoch == 0:
strikes = 1
else:
strikes += 1
if epoch > config.patience and strikes > config.patience:
print('Stopping Early.')
finished = True
break
else:
torch.save(
model.state_dict(),
os.path.join(savepath_chkpnt, f'AEC_Params_{epoch+1:03d}.pt')
)
# Collect Results =====================================================
hist_path = os.path.join(savepath_run, 'AEC_history.csv')
_ = utils.save_history(
{
'Epoch': epochs,
'Training Loss': tra_losses,
'Validation Loss': val_losses
},
hist_path
)
fig = plotting.view_history_AEC(hist_path)
fig.savefig(hist_path[:-4] + '.png', dpi=300, facecolor='w')
del fig
tb.add_hparams(
{'Batch Size': batch_size, 'LR': lr},
{
'hp/Training MSE': epoch_tra_mse,
'hp/Validation MSE': epoch_val_mse
}
)
fig = plotting.compare_images(
model,
epoch+1,
config,
savepath=savepath_run
)
tb.add_figure(
'TrainingProgress',
fig,
global_step=epoch+1,
close=True
)
fname = os.path.join(savepath_run, 'AEC_Params_Final.pt')
if config.early_stopping and (finished == True or epoch == n_epochs-1):
shutil.move(
os.path.join(savepath_chkpnt, 'AEC_Best_Weights.pt'),
fname
)
else:
torch.save(model.state_dict(), fname)
tb.add_text("Path to Saved Weights", fname, global_step=None)
print('AEC parameters saved.')
print(f'Path to saved weights: {fname}')
def DEC_training(config, model, dataloaders, metrics, optimizer, tb, **hpkwargs):
'''Subroutine for DEC training.
Parameters
----------
config : Configuration object
Object containing information about the experiment configuration
model : PyTorch model instance
Model with trained parameters.
dataloaders : list
List of PyTorch dataloader instances that load data from disk
into memory.
metrics : list
List of PyTorch metric instances
optimizer : PyTorch optimizer instance
tb : Tensorboard instance
For writing results to Tensorboard.
hpkwargs : dict
Dictionary of hyperparameter values.
'''
batch_size = hpkwargs.get('batch_size')
lr = hpkwargs.get('lr')
n_clusters = hpkwargs.get('n_clusters')
gamma = hpkwargs.get('gamma')
tol = hpkwargs.get('tol')
device = config.device
savepath_run = config.savepath_run
tra_loader = dataloaders[0]
fignames = [
'T-SNE',
'Gallery',
'LatentSpace',
'CDF',
'PDF'
]
figpaths = [os.path.join(savepath_run, name) for name in fignames]
[os.makedirs(path, exist_ok=True) for path in figpaths]
model.load_state_dict(
torch.load(config.saved_weights, map_location=device), strict=False
)
model.eval()
metric_mse = metrics[0]
metric_kld = metrics[1]
M = len(tra_loader.dataset)
if config.update_interval == -1:
update_interval = int(np.ceil(M / (batch_size * 2)))
else:
update_interval = int(np.ceil(M / (batch_size * config.update_interval)))
tb = SummaryWriter(log_dir = savepath_run)
if config.tbpid is not None:
tb.add_text(
"Tensorboard PID",
f"To terminate this TB instance, kill PID: {config.tbpid}",
global_step=None
)
tb.add_text("Path to Saved Outputs", savepath_run, global_step=None)
labels_prev, centroids = initialize_clusters(
model,
tra_loader,
config,
n_clusters=n_clusters
)
cluster_weights = torch.from_numpy(centroids).to(device)
with torch.no_grad():
model.state_dict()["clustering.weights"].copy_(cluster_weights)
torch.save(
model.state_dict(),
os.path.join(savepath_run, 'DEC_Params_Initial.pt')
)
print('complete.')
q, _, z_array0 = batch_eval(tra_loader, model, device) # <-- The CUDA problem occurs in here
p = target_distribution(q)
epoch = 0
tsne_results = tsne(z_array0)
plotargs = (
fignames,
figpaths,
model,
tra_loader,
device,
config.fname_dataset,
z_array0,
z_array0,
labels_prev,
labels_prev,
centroids,
centroids,
tsne_results,
epoch,
config.show
)
plotkwargs = {
'tb': tb
}
plot_process = threading.Thread(
target=plotting.plotter_mp,
args=plotargs,
kwargs=plotkwargs
)
plot_process.start()
iters = list()
rec_losses = list()
clust_losses = list()
total_losses = list()
deltas_iter = list()
deltas = list()
n_iter = 1
n_epochs = config.n_epochs
finished = False
for epoch in range(n_epochs):
print('-' * 100)
print(
f'Epoch [{epoch+1}/{n_epochs}] | '
f'# Clusters = {n_clusters} | '
f'Batch Size = {batch_size} | '
f'LR = {lr} | '
f'gamma = {gamma} | '
f'tol = {tol}'
)
running_loss = 0.0
running_loss_rec = 0.0
running_loss_clust = 0.0
running_size = 0
pbar = tqdm(
tra_loader,
leave=True,
unit="batch",
postfix={
"MSE": "%.6f" % 0.0,
"KLD": "%.6f" % 0.0,
"Loss": "%.6f" % 0.0
},
bar_format='{l_bar}{bar:20}{r_bar}{bar:-20b}'
)
# Iterate over data:
for batch_num, batch in enumerate(pbar):
_, batch = batch
x = batch.to(device)
# Update target distribution, check performance
if (batch_num % update_interval == 0) and not \
(batch_num == 0 and epoch == 0):
q, labels, _ = batch_eval(tra_loader, model, device)
p = target_distribution(q)
# check stop criterion
delta_label = np.sum(labels != labels_prev).astype(np.float32)\
/ labels.shape[0]
deltas_iter, deltas = utils.add_to_history(
[deltas_iter, deltas],
[n_iter, delta_label]
)
# deltas.append(delta_label)
tb.add_scalar('delta', delta_label, n_iter)
labels_prev = np.copy(labels)
if delta_label < tol:
time.sleep(2)
print('Stop criterion met, training complete.')
finished = True
break
tar_dist = p[running_size:(running_size + x.size(0)), :]
tar_dist = torch.from_numpy(tar_dist).to(device)
# zero the parameter gradients
model.train()
optimizer.zero_grad()
# Calculate losses and backpropagate
with torch.set_grad_enabled(True):
q, x_rec, _ = model(x)
loss_rec = metric_mse(x_rec, x)
loss_clust = gamma * metric_kld(torch.log(q), tar_dist) \
/ x.size(0)
loss = loss_rec + loss_clust
loss.backward()
optimizer.step()
running_size += x.size(0)
running_loss += loss.detach().cpu().numpy() * x.size(0)
running_loss_rec += loss_rec.detach().cpu().numpy() * x.size(0)
running_loss_clust += loss_clust.detach().cpu().numpy() * x.size(0)
accum_loss = running_loss / running_size
accum_loss_rec = running_loss_rec / running_size
accum_loss_clust = running_loss_clust / running_size
pbar.set_postfix(
MSE = f"{accum_loss_rec:.4e}",
KLD = f"{accum_loss_clust:.4e}",
Loss = f"{accum_loss:.4e}"
)
iters, rec_losses, clust_losses, total_losses = \
utils.add_to_history(
[iters, rec_losses, clust_losses, total_losses],
[n_iter, accum_loss_rec, accum_loss_clust, accum_loss]
)
tb.add_scalars(
'Losses',
{
'Loss': accum_loss,
'MSE': accum_loss_rec,
'KLD': accum_loss_clust
},
n_iter
)
tb.add_scalar('Loss', accum_loss, n_iter)
tb.add_scalar('MSE', accum_loss_rec, n_iter)
tb.add_scalar('KLD', accum_loss_clust, n_iter)
n_iter += 1
# Save figures every 4 epochs or at end of training ===============
if (((epoch + 1) % 4 == 0) and not (epoch == 0)) or finished:
_, _, z_array1 = batch_eval(tra_loader, model, device)
tsne_results = tsne(z_array1)
plotargs = (
fignames,
figpaths,
model,
tra_loader,
device,
config.fname_dataset,
z_array0,
z_array1,
labels_prev,
labels,
centroids,
model.clustering.weights.detach().cpu().numpy(),
tsne_results,
epoch+1,
config.show
)
plotkwargs = {'tb': tb}
plot_process = threading.Thread(
target=plotting.plotter_mp,
args=plotargs,
kwargs=plotkwargs
)
plot_process.start()
if finished:
break
_ = utils.save_history(
{
'Iteration': iters,
'Reconstruction Loss': rec_losses,
'Clustering Loss': clust_losses,
'Total Loss': total_losses
},
os.path.join(savepath_run, 'DEC_history.csv')
)
_ = utils.save_history(
{
'Iteration': deltas_iter,
'Delta': deltas
},
os.path.join(savepath_run, 'Delta_history.csv')
)
tb.add_hparams(
{
'Clusters': n_clusters,
'Batch Size': batch_size,
'LR': lr,
'gamma': gamma,
'tol': tol},
{
'hp/MSE': accum_loss_rec,
'hp/KLD': accum_loss_clust,
'hp/Loss': accum_loss
}
)
fname = os.path.join(savepath_run, 'DEC_Params_Final.pt')
torch.save(model.state_dict(), fname)
tb.add_text("Path to Saved Weights", fname, global_step=None)
tb.close()
print('DEC parameters saved.')
tic = datetime.now()
print('Commencing training...')
tb = SummaryWriter(log_dir=config.savepath_run)
if config.tbpid is not None:
tb.add_text(
"Tensorboard PID",
f"To terminate this TB instance, kill PID: {config.tbpid}",
global_step=None
)
tb.add_text("Path to Saved Outputs", config.savepath_run, global_step=None)
if config.model == "AEC":
AEC_training(
config,
model,
dataloaders,
metrics,
optimizer,
tb,
**hpkwargs
)
elif config.model == "DEC":
DEC_training(
config,
model,
dataloaders,
metrics,
optimizer,
tb,
**hpkwargs
)
toc = datetime.now()
print(f'Training complete at {toc}; time elapsed = {toc-tic}.')
def silhouette_samples_X(x, labels, RF=2):
'''Calculates silhouette scores for the data space, i.e.,
spectrograms. Because of memory constraints, the silhouette score of
the entire dataset of spectrograms cannot be calculated, so the
    data space is decimated along the spectrogram dimensions by a
    reduction factor (RF). A GPU-enabled score is computed if CUDA is
    available.
    Parameters
    ----------
    x : array (M, 1, N, O)
        Data space, i.e., spectrograms (M samples of size N x O)
    labels : array (M,)
        Cluster label assigned to each sample.
    RF : int
        Reduction factor applied to each spectrogram dimension.
    Returns
    -------
    scores : array (M,)
        Array containing sample-wise silhouette scores.
    x_ : array (M, N / RF, O / RF)
        Decimated data space, i.e., spectrograms.
'''
x_ = x[:, :, ::int(RF), ::int(RF)].squeeze()
_, n, o = x_.shape
x_ = np.reshape(x_, (-1, n * o))
scores = silhouette_samples(x_, labels, chunksize=20000)
if torch.cuda.is_available():
scores = cupy.asnumpy(scores)
x_ = np.reshape(x_, (-1, n, o))
return scores, x_
def target_distribution(q):
'''From Xie/Girshick/Farhadi (2016). Computes the target distribution p,
    given soft assignments, q. The target distribution is generated by giving
    more weight to 'high confidence' samples - those with a higher probability
    of being assigned to a certain cluster. This is used in the KL-divergence
    loss function.
    Parameters
    ----------
    q : array (M,D)
        Soft assignment probabilities - Probabilities of each sample being
assigned to each cluster [n_samples, n_features]
Returns
-------
p : array (M,D)
Auxiliary target distribution of shape [n_samples, n_features].
'''
p = q ** 2 / np.sum(q, axis=0)
p = np.transpose(np.transpose(p) / np.sum(p, axis=1))
return np.round(p, 5)
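# Illustrative check of target_distribution (hypothetical values): sharpening
# keeps each row a probability distribution while boosting confident clusters.
#   q_demo = np.array([[0.7, 0.3], [0.2, 0.8]])
#   p_demo = target_distribution(q_demo)
#   # rows of p_demo still sum to 1; the dominant entries (0.7, 0.8) are amplified.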
def tsne(data):
'''Perform t-SNE on data.
Parameters
----------
data : array (M,N)
Returns
-------
results : array (M,2)
2-D t-SNE embedding
'''
print('Running t-SNE...', end="", flush=True)
M = len(data)
np.seterr(under='warn')
results = TSNE(
n_components=2,
perplexity=int(M/100),
early_exaggeration=20,
learning_rate=int(M/12),
n_iter=2000,
verbose=0,
random_state=2009
).fit_transform(data.astype('float64'))
print('complete.')
return results
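# Illustrative usage (hypothetical array name and size): for 5000 latent
# vectors, the call below would use perplexity ~50 and learning rate ~416.
#   embedding = tsne(latents)   # embedding.shape == (5000, 2)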
|
the-stack_106_16692
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://bitbucket.org/rptlab/reportlab/history-node/tip/src/reportlab/graphics/renderPS.py
__version__='3.3.0'
__doc__="""Render drawing objects in Postscript"""
from reportlab.pdfbase.pdfmetrics import getFont, stringWidth, unicode2T1 # for font info
from reportlab.lib.utils import getBytesIO, getStringIO, asBytes, char2int, rawBytes, asNative, isUnicode
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.graphics.renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS
import math
from operator import getitem
from reportlab import rl_config, xrange, ascii
from reportlab.pdfgen.canvas import FILL_EVEN_ODD, FILL_NON_ZERO
_ESCAPEDICT={}
for c in xrange(256):
if c<32 or c>=127:
_ESCAPEDICT[c]= '\\%03o' % c
elif c in (ord('\\'),ord('('),ord(')')):
_ESCAPEDICT[c] = '\\'+chr(c)
else:
_ESCAPEDICT[c] = chr(c)
del c
def _escape_and_limit(s):
s = asBytes(s)
R = []
aR = R.append
n = 0
for c in s:
c = _ESCAPEDICT[char2int(c)]
aR(c)
n += len(c)
if n>=200:
n = 0
aR('\\\n')
return ''.join(R)
# we need to create encoding vectors for each font we use, or they will
# come out in Adobe's old StandardEncoding, which NOBODY uses.
PS_WinAnsiEncoding="""
/RE { %def
findfont begin
currentdict dup length dict begin
{ %forall
1 index /FID ne { def } { pop pop } ifelse
} forall
/FontName exch def dup length 0 ne { %if
/Encoding Encoding 256 array copy def
0 exch { %forall
dup type /nametype eq { %ifelse
Encoding 2 index 2 index put
pop 1 add
}{ %else
exch pop
} ifelse
} forall
} if pop
currentdict dup end end
/FontName get exch definefont pop
} bind def
/WinAnsiEncoding [
39/quotesingle 96/grave 128/euro 130/quotesinglbase/florin/quotedblbase
/ellipsis/dagger/daggerdbl/circumflex/perthousand
/Scaron/guilsinglleft/OE 145/quoteleft/quoteright
/quotedblleft/quotedblright/bullet/endash/emdash
/tilde/trademark/scaron/guilsinglright/oe/dotlessi
159/Ydieresis 164/currency 166/brokenbar 168/dieresis/copyright
/ordfeminine 172/logicalnot 174/registered/macron/ring
177/plusminus/twosuperior/threesuperior/acute/mu
183/periodcentered/cedilla/onesuperior/ordmasculine
188/onequarter/onehalf/threequarters 192/Agrave/Aacute
/Acircumflex/Atilde/Adieresis/Aring/AE/Ccedilla
/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute
/Icircumflex/Idieresis/Eth/Ntilde/Ograve/Oacute
/Ocircumflex/Otilde/Odieresis/multiply/Oslash
/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
/germandbls/agrave/aacute/acircumflex/atilde/adieresis
/aring/ae/ccedilla/egrave/eacute/ecircumflex
/edieresis/igrave/iacute/icircumflex/idieresis
/eth/ntilde/ograve/oacute/ocircumflex/otilde
/odieresis/divide/oslash/ugrave/uacute/ucircumflex
/udieresis/yacute/thorn/ydieresis
] def
"""
class PSCanvas:
def __init__(self,size=(300,300), PostScriptLevel=2):
self.width, self.height = size
xtraState = []
self._xtraState_push = xtraState.append
self._xtraState_pop = xtraState.pop
self.comments = 0
self.code = []
self.code_append = self.code.append
self._sep = '\n'
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = self._color = None
self._fontsUsed = [] # track them as we go
self.setFont(STATE_DEFAULTS['fontName'],STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
self.PostScriptLevel=PostScriptLevel
self._fillMode = FILL_EVEN_ODD
def comment(self,msg):
if self.comments: self.code_append('%'+msg)
def drawImage(self, image, x1,y1, width=None,height=None): # Postscript Level2 version
# select between postscript level 1 or level 2
if self.PostScriptLevel==1:
self._drawImageLevel1(image, x1,y1, width, height)
elif self.PostScriptLevel==2:
self._drawImageLevel2(image, x1, y1, width, height)
else :
raise ValueError('Unsupported Postscript Level %s' % self.PostScriptLevel)
def clear(self):
self.code_append('showpage') # ugh, this makes no sense oh well.
def _t1_re_encode(self):
if not self._fontsUsed: return
# for each font used, reencode the vectors
C = []
for fontName in self._fontsUsed:
fontObj = getFont(fontName)
if not fontObj._dynamicFont and fontObj.encName=='WinAnsiEncoding':
C.append('WinAnsiEncoding /%s /%s RE' % (fontName, fontName))
if C:
C.insert(0,PS_WinAnsiEncoding)
self.code.insert(1, self._sep.join(C))
def save(self,f=None):
if not hasattr(f,'write'):
_f = open(f,'wb')
else:
_f = f
if self.code[-1]!='showpage': self.clear()
self.code.insert(0,'''\
%%!PS-Adobe-3.0 EPSF-3.0
%%%%BoundingBox: 0 0 %d %d
%%%% Initialization:
/m {moveto} bind def
/l {lineto} bind def
/c {curveto} bind def
''' % (self.width,self.height))
self._t1_re_encode()
_f.write(rawBytes(self._sep.join(self.code)))
if _f is not f:
_f.close()
from reportlab.lib.utils import markfilename
markfilename(f,creatorcode='XPR3',filetype='EPSF')
def saveState(self):
self._xtraState_push((self._fontCodeLoc,))
self.code_append('gsave')
def restoreState(self):
self.code_append('grestore')
self._fontCodeLoc, = self._xtraState_pop()
def stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font)."""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def setLineCap(self,v):
if self._lineCap!=v:
self._lineCap = v
self.code_append('%d setlinecap'%v)
def setLineJoin(self,v):
if self._lineJoin!=v:
self._lineJoin = v
self.code_append('%d setlinejoin'%v)
def setDash(self, array=[], phase=0):
"""Two notations. pass two numbers, or an array and phase"""
# copied and modified from reportlab.canvas
psoperation = "setdash"
if isinstance(array,(float,int)):
self.code_append('[%s %s] 0 %s' % (array, phase, psoperation))
elif isinstance(array,(tuple,list)):
assert phase >= 0, "phase is a length in user space"
textarray = ' '.join(map(str, array))
self.code_append('[%s] %s %s' % (textarray, phase, psoperation))
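    # Illustrative calls (hypothetical canvas instance "canv") showing both
    # accepted notations and the PostScript they emit:
    #   canv.setDash(6)          # appends '[6 0] 0 setdash'
    #   canv.setDash([3, 3], 1)  # appends '[3 3] 1 setdash'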
def setStrokeColor(self, color):
self._strokeColor = color
self.setColor(color)
def setColor(self, color):
if self._color!=color:
self._color = color
if color:
if hasattr(color, "cyan"):
self.code_append('%s setcmykcolor' % fp_str(color.cyan, color.magenta, color.yellow, color.black))
else:
self.code_append('%s setrgbcolor' % fp_str(color.red, color.green, color.blue))
def setFillColor(self, color):
self._fillColor = color
self.setColor(color)
def setFillMode(self, v):
self._fillMode = v
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.code_append('%s setlinewidth' % width)
def setFont(self,font,fontSize,leading=None):
if self._font!=font or self._fontSize!=fontSize:
self._fontCodeLoc = len(self.code)
self._font = font
self._fontSize = fontSize
self.code_append('')
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append('%s m %s l stroke' % (fp_str(x1, y1), fp_str(x2, y2)))
def _escape(self, s):
'''
return a copy of string s with special characters in postscript strings
escaped with backslashes.
'''
try:
return _escape_and_limit(s)
except:
raise ValueError("cannot escape %s" % ascii(s))
def _issueT1String(self,fontObj,x,y,s):
fc = fontObj
code_append = self.code_append
fontSize = self._fontSize
fontsUsed = self._fontsUsed
escape = self._escape
if not isUnicode(s):
try:
s = s.decode('utf8')
except UnicodeDecodeError as e:
i,j = e.args[2:4]
raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],s[i-10:i],s[i:j],s[j:j+10]),)))
for f, t in unicode2T1(s,[fontObj]+fontObj.substitutionFonts):
if f!=fc:
psName = asNative(f.face.name)
code_append('(%s) findfont %s scalefont setfont' % (psName,fp_str(fontSize)))
if psName not in fontsUsed:
fontsUsed.append(psName)
fc = f
code_append('%s m (%s) show ' % (fp_str(x,y),escape(t)))
x += f.stringWidth(t.decode(f.encName),fontSize)
if fontObj!=fc:
self._font = None
self.setFont(fontObj.face.name,fontSize)
def drawString(self, x, y, s, angle=0):
if self._fillColor != None:
fontObj = getFont(self._font)
if not self.code[self._fontCodeLoc]:
psName = asNative(fontObj.face.name)
self.code[self._fontCodeLoc]='(%s) findfont %s scalefont setfont' % (psName,fp_str(self._fontSize))
if psName not in self._fontsUsed:
self._fontsUsed.append(psName)
self.setColor(self._fillColor)
if angle!=0:
self.code_append('gsave %s translate %s rotate' % (fp_str(x,y),fp_str(angle)))
x = y = 0
if fontObj._dynamicFont:
s = self._escape(s)
self.code_append('%s m (%s) show ' % (fp_str(x,y),s))
else:
self._issueT1String(fontObj,x,y,s)
if angle!=0:
self.code_append('grestore')
def drawCentredString(self, x, y, text, text_anchor='middle'):
if self._fillColor is not None:
textLen = stringWidth(text, self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,self._font,self._fontSize)
self.drawString(x,y,text)
    def drawRightString(self, text, x, y):
        self.drawCentredString(x, y, text, text_anchor='end')
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
########################################################################################
def rect(self, x1,y1, x2,y2, stroke=1, fill=1):
"Draw a rectangle between x1,y1, and x2,y2"
# Path is drawn in counter-clockwise direction"
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
self.polygon(((x1,y1),(x2,y1),(x2,y2),(x1,y2)), closed=1, stroke=stroke, fill = fill)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8):
"""Draw a rounded rectangle between x1,y1, and x2,y2,
with corners inset as ellipses with x radius rx and y radius ry.
These should have x1<x2, y1<y2, rx>0, and ry>0."""
# Path is drawn in counter-clockwise direction
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
# Note: arcto command draws a line from current point to beginning of arc
# save current matrix, translate to center of ellipse, scale by rx ry, and draw
# a circle of unit radius in counterclockwise dir, return to original matrix
# arguments are (cx, cy, rx, ry, startAngle, endAngle)
ellipsePath = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s arc setmatrix'
# choice between newpath and moveTo beginning of arc
# go with newpath for precision, does this violate any assumptions in code???
rr = ['newpath'] # Round Rect code path
a = rr.append
# upper left corner ellipse is first
a(ellipsePath % (x1+rx, y1+ry, rx, -ry, 90, 180))
a(ellipsePath % (x1+rx, y2-ry, rx, -ry, 180, 270))
a(ellipsePath % (x2-rx, y2-ry, rx, -ry, 270, 360))
a(ellipsePath % (x2-rx, y1+ry, rx, -ry, 0, 90) )
a('closepath')
self._fillAndStroke(rr)
def ellipse(self, x1,y1, x2,y2):
"""Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
These should have x1<x2 and y1<y2."""
#Just invoke drawArc to actually draw the ellipse
self.drawArc(x1,y1, x2,y2)
def circle(self, xc, yc, r):
self.ellipse(xc-r,yc-r, xc+r,yc+r)
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2."""
#calculate centre of ellipse
#print "x1,y1,x2,y2,startAng,extent,fromcenter", x1,y1,x2,y2,startAng,extent,fromcenter
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
codeline = self._genArcCode(x1, y1, x2, y2, startAng, extent)
startAngleRadians = math.pi*startAng/180.0
extentRadians = math.pi*extent/180.0
endAngleRadians = startAngleRadians + extentRadians
codelineAppended = 0
# fill portion
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append(codeline)
codelineAppended = 1
if self._strokeColor!=None: self.code_append('gsave')
self.lineTo(cx,cy)
self.code_append('eofill')
if self._strokeColor!=None: self.code_append('grestore')
# stroke portion
if self._strokeColor != None:
# this is a bit hacked up. There is certainly a better way...
self.setColor(self._strokeColor)
(startx, starty) = (cx+rx*math.cos(startAngleRadians), cy+ry*math.sin(startAngleRadians))
if not codelineAppended:
self.code_append(codeline)
if fromcenter:
# move to center
self.lineTo(cx,cy)
self.lineTo(startx, starty)
self.code_append('closepath')
self.code_append('stroke')
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
"Calculate the path for an arc inscribed in rectangle defined by (x1,y1),(x2,y2)"
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
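    # Illustrative output of _genArcCode for a 90-degree arc in the box
    # (0, 0)-(100, 50) (the string the formatting above produces):
    #   'matrix currentmatrix 50.0 25.0 translate 50.0 25.0 scale 0 0 1 0 90 arc setmatrix'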
def polygon(self, p, closed=0, stroke=1, fill=1):
assert len(p) >= 2, 'Polygon must have 2 or more points'
start = p[0]
p = p[1:]
poly = []
a = poly.append
a("%s m" % fp_str(start))
for point in p:
a("%s l" % fp_str(point))
if closed:
a("closepath")
self._fillAndStroke(poly,stroke=stroke,fill=fill)
def lines(self, lineList, color=None, width=None):
if self._strokeColor != None:
self._setColor(self._strokeColor)
codeline = '%s m %s l stroke'
for line in lineList:
self.code_append(codeline % (fp_str(line[0]),fp_str(line[1])))
def moveTo(self,x,y):
self.code_append('%s m' % fp_str(x, y))
def lineTo(self,x,y):
self.code_append('%s l' % fp_str(x, y))
def curveTo(self,x1,y1,x2,y2,x3,y3):
self.code_append('%s c' % fp_str(x1,y1,x2,y2,x3,y3))
def closePath(self):
self.code_append('closepath')
def polyLine(self, p):
assert len(p) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.moveTo(p[0][0], p[0][1])
for t in p[1:]:
self.lineTo(t[0], t[1])
self.code_append('stroke')
def drawFigure(self, partList, closed=0):
figureCode = []
a = figureCode.append
first = 1
for part in partList:
op = part[0]
args = list(part[1:])
if op == figureLine:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s l" % fp_str(args[2:]))
elif op == figureArc:
first = 0
x1,y1,x2,y2,startAngle,extent = args[:6]
a(self._genArcCode(x1,y1,x2,y2,startAngle,extent))
elif op == figureCurve:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s curveto" % fp_str(args[2:]))
else:
raise TypeError("unknown figure operator: "+op)
if closed:
a("closepath")
self._fillAndStroke(figureCode)
def _fillAndStroke(self,code,clip=0,fill=1,stroke=1,fillMode=None):
fill = self._fillColor and fill
stroke = self._strokeColor and stroke
if fill or stroke or clip:
self.code.extend(code)
if fill:
if fillMode is None:
fillMode = self._fillMode
if stroke or clip: self.code_append("gsave")
self.setColor(self._fillColor)
self.code_append("eofill" if fillMode==FILL_EVEN_ODD else "fill")
if stroke or clip: self.code_append("grestore")
if stroke:
if clip: self.code_append("gsave")
self.setColor(self._strokeColor)
self.code_append("stroke")
if clip: self.code_append("grestore")
if clip:
self.code_append("clip")
self.code_append("newpath")
def translate(self,x,y):
self.code_append('%s translate' % fp_str(x,y))
def scale(self,x,y):
self.code_append('%s scale' % fp_str(x,y))
def transform(self,a,b,c,d,e,f):
self.code_append('[%s] concat' % fp_str(a,b,c,d,e,f))
def _drawTimeResize(self,w,h):
'''if this is used we're probably in the wrong world'''
self.width, self.height = w, h
def _drawImageLevel1(self, image, x1, y1, width=None, height=None):
# Postscript Level1 version available for fallback mode when Level2 doesn't work
# For now let's start with 24 bit RGB images (following piddlePDF again)
component_depth = 8
myimage = image.convert('RGB')
imgwidth, imgheight = myimage.size
if not width:
width = imgwidth
if not height:
height = imgheight
#print 'Image size (%d, %d); Draw size (%d, %d)' % (imgwidth, imgheight, width, height)
# now I need to tell postscript how big image is
# "image operators assume that they receive sample data from
# their data source in x-axis major index order. The coordinate
# of the lower-left corner of the first sample is (0,0), of the
# second (1,0) and so on" -PS2 ref manual p. 215
#
        # The ImageMatrix maps unit square of user space to boundary of the source image
#
# The CurrentTransformationMatrix (CTM) maps the unit square of
# user space to the rect...on the page that is to receive the
# image. A common ImageMatrix is [width 0 0 -height 0 height]
# (for a left to right, top to bottom image )
# first let's map the user coordinates start at offset x1,y1 on page
self.code.extend([
'gsave',
            '%s %s translate' % (x1,y1), # need to start at lower left of image
'%s %s scale' % (width,height),
'/scanline %d 3 mul string def' % imgwidth # scanline by multiples of image width
])
# now push the dimensions and depth info onto the stack
# and push the ImageMatrix to map the source to the target rectangle (see above)
        # finally specify source (PS2 pp. 225) and by example
self.code.extend([
'%s %s %s' % (imgwidth, imgheight, component_depth),
'[%s %s %s %s %s %s]' % (imgwidth, 0, 0, -imgheight, 0, imgheight),
'{ currentfile scanline readhexstring pop } false 3',
'colorimage '
])
# data source output--now we just need to deliver a hex encode
# series of lines of the right overall size can follow
# piddlePDF again
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
self.code_append('% end of image data') # for clarity
self.code_append('grestore') # return coordinates to normal
# end of drawImage
def _AsciiHexEncode(self, input): # also based on piddlePDF
"Helper function used by images"
output = getStringIO()
for char in asBytes(input):
output.write('%02x' % char2int(char))
return output.getvalue()
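    # Illustrative behaviour: _AsciiHexEncode(b'AB') returns '4142'
    # (two lowercase hex digits per input byte).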
def _drawImageLevel2(self, image, x1,y1, width=None,height=None): # Postscript Level2 version
'''At present we're handling only PIL'''
### what sort of image are we to draw
if image.mode=='L' :
imBitsPerComponent = 8
imNumComponents = 1
myimage = image
        elif image.mode == '1':
            # 1-bit images are expanded to 8-bit grayscale before output
            myimage = image.convert('L')
            imNumComponents = 1
            imBitsPerComponent = 8
else :
myimage = image.convert('RGB')
imNumComponents = 3
imBitsPerComponent = 8
imwidth, imheight = myimage.size
if not width:
width = imwidth
if not height:
height = imheight
self.code.extend([
'gsave',
            '%s %s translate' % (x1,y1), # need to start at lower left of image
'%s %s scale' % (width,height)])
if imNumComponents == 3 :
self.code_append('/DeviceRGB setcolorspace')
elif imNumComponents == 1 :
self.code_append('/DeviceGray setcolorspace')
# create the image dictionary
self.code_append("""
<<
/ImageType 1
/Width %d /Height %d %% dimensions of source image
/BitsPerComponent %d""" % (imwidth, imheight, imBitsPerComponent) )
if imNumComponents == 1:
self.code_append('/Decode [0 1]')
if imNumComponents == 3:
self.code_append('/Decode [0 1 0 1 0 1] %% decode color values normally')
self.code.extend([ '/ImageMatrix [%s 0 0 %s 0 %s]' % (imwidth, -imheight, imheight),
'/DataSource currentfile /ASCIIHexDecode filter',
'>> % End image dictionary',
'image'])
# after image operator just need to dump image dat to file as hexstring
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
        self.code_append('> % end of image data') # > is EOD for hex encoded filter, for clarity
self.code_append('grestore') # return coordinates to normal
# renderpdf - draws them onto a canvas
"""Usage:
from reportlab.graphics import renderPS
renderPS.draw(drawing, canvas, x, y)
Execute the script to see some test drawings."""
from reportlab.graphics.shapes import *
# hack so we only get warnings once each
#warnOnce = WarnOnce()
# the main entry point for users...
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
"""As it says"""
R = _PSRenderer()
R.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
def _pointsFromList(L):
'''
given a list of coordinates [x0, y0, x1, y1....]
    produce a list of points [(x0,y0), (x1,y1),....]
'''
P=[]
a = P.append
for i in range(0,len(L),2):
a((L[i],L[i+1]))
return P
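# Illustrative behaviour:
#   _pointsFromList([0, 0, 10, 0, 10, 10]) == [(0, 0), (10, 0), (10, 10)]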
class _PSRenderer(Renderer):
"""This draws onto a EPS document. It needs to be a class
rather than a function, as some EPS-specific state tracking is
needed outside of the state info in the SVG model."""
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
self._canvas.comment('begin node %r'%node)
color = self._canvas._color
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
self._canvas._color = color
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
## _restores = {'stroke':'_stroke','stroke_width': '_lineWidth','stroke_linecap':'_lineCap',
## 'stroke_linejoin':'_lineJoin','fill':'_fill','font_family':'_font',
## 'font_size':'_fontSize'}
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def drawRect(self, rect):
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, rect.rx, rect.ry
)
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r)
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
startangledegrees = wedge.startangledegrees
endangledegrees = wedge.endangledegrees
centerx= wedge.centerx
centery = wedge.centery
radius = wedge.radius
extent = endangledegrees - startangledegrees
self._canvas.drawArc(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2)
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1)
def drawString(self, stringObj):
if self._canvas._fillColor:
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x,stringObj.y,stringObj.text
            if text_anchor not in ('start', 'inherited'):
font, fontSize = S['fontName'], S['fontSize']
textLen = stringWidth(text, font,fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,font,fontSize,encoding='winansi')
else:
raise ValueError('bad value for text_anchor '+str(text_anchor))
self._canvas.drawString(x,y,text)
def drawPath(self, path, fillMode=None):
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
autoclose = getattr(path,'autoclose','')
def rP(**kwds):
return _renderPath(path, drawFuncs, **kwds)
if fillMode is None:
fillMode = getattr(path,'fillMode',c._fillMode)
fill = c._fillColor is not None
stroke = c._strokeColor is not None
clip = path.isClipPath
fas = lambda **kwds: c._fillAndStroke([], fillMode=fillMode, **kwds)
pathFill = lambda : c._fillAndStroke([], stroke=0, fillMode=fillMode)
pathStroke = lambda : c._fillAndStroke([], fill=0)
if autoclose=='svg':
rP()
fas(stroke=stroke,fill=fill,clip=clip)
elif autoclose=='pdf':
if fill:
rP(forceClose=True)
fas(stroke=stroke,fill=fill,clip=clip)
elif stroke or clip:
rP()
fas(stroke=stroke,fill=0,clip=clip)
else:
if fill and rP(countOnly=True):
rP()
elif stroke or clip:
fas(stroke=stroke,fill=0,clip=clip)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
self._canvas.transform(value[0], value[1], value[2],
value[3], value[4], value[5])
elif key == 'strokeColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
## elif key == 'stroke_opacity':
## warnOnce('Stroke Opacity not supported yet')
elif key == 'fillColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setFillColor(value)
## elif key == 'fill_rule':
## warnOnce('Fill rules not done yet')
## elif key == 'fill_opacity':
## warnOnce('Fill opacity not done yet')
elif key in ['fontSize', 'fontName']:
# both need setting together in PDF
# one or both might be in the deltas,
# so need to get whichever is missing
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
def drawImage(self, image):
from reportlab.lib.utils import ImageReader
im = ImageReader(image.path)
self._canvas.drawImage(im._image,image.x,image.y,image.width,image.height)
def drawToFile(d,fn, showBoundary=rl_config.showBoundary,**kwd):
d = renderScaledDrawing(d)
c = PSCanvas((d.width,d.height))
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def drawToString(d, showBoundary=rl_config.showBoundary):
"Returns a PS as a string in memory, without touching the disk"
s = getBytesIO()
drawToFile(d, s, showBoundary=showBoundary)
return s.getvalue()
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test(outDir='epsout',shout=False):
from reportlab.graphics import testshapes
from reportlab.rl_config import verbose
OLDFONTS = testshapes._FONTS[:]
testshapes._FONTS[:] = ['Times-Roman','Times-Bold','Times-Italic', 'Times-BoldItalic','Courier']
try:
import os
# save all drawings and their doc strings from the test file
if not os.path.isdir(outDir):
os.mkdir(outDir)
#grab all drawings from the test module
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
drawing = eval('testshapes.' + funcname + '()') #execute it
docstring = eval('testshapes.' + funcname + '.__doc__')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = outDir + os.sep + 'renderPS_%d.eps'%i
drawToFile(d,filename)
if shout or verbose>2: print('renderPS test saved %s' % ascii(filename))
i += 1
finally:
testshapes._FONTS[:] = OLDFONTS
if __name__=='__main__':
import sys
if len(sys.argv)>1:
outdir = sys.argv[1]
else:
outdir = 'epsout'
test(outdir,shout=True)
|
the-stack_106_16693
|
from typing import Union, Dict
import pytest
@pytest.fixture(scope="session")
def client():
"""Return TestClient for the regular OPTIMADE server"""
from .utils import client_factory
return client_factory()(server="regular")
@pytest.fixture(scope="session")
def index_client():
"""Return TestClient for the index OPTIMADE server"""
from .utils import client_factory
return client_factory()(server="index")
@pytest.fixture(scope="session", params=["regular", "index"])
def both_clients(request):
"""Return TestClient for both the regular and index OPTIMADE server"""
from .utils import client_factory
return client_factory()(server=request.param)
@pytest.fixture(scope="session", params=["regular", "index"])
def both_fake_remote_clients(request):
"""Return TestClient for both the regular and index OPTIMADE server, with
the additional option `raise_server_exceptions` set to `False`, to mimic a
remote webserver.
"""
from .utils import client_factory
return client_factory()(server=request.param, raise_server_exceptions=False)
@pytest.fixture
def get_good_response(client, index_client):
"""Get response with some sanity checks, expecting '200 OK'"""
try:
import simplejson as json
except ImportError:
import json
from requests import Response
from .utils import OptimadeTestClient
def inner(
request: str,
server: Union[str, OptimadeTestClient] = "regular",
return_json: bool = True,
**kwargs,
) -> Union[dict, Response]:
if isinstance(server, str):
if server == "regular":
used_client = client
elif server == "index":
used_client = index_client
else:
pytest.fail(
f"Wrong value for 'server': {server}. It must be either 'regular' or 'index'."
)
elif isinstance(server, OptimadeTestClient):
used_client = server
else:
pytest.fail("'server' must be either a string or an OptimadeTestClient.")
try:
response = used_client.get(request, **kwargs)
response_json = response.json()
assert response.status_code == 200, f"Request failed: {response_json}"
expected_mime_type = "application/vnd.api+json"
assert (
response.headers["content-type"] == expected_mime_type
), f"Response should have MIME type {expected_mime_type!r}, not {response.headers['content-type']!r}."
except json.JSONDecodeError:
print(
f"Request attempted:\n{used_client.base_url}{used_client.version}"
f"{request}\n"
"Could not successfully decode response as JSON."
)
raise
except Exception as exc:
print(
f"Request attempted:\n{used_client.base_url}{used_client.version}"
f"{request}"
)
raise exc
else:
if return_json:
return response_json
return response
return inner
@pytest.fixture
def check_response(get_good_response):
"""Check response matches expectations for a given request.
Parameters:
request: The request to check.
expected_ids: A list of IDs, or a single ID to check
the response for.
page_limit: The number of results expected per page.
expected_return: The number of results expected to be returned.
expected_as_is: Whether to enforce the order of the IDs.
expected_warnings: A list of expected warning messages.
server: The type of server to test, or the actual test client class.
"""
from typing import List
from optimade.server.config import CONFIG
from .utils import OptimadeTestClient
def inner(
request: str,
expected_ids: Union[str, List[str]],
page_limit: int = CONFIG.page_limit,
expected_return: int = None,
expected_as_is: bool = False,
expected_warnings: List[Dict[str, str]] = None,
server: Union[str, OptimadeTestClient] = "regular",
):
response = get_good_response(request, server)
if isinstance(expected_ids, str):
expected_ids = [expected_ids]
response["data"] = [response["data"]]
response_ids = [struct["id"] for struct in response["data"]]
if expected_return is not None:
assert expected_return == response["meta"]["data_returned"]
assert len(response["data"]) == len(expected_ids)
if not expected_as_is:
expected_ids = sorted(expected_ids)
response_ids = sorted(response_ids)
assert expected_ids == response_ids
if expected_warnings:
assert "warnings" in response["meta"]
assert len(expected_warnings) == len(response["meta"]["warnings"])
for ind, warn in enumerate(expected_warnings):
for key in warn:
assert response["meta"]["warnings"][ind][key] == warn[key]
else:
assert "warnings" not in response["meta"]
return inner
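# Illustrative use of the check_response fixture in a test module (the
# endpoint, filter and IDs below are hypothetical):
#   def test_binary_structures(check_response):
#       check_response(
#           "/structures?filter=nelements=2",
#           expected_ids=["example_1", "example_2"],
#           expected_return=2,
#       )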
@pytest.fixture
def check_error_response(client, index_client):
"""General method for testing expected erroneous response"""
from .utils import OptimadeTestClient
def inner(
request: str,
expected_status: int = None,
expected_title: str = None,
expected_detail: str = None,
server: Union[str, OptimadeTestClient] = "regular",
):
response = None
if isinstance(server, str):
if server == "regular":
used_client = client
elif server == "index":
used_client = index_client
else:
pytest.fail(
f"Wrong value for 'server': {server}. It must be either 'regular' or 'index'."
)
elif isinstance(server, OptimadeTestClient):
used_client = server
else:
pytest.fail("'server' must be either a string or an OptimadeTestClient.")
try:
response = used_client.get(request)
assert response.status_code == expected_status, (
f"Request should have been an error with status code {expected_status}, "
f"but instead {response.status_code} was received.\nResponse:\n{response.json()}",
)
expected_mime_type = "application/vnd.api+json"
assert (
response.headers["content-type"] == expected_mime_type
), f"Response should have MIME type {expected_mime_type!r}, not {response.headers['content-type']!r}."
response = response.json()
assert len(response["errors"]) == 1, response.get(
"errors", "'errors' not found in response"
)
assert response["meta"]["data_returned"] == 0, response.get(
"meta", "'meta' not found in response"
)
error = response["errors"][0]
assert str(expected_status) == error["status"], error
assert expected_title == error["title"], error
if expected_detail is None:
expected_detail = "Error trying to process rule "
assert error["detail"].startswith(expected_detail), (
"No expected_detail provided and the error did not start with a standard Lark "
"error message."
)
else:
assert expected_detail == error["detail"], error
except Exception:
print(
f"Request attempted:\n{used_client.base_url}{used_client.version}"
f"{request}"
)
if response:
print(f"\nCaptured response:\n{response}")
raise
return inner
|
the-stack_106_16694
|
import torch
from torch import nn
from torch_geometric.nn import GCNConv
from torch_geometric.nn import GraphConv, TopKPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
import torch.nn.functional as F
from layers import KGPool
class CharEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, drop_out_rate):
super(CharEmbeddings, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
char_embeds = self.embeddings(words_seq)
char_embeds = self.dropout(char_embeds)
return char_embeds
class NodeFeature(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate, entity_embed_dim, conv_filter_size, embeddings, char_embed_dim, max_word_len_entity, char_vocab, char_feature_size):
super(NodeFeature, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.is_bidirectional = is_bidirectional
self.drop_rate = drop_out_rate
self.word_embeddings = nn.Embedding(
embeddings.shape[0], embeddings.shape[1], padding_idx=0)
self.word_embeddings.weight.data.copy_(torch.from_numpy(embeddings))
self.word_embeddings.weight.requires_grad = False
self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, self.drop_rate)
self.lstm = nn.LSTM(embeddings.shape[1]+char_feature_size, self.hidden_dim, self.layers, batch_first=True,
bidirectional=bool(self.is_bidirectional))
self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size,padding=0)
self.max_pool = nn.MaxPool1d(max_word_len_entity + conv_filter_size - 1, max_word_len_entity + conv_filter_size - 1)
def forward(self, words, chars):
batch_size = words.shape[0]
if len(words.shape)==3:
# max_batch_len = words.shape[1]
words = words.view(words.shape[0]*words.shape[1],words.shape[2])
chars = chars.view(chars.shape[0]*chars.shape[1],chars.shape[2])
src_word_embeds = self.word_embeddings(words)
try:
char_embeds = self.char_embeddings(chars)
        except Exception as e:
import pdb; pdb.set_trace()
char_embeds = char_embeds.permute(0, 2, 1)
char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))
char_feature = char_feature.permute(0, 2, 1)
words_input = torch.cat((src_word_embeds, char_feature), -1)
outputs, hc = self.lstm(words_input)
h_n = hc[0].view(self.layers, 2, words.shape[0], self.hidden_dim)
h_n = h_n[-1,:,:,:].squeeze() # (num_dir,batch,hidden)
h_n = h_n.permute((1,0,2)) # (batch,num_dir,hidden)
h_n = h_n.contiguous().view(h_n.shape[0],h_n.shape[1]*h_n.shape[2]) # (batch,num_dir*hidden)
return h_n
class Net(torch.nn.Module):
def __init__(self, args, embeddings, char_vocab):
super(Net, self).__init__()
self.args = args
self.num_features = args.num_features
self.nhid = args.nhid
        self.pooling_ratio = args.pooling_ratio
        # Per-layer pooling ratios used below; assumed to fall back to the
        # global args.pooling_ratio when args does not define them separately.
        self.pooling_ratio_1_2 = getattr(args, 'pooling_ratio_1_2', args.pooling_ratio)
        self.pooling_ratio_3 = getattr(args, 'pooling_ratio_3', args.pooling_ratio)
self.dynamic_pooling1 = args.dynamic_pooling1
self.dynamic_pooling2 = args.dynamic_pooling2
self.dynamic_pooling3 = args.dynamic_pooling3
self.conv1 = GCNConv(self.num_features, self.nhid)
self.pool1 = KGPool(self.nhid, ratio=self.pooling_ratio_1_2, dynamic_pooling=self.dynamic_pooling1)
self.conv2 = GCNConv(self.nhid, self.nhid)
self.pool2 = KGPool(self.nhid, ratio=self.pooling_ratio_1_2, dynamic_pooling=self.dynamic_pooling2)
self.conv3 = GCNConv(self.nhid, self.nhid)
self.pool3 = KGPool(self.nhid, ratio=self.pooling_ratio_3, dynamic_pooling=self.dynamic_pooling3)
self.nf = NodeFeature(args.input_dim, args.hidden_dim, args.layers, args.is_bidirectional, args.drop_out_rate, args.entity_embed_dim, args.conv_filter_size, embeddings, args.char_embed_dim, args.max_word_len_entity, char_vocab, args.char_feature_size)
def forward(self, words, chars, edge_index, batch, entity_indices, sent_indices, epoch = None):
#s = get the nodes for s too and ensure that it is not getting pooled
#(ensure that e1 and e2 are not getting pooled in pool1)
node1_indices = entity_indices[:,0]
node2_indices = entity_indices[:,1]
sent_indices = torch.flatten(sent_indices)
x = self.nf(words, chars)
x = F.relu(self.conv1(x, edge_index))
x, edge_index, _, batch, _, node1_indices, node2_indices, sent_indices = self.pool1(x, edge_index, None, batch, node1_indices, node2_indices, sent_indices)
x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
e1_x1 = x[node1_indices]
e2_x1 = x[node2_indices]
s_x1 = x[sent_indices]
x = F.relu(self.conv2(x, edge_index))
x, edge_index, _, batch, _, node1_indices, node2_indices, sent_indices = self.pool2(x, edge_index, None, batch, node1_indices, node2_indices, sent_indices)
x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
e1_x2 = x[node1_indices]
e2_x2 = x[node2_indices]
s_x2 = x[sent_indices]
x = F.relu(self.conv3(x, edge_index))
x, edge_index, _, batch, _, node1_indices, node2_indices, sent_indices = self.pool3(x, edge_index, None, batch, node1_indices, node2_indices, sent_indices)
x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
e1_x3 = x[node1_indices]
e2_x3 = x[node2_indices]
s_x3 = x[sent_indices]
e1_cat = torch.cat([e1_x1,e1_x2,e1_x3], dim=1)
e2_cat = torch.cat([e2_x1,e2_x2,e2_x3], dim=1)
s_cat = torch.cat([s_x1,s_x2,s_x3], dim=1)
x = x1 + x2 + x3
x = torch.cat([e1_cat,e2_cat,s_cat,x],dim=1)
return x
|
the-stack_106_16695
|
_base_ = [
'../../../../_base_/default_runtime.py',
'../../../../_base_/datasets/coco_wholebody_hand.py'
]
evaluation = dict(
interval=10, metric=['PCK', 'AUC', 'EPE'], key_indicator='AUC')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=21,
dataset_joints=21,
dataset_channel=[
[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20
],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20
])
# model settings
model = dict(
type='TopDown',
pretrained=None,
backbone=dict(
type='LiteHRNet',
in_channels=3,
extra=dict(
stem=dict(stem_channels=32, out_channels=32, expand_ratio=1),
num_stages=3,
stages_spec=dict(
num_modules=(2, 4, 2),
num_branches=(2, 3, 4),
num_blocks=(2, 2, 2),
module_type=('LITE', 'LITE', 'LITE'),
with_fuse=(True, True, True),
reduce_ratios=(8, 8, 8),
num_channels=(
(40, 80),
(40, 80, 160),
(40, 80, 160, 320),
)),
with_head=True,
)),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=40,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'])
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=90, scale_factor=0.3),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='HandCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_train_v1.0.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='HandCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='HandCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info={{_base_.dataset_info}}),
)
|
the-stack_106_16696
|
import cupy
def eye(m, n=None, k=0, dtype='d', format=None):
"""Creates a sparse matrix with ones on diagonal.
Args:
m (int): Number of rows.
n (int or None): Number of columns. If it is ``None``,
it makes a square matrix.
k (int): Diagonal to place ones on.
dtype: Type of a matrix to create.
format (str or None): Format of the result, e.g. ``format="csr"``.
Returns:
cupy.sparse.spmatrix: Created sparse matrix.
.. seealso:: :func:`scipy.sparse.eye`
"""
if n is None:
n = m
m, n = int(m), int(n)
if m == n and k == 0:
if format in ['csr', 'csc']:
indptr = cupy.arange(n + 1, dtype='i')
indices = cupy.arange(n, dtype='i')
data = cupy.ones(n, dtype=dtype)
if format == 'csr':
cls = cupy.sparse.csr_matrix
else:
cls = cupy.sparse.csc_matrix
return cls((data, indices, indptr), (n, n))
elif format == 'coo':
row = cupy.arange(n, dtype='i')
col = cupy.arange(n, dtype='i')
data = cupy.ones(n, dtype=dtype)
return cupy.sparse.coo_matrix((data, (row, col)), (n, n))
diags = cupy.ones((1, max(0, min(m + k, n))), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
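# Illustrative usage (assumes a CUDA-capable environment):
#   I3 = eye(3, format='csr')   # 3x3 CSR identity
#   D = eye(4, 5, k=1)          # ones on the first superdiagonal (DIA format by default)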
def identity(n, dtype='d', format=None):
"""Creates an identity matrix in sparse format.
.. note::
Currently it only supports csr, csc and coo formats.
Args:
n (int): Number of rows and columns.
dtype: Type of a matrix to create.
format (str or None): Format of the result, e.g. ``format="csr"``.
Returns:
cupy.sparse.spmatrix: Created identity matrix.
.. seealso:: :func:`scipy.sparse.identity`
"""
return eye(n, n, dtype=dtype, format=format)
def spdiags(data, diags, m, n, format=None):
"""Creates a sparse matrix from diagonals.
Args:
data (cupy.ndarray): Matrix diagonals stored row-wise.
diags (cupy.ndarray): Diagonals to set.
m (int): Number of rows.
n (int): Number of cols.
format (str or None): Sparse format, e.g. ``format="csr"``.
Returns:
cupy.sparse.spmatrix: Created sparse matrix.
.. seealso:: :func:`scipy.sparse.spdiags`
"""
return cupy.sparse.dia_matrix((data, diags), shape=(m, n)).asformat(format)
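# Illustrative usage (assumes a CUDA-capable environment):
#   data = cupy.ones((1, 4))
#   A = spdiags(data, 0, 4, 4)   # 4x4 matrix with ones on the main diagonal, DIA format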
def random(m, n, density=0.01, format='coo', dtype=None,
random_state=None, data_rvs=None):
"""Generates a random sparse matrix.
This function generates a random sparse matrix. First it selects non-zero
elements with given density ``density`` from ``(m, n)`` elements.
So the number of non-zero elements ``k`` is ``k = m * n * density``.
Value of each element is selected with ``data_rvs`` function.
Args:
m (int): Number of rows.
n (int): Number of cols.
density (float): Ratio of non-zero entries.
format (str): Matrix format.
dtype (dtype): Type of the returned matrix values.
random_state (cupy.random.RandomState or int):
State of random number generator.
If an integer is given, the method makes a new state for random
number generator and uses it.
If it is not given, the default state is used.
This state is used to generate random indexes for nonzero entries.
data_rvs (callable): A function to generate data for a random matrix.
If it is not given, `random_state.rand` is used.
Returns:
cupy.sparse.spmatrix: Generated matrix.
.. seealso:: :func:`scipy.sparse.random`
"""
if density < 0 or density > 1:
raise ValueError('density expected to be 0 <= density <= 1')
dtype = cupy.dtype(dtype)
if dtype.char not in 'fd':
raise NotImplementedError('type %s not supported' % dtype)
mn = m * n
k = int(density * m * n)
if random_state is None:
random_state = cupy.random
elif isinstance(random_state, (int, cupy.integer)):
random_state = cupy.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
ind = random_state.choice(mn, size=k, replace=False)
j = cupy.floor(ind * (1. / m)).astype('i')
i = ind - j * m
vals = data_rvs(k).astype(dtype)
return cupy.sparse.coo_matrix(
(vals, (i, j)), shape=(m, n)).asformat(format)
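# Illustrative usage (assumes a CUDA-capable environment): roughly 1% of the
# 1000*1000 entries are populated, i.e. about 10000 non-zeros.
#   S = random(1000, 1000, density=0.01, format='csr', random_state=42)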
def rand(m, n, density=0.01, format='coo', dtype=None, random_state=None):
"""Generates a random sparse matrix.
See ``cupy.sparse.random`` for detail.
Args:
m (int): Number of rows.
n (int): Number of cols.
density (float): Ratio of non-zero entries.
format (str): Matrix format.
dtype (dtype): Type of the returned matrix values.
random_state (cupy.random.RandomState or int):
State of random number generator.
If an integer is given, the method makes a new state for random
number generator and uses it.
If it is not given, the default state is used.
This state is used to generate random indexes for nonzero entries.
Returns:
cupy.sparse.spmatrix: Generated matrix.
.. seealso:: :func:`scipy.sparse.rand`
.. seealso:: :func:`cupy.sparse.random`
"""
return random(m, n, density, format, dtype, random_state)
|
the-stack_106_16697
|
# Minibatch Size
BATCH_SIZE = 32
# Gradient clip threshold
GRAD_CLIP = 10
# Learning rate
LEARNING_RATE = 0.0005
# Maximum number of steps in BPTT
GRAD_STEPS = -1
# Number of epochs for training
NUM_EPOCHS = 10
# do validation every VALIDATION_FREQ iterations
VALIDATION_FREQ = 100
# maximum word length for character model
MAX_WORD_LEN = 10
# dataset params
def get_params(dataset):
if dataset=='cbtcn':
return cbtcn_params
elif dataset=='wdw' or dataset=='wdw_relaxed':
return wdw_params
elif dataset=='cnn':
return cnn_params
elif dataset=='dailymail':
return dailymail_params
elif dataset=='cbtne':
return cbtne_params
else:
raise ValueError("Dataset %s not found"%dataset)
cbtcn_params = {
'nhidden' : 128,
'char_dim' : 25,
'dropout' : 0.4,
'word2vec' : 'data/word2vec_glove.txt',
'train_emb' : 0,
'use_feat' : 1,
}
wdw_params = {
'nhidden' : 128,
'char_dim' : 25,
'dropout' : 0.3,
'word2vec' : 'data/word2vec_glove.txt',
'train_emb' : 0,
'use_feat' : 1,
}
cnn_params = {
'nhidden' : 256,
'char_dim' : 0,
'dropout' : 0.2,
'word2vec' : 'data/word2vec_glove.txt',
'train_emb' : 1,
'use_feat' : 0,
}
dailymail_params = {
'nhidden' : 256,
'char_dim' : 0,
'dropout' : 0.1,
'word2vec' : 'data/word2vec_glove.txt',
'train_emb' : 1,
'use_feat' : 0,
}
cbtne_params = {
'nhidden' : 128,
'char_dim' : 25,
'dropout' : 0.4,
'word2vec' : 'data/word2vec_glove.txt',
'train_emb' : 0,
'use_feat' : 1,
}
|
the-stack_106_16699
|
import json
import random
from app import app, db
from app.models import *
from app.module import *
from instance.config import LINE_CHANNEL_ACCESS_TOKEN, LINE_CHANNEL_SECRET_TOKEN
from flask import (
Flask, request, abort
)
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
SourceUser, SourceGroup, SourceRoom,
TemplateSendMessage, ConfirmTemplate, MessageTemplateAction,
ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URITemplateAction,
PostbackTemplateAction, DatetimePickerTemplateAction,
CarouselTemplate, CarouselColumn, PostbackEvent,
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage, FileMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent
)
line_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(LINE_CHANNEL_SECRET_TOKEN)
@app.route("/callback", methods=['POST'])
def callback():
# Get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# Get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# Handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(FollowEvent)
def handle_followevent(event):
""" When a FollowEvent is done, it will activate the SignUp Flow"""
confirm_template = ConfirmTemplate(
text='Untuk mengoptimalkan penggunaan aplikasi, apakah anda berkenan untuk registrasi secara otomatis?',
actions=[
PostbackTemplateAction(
label='Iya', text='Iya', data='create_user=confirm'),
PostbackTemplateAction(
label='Tidak', text='Tidak', data='create_user=decline'),
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Halo perkenalkan! Nama saya Pandu, disini untuk membantu menjadi pemandu Anda di Smart Environment kita!"),
TemplateSendMessage(
alt_text='User Confirmation', template=confirm_template)])
@handler.add(UnfollowEvent)
def handle_unfollow(event):
app.logger.info("Got Unfollow event")
@handler.add(PostbackEvent)
def handle_postback(event):
command = (event.postback.data).split('=')
findUser = Users.query.filter_by(id=event.source.user_id).first()
if (findUser == None):
# If the user is not found on our database
if (command[0] == 'create_user'):
if (command[1] == 'confirm'):
try :
user_profile = line_bot_api.get_profile(event.source.user_id)
new_user = Users(
id=user_profile.user_id,
name=user_profile.display_name,
location='Jakarta, Indonesia',
latitude=-6.17511,
longitude=106.8650395,
travel_point=0
)
db.session.add(new_user)
# Logging
app.logger.info("Create User Request: " + user_profile.user_id)
db.session.commit()
image_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=feature_thumbnail[0],
action=PostbackTemplateAction(
label='Cari Lokasi', data='location_unregistered=list=None')),
ImageCarouselColumn(image_url=feature_thumbnail[1],
action=MessageTemplateAction(
label='Cuaca Kini', text='Hari ini cuaca nya seperti apa Pan?')),
ImageCarouselColumn(image_url=feature_thumbnail[2],
action=MessageTemplateAction(
label='Pasar Limbah', text='Pan, tolong buka Pasar Limbah')),
ImageCarouselColumn(image_url=feature_thumbnail[3],
action=MessageTemplateAction(
label='Travel Point', text='Pandu, tolong cek deh travel point')),
ImageCarouselColumn(image_url=feature_thumbnail[4],
action=MessageTemplateAction(
label='Go Green', text='Tips and tricks dong untuk jaga lingkungan kita!')),
ImageCarouselColumn(image_url=feature_thumbnail[5],
action=MessageTemplateAction(
label='Buka Web', text='Pandu buka website Official dari Digibot Solution'))
])
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(
text='Berhasil membuat registrasi untuk user {user}'.format(
user=user_profile.display_name)),
TextSendMessage(
text='Untuk mengetahui lingkungan Anda, dapatkah Anda membagikan lokasi Anda dengan mengirimkan Send Location?'),
TextSendMessage(
text='Send Location dapat di temukan di bawah menu, silahkan klik tombol + dan klik Send Location atau bisa menggunakan Rich Menu dibawah'),
TemplateSendMessage(
alt_text='Feature List', template=image_option_template
)
])
except :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="Sepertinya ada masalah dalam memperoleh informasi profil Anda"))
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Tahap registrasi di tunda, silahkan registrasi untuk menggunakan aplikasi secara lengkap :)"))
else :
# If the user is found
if (command[0] == 'search_location'):
carousel_limit = 6
sub_command = command[1].split(':')
query, place_name = sub_command
thumnail_query = 'else'
if query in places_thumbnail:
thumnail_query = query
# To calculate travel_option
origin = '{lat},{lng}'.format(lat=findUser.latitude, lng=findUser.longitude)
if (query == 'food'):
# Zomato API Call
restaurant_list = ZomatoAPI().geocode(latitude=findUser.latitude, longitude=findUser.longitude)
if (restaurant_list != None and len(restaurant_list) > 2):
# The list of all the carousel columns
restaurant_carousel = []
for restaurant in restaurant_list[:carousel_limit]:
destination = '{lat},{lng}'.format(
lat=restaurant['restaurant']['location']['latitude'],
lng=restaurant['restaurant']['location']['longitude'])
# Carousel Column
restaurant_column = CarouselColumn(
title=str(restaurant['restaurant']['name'])[:40],
text=str(restaurant['restaurant']['location']['address'])[:60],
thumbnail_image_url=places_thumbnail[thumnail_query],
actions=[
URITemplateAction(
label='Cek Restoran',
uri=restaurant['restaurant']['url']),
PostbackTemplateAction(
label='Pilihan Perjalanan',
data='travel_option={origin}={destination}'.format(
origin=origin, destination=destination))
])
restaurant_carousel.append(restaurant_column)
food_carousel = CarouselTemplate(columns=restaurant_carousel)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Saya akan carikan tempat {place} didekat posisi Anda...".format(
place=place_name)),
TemplateSendMessage(
alt_text='Restaurant Carousel', template=food_carousel),
TextSendMessage(
text="Jika ingin mencari tempat lain, silahkan tanyakan saja sama Pandu. Pandu tau banyak tempat loh!"
)
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Maaf...tapi saat ini kita tidak menemukan restaurant di dekat Anda"))
else :
search_places = GoogleMapsAPI().places(query=query, location=(findUser.latitude, findUser.longitude))
places_list = search_places['results']
if (places_list != None and len(places_list) > 2):
# The list of all the carousel columns
places_carousel = []
# Temporary thumbnail_image
thumbnail_image = 'https://i.imgur.com/EFkDB2M.png'
for places in places_list[:carousel_limit]:
destination = '{lat},{lng}'.format(
lat=places['geometry']['location']['lat'],
lng=places['geometry']['location']['lng'])
# Carousel Column
places_column = CarouselColumn(
title=str(places['name'])[:40],
text=str(places['formatted_address'])[:60],
thumbnail_image_url=places_thumbnail[thumnail_query],
actions=[
URITemplateAction(
label='Cek Peta',
uri='https://www.google.com/maps/search/?api=1&query={destination}'.format(
destination=destination)),
PostbackTemplateAction(
label='Pilihan Perjalanan',
data='travel_option={origin}={destination}'.format(
origin=origin, destination=destination))
])
places_carousel.append(places_column)
search_carousel = CarouselTemplate(columns=places_carousel)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Saya akan carikan {place} didekat posisi Anda...".format(
place=place_name)),
TemplateSendMessage(
alt_text='Places Carousel', template=search_carousel),
TextSendMessage(
text="Jika ingin mencari tempat lain, silahkan tanyakan saja sama Pandu. Pandu tau banyak tempat loh!"
)
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Maaf...tapi saat ini kita tidak menemukan {query} di dekat Anda".format(
query=query)))
elif (command[0] == 'location_unregistered'):
data_search = command[2]
if (command[1] == 'confirm'):
# If the place is not supported in keyword.json, attempt
# to search using the raw string passed in as an argument
location_confirm = ConfirmTemplate(text='Apakah anda sedang berada di {location}?'.format(location=findUser.location),
actions=[
PostbackTemplateAction(
label='Iya', text='Iya', data='search_location={search}'.format(search=data_search)),
PostbackTemplateAction(
label='Tidak', text='Tidak', data='location_unregistered=decline=None')
])
line_bot_api.reply_message(
event.reply_token,[
LocationSendMessage(
title='Posisi Terakhir Anda', address='{0}'.format(findUser.location),
latitude=findUser.latitude, longitude=findUser.longitude
),
TemplateSendMessage(
alt_text='Location Confirmation', template=location_confirm)
])
elif (command[1] == 'decline'):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='Baiklah, membatalkan mencari lokasi "{search}"'.format(
search=data_search
)))
elif (command[1] == 'list'):
image_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=places_thumbnail['food'],
action=MessageTemplateAction(
label='Makan', text='Carikan tempat makan di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['movie theater'],
action=MessageTemplateAction(
label='Bioskop', text='Carikan bioskop di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['minimarket'],
action=MessageTemplateAction(
label='Minimarket', text='Carikan minimarket di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['bus station'],
action=MessageTemplateAction(
label='Halte Bus', text='Carikan halte bus di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['else'],
action=MessageTemplateAction(
label='Lainnya', text='Carikan {place} di dekat lokasi saya'.format(
place=random.choice(['barber', 'atm', 'toko buku', 'salon', 'bengkel', 'rumah sakit', 'perpustakaan'])
))),
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Berikut adalah hanya beberapa pilihan dari fitur Location Finder, jangan lupa untuk memperbarui lokasi Anda untuk mengoptimalkan penggunaan"),
TemplateSendMessage(
alt_text='Pilihan Lokasi Pencarian', template=image_option_template),
TextSendMessage(text="Pandu bisa mencari tempat lebih banyak dari ini, coba tanyakan saja sama Pandu")
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Baiklah, silahkan perbarui lokasi Anda dengan mengirimkan lokasi dengan Rich Menu dibawah"))
elif (command[0] == 'travel_option'):
origin = command[1]
destination = command[2]
coordinate = [findUser.latitude, findUser.longitude]
dist_calculation = GoogleMapsAPI().distanceCalculate(origin, destination)
dist_cut = dist_calculation['rows'][0]['elements'][0]
distance = {
"text" : dist_cut['distance']['text'],
"value" : dist_cut['distance']['value'],
"duration" : dist_cut['duration']['text']
}
with open('data/travelopt.json', 'r') as travelopt:
travel_options = json.load(travelopt)
travel_carousel = []
thumbnail_image = 'https://i.imgur.com/EFkDB2M.png'
for options in travel_options:
travel_column = ImageCarouselColumn(
image_url=options['thumbnail_image'],
action=URITemplateAction(
label=options['label'],
uri=(options['uri']).format(
origin=origin,
destination=destination
)))
if (distance['value'] >= 5000):
# Don't recommend walking more than 5km
if (options['label'] != 'Jalan Kaki'):
travel_carousel.append(travel_column)
else :
travel_carousel.append(travel_column)
travel_option_template = ImageCarouselTemplate(columns=travel_carousel)
# Weather API
with open('data/weathermapping.json', 'r') as wm:
weather_mapping = json.load(wm)
get_weather = OpenWeatherAPI().current_weather(coordinate=coordinate)
current_weather = None
for id, name in weather_code_range:
if (get_weather['weather'][0]['id'] in id):
current_weather = name
break
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Saya perkirakan bahwa Anda akan tiba pada lokasi dalam {time}".format(
time=distance['duration'])),
TextSendMessage(
text="Dengan jarak {range}, di bawah adalah rekomendasian perjalanan".format(
range=distance['text'])),
TemplateSendMessage(
alt_text='Pilihan Perjalanan', template=travel_option_template),
TextSendMessage(
text="Cuaca di luar terlihat {weather}, {prompt}.".format(
weather=weather_mapping[current_weather]['name'],
prompt=weather_mapping[current_weather]['prompt']
))
])
elif (command[0] == 'point_exchange'):
promotion_category = command[1]
# Find Categories from database and iterate over them like a list
findPromotion = TravelPointPromotion.query.filter_by(promotion_category=promotion_category).all()
promotion_carousel = []
for promotion in findPromotion:
promotion_column = CarouselColumn(
title=str(promotion.promotion_name)[:40],
text=str(promotion.promotion_description)[:60],
actions=[
PostbackTemplateAction(
label='Tukar Point',
data="point_exchange_confirm={promotion_id}".format(
promotion_id=promotion.promotion_id
)),
PostbackTemplateAction(
label='Cek Harga Point',
data="check_promotion_price={cost}".format(
cost=promotion.promotion_cost
))
])
promotion_carousel.append(promotion_column)
promotion_template_carousel = CarouselTemplate(columns=promotion_carousel)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Saya akan carikan point exchange untuk kategori {category}".format(
category=promotion_category)),
TemplateSendMessage(
alt_text='Promotion Carousel', template=promotion_template_carousel)
])
elif (command[0] == 'point_exchange_confirm'):
promotion_onconfirm = command[1]
findPromotion = TravelPointPromotion.query.filter_by(promotion_id=promotion_onconfirm).first()
if (findUser.travel_point > findPromotion.promotion_cost):
findUser.travel_point -= findPromotion.promotion_cost
db.session.commit()
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(
text="Selamat Anda telah membeli promosi {name}, sisa poin Anda sekarang {point}".format(
name=findPromotion.promotion_name,
point=findUser.travel_point
)),
TextSendMessage(
text="Promotion Secret : {secret} ".format(
secret=findPromotion.promotion_secret
))
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Point Anda kurang untuk melakukan transaksi ini"))
elif (command[0] == 'check_promotion_price'):
cost_of_promotion = command[1]
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Dibutuhkan {cost} point untuk melakukan transaksi ini, Anda memiliki {point}".format(
cost=cost_of_promotion,
point=findUser.travel_point
)))
elif (command[0] == 'waste_market'):
waste_category = command[1]
findMarket = MarketPlaceDatabase.query.filter_by(market_demand=waste_category).all()
if (len(findMarket) >= 2):
market_carousel = []
for market in findMarket:
market_column = CarouselColumn(
title=str(market.market_name)[:40],
text="Rp{price}/kg\n{description}\nMore...".format(
price=str(market.market_price),
description=market.market_description[:45]
),
actions=[
PostbackTemplateAction(
label='Deskripsi',
data="waste_market_info={market_id}".format(
market_id=market.market_id
)),
URITemplateAction(
label='Contact',
uri="tel:{number}".format(
number=market.market_owner_number
))
])
market_carousel.append(market_column)
market_template_carousel = CarouselTemplate(columns=market_carousel)
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Mencari Pasar limbah dalam kategori {category}".format(
category=waste_category)),
TemplateSendMessage(
alt_text='Waste Market Carousel', template=market_template_carousel)
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Sementara, Pasar Limbah dengan kategori {category} belum diisi, silahkan menambahkan di halaman http://location-linebot.herokuapp.com/store/add".format(
category=waste_category
)))
elif (command[0] == 'waste_market_info'):
passed_market_id = command[1]
findMarket = MarketPlaceDatabase.query.filter_by(market_id=passed_market_id).first()
description_string = "{name}\n{owner}\n\n{demand}\n{description}\n\n{additional}\n\nRp{price}/kg\n\nContact\n{line} - {number}".format(
name=findMarket.market_name,
owner=findMarket.market_owner,
demand=findMarket.market_demand,
description=findMarket.market_description,
additional=findMarket.market_additional,
price=findMarket.market_price,
line=findMarket.market_owner_line_id,
number=findMarket.market_owner_number
)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text=description_string))
elif (command[0] == 'create_user'):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Anda sudah melakukan registrasi otomatis"))
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Sepertinya ada masalah dalam PostbackEvent Anda"))
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
findUser = Users.query.filter_by(id=event.source.user_id).first()
if (findUser != None):
try:
findUser.location = (event.message.address)[:100]
findUser.latitude = event.message.latitude
findUser.longitude = event.message.longitude
db.session.commit()
image_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=places_thumbnail['food'],
action=MessageTemplateAction(
label='Makan', text='Carikan tempat makan di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['movie theater'],
action=MessageTemplateAction(
label='Bioskop', text='Carikan bioskop di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['minimarket'],
action=MessageTemplateAction(
label='Minimarket', text='Carikan minimarket di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['bus station'],
action=MessageTemplateAction(
label='Halte Bus', text='Carikan halte bus di dekat lokasi saya')),
ImageCarouselColumn(image_url=places_thumbnail['else'],
action=MessageTemplateAction(
label='Lainnya', text='Carikan {place} di dekat lokasi saya'.format(
place=random.choice(['restoran', 'atm', 'tempat potong rambut', 'salon', 'halte bus', 'warung', 'bioskop'])
))),
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Lokasi Anda sudah diperbarui!"),
TemplateSendMessage(
alt_text='Pilihan Aplikasi', template=image_option_template),
TextSendMessage(text='Pandu bisa mencari tempat lebih banyak dari ini, coba tanyakan saja sama Pandu')
])
except :
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Lokasi Anda tidak berhasil diperbarui!"),
TextSendMessage(text="Silahkan coba lagi nanti")
])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Sepertinya Anda belum registrasi, silahkan registrasi terlebih dahulu"))
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
""" Here's all the messages will be handled and processed by the program """
msg = (event.message.text).lower()
findUser = Users.query.filter_by(id=event.source.user_id).first()
if (findUser != None):
with open('data/keyword.json', 'r') as keyword_list:
keyword = json.load(keyword_list)
if ('cari' in msg):
data_search = None
# In keyword.json, iterate over the json
# to find a match to any keyword in msg
for key, value in keyword['search'].items():
for word in value:
if (word in msg):
data_search = key
break
# If data_search is not updated, then search is not found
if (data_search is None):
# If no search is found by the keyword, then ask the user if they still want an answer
# By searching for the whole message
search_for = msg + ':hasil pencarian'
search_confirm = ConfirmTemplate(
text='Sepertinya kata kunci ini belum di registrasikan secara resmi oleh Pandu, apakah ingin tetap mencari "{message}"?'.format(
message=msg
),
actions=[
PostbackTemplateAction(
label='Iya', text='Iya', data='location_unregistered=confirm={search}'.format(
search=search_for)),
PostbackTemplateAction(
label='Tidak', text='Tidak', data='location_unregistered=decline={search}'.format(
search=search_for
))
])
line_bot_api.reply_message(
event.reply_token,[
TemplateSendMessage(
alt_text='Unknown Keyword Confirmation', template=search_confirm),
TextSendMessage(
text='Hasil pencarian mungkin tidak akurat karena kata kunci belum terdaftar secara resmi sebagai titik pencarian yang valid.'
)
])
else :
# this line will execute if it has found a match in keyword.json
location_confirm = ConfirmTemplate(text='Apakah anda sedang berada di {location}?'.format(location=findUser.location),
actions=[
PostbackTemplateAction(
label='Iya', text='Iya', data='search_location={search}'.format(search=data_search)),
PostbackTemplateAction(
label='Tidak', text='Tidak', data='location_unregistered=None=None')
])
line_bot_api.reply_message(
event.reply_token,[
LocationSendMessage(
title='Posisi Terakhir Anda', address=findUser.location,
latitude=findUser.latitude, longitude=findUser.longitude
),
TemplateSendMessage(
alt_text='Location Confirmation', template=location_confirm)
])
elif ('cuaca' in msg):
coordinate = [findUser.latitude, findUser.longitude]
get_weather = OpenWeatherAPI().current_weather(coordinate=coordinate)
with open('data/weathermapping.json', 'r') as wm:
weather_mapping = json.load(wm)
current_weather = None
for id, name in weather_code_range:
if (get_weather['weather'][0]['id'] in id):
current_weather = name
break
if (get_weather['cod'] == 200 and current_weather != None):
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Coba Pandu cek di openweathermap.org"
),
TextSendMessage(
text="Cuaca di luar terlihat {weather}, {prompt}.".format(
weather=weather_mapping[current_weather]['name'],
prompt=weather_mapping[current_weather]['prompt']
))])
else :
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Sepertinya kita sedang mengalami masalah mendapatkan cuaca tempat Anda, silahkan mencoba lagi dalam beberapa saat."))
elif ('bersih' in msg or 'lingkungan' in msg):
with open('data/envtips.json', 'r') as envtips:
env_json = json.load(envtips)
randomize_article = random.choice(env_json)
article_string = "{title}\n\n{tips}\n\n{source}".format(
title=randomize_article['title'],
tips=randomize_article['tips'],
source=randomize_article['source']
)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text=article_string))
elif ('token' in msg):
input_token = (msg.split()[1]).upper()
find_token = TravelPointToken.query.filter_by(token_id=input_token).first()
if (find_token != None):
findUser.travel_point += find_token.token_point_value
app.logger.info('{user} gained {value} points from {provider_name}'.format(
user=findUser.id,
value=find_token.token_point_value,
provider_name=find_token.token_name
))
find_token.token_point_visitor += 1
db.session.commit()
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Selamat! Anda mendapatkan {value} Points dari {provider_name}".format(
value=find_token.token_point_value,
provider_name=find_token.token_name
)),
TextSendMessage(
text="Travel point Anda sekarang {point} token telah dimasukkan : {token}".format(
point=findUser.travel_point,
token=input_token
))
])
else :
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(
text="Token tidak dikenal oleh Pandu, coba cek kembali token yang di berikan"),
TextSendMessage(
text="Travel point Anda sekarang {point}".format(
point=findUser.travel_point
))
])
elif ('tukar' in msg or 'tuker' in msg or 'penukaran' in msg):
thumbnail_image = feature_thumbnail[3]
exchange_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Belanja', data='point_exchange=shop')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Makan Murah', data='point_exchange=food')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Voucher Game', data='point_exchange=game')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Isi Pulsa', data='point_exchange=pulsa')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Tiket Murah', data='point_exchange=tiket'))
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Silahkan Pilih dari kategori yang kami sediakan!"),
TemplateSendMessage(
alt_text='Pilihan Tukar Point', template=exchange_option_template)
])
elif ('cek' in msg):
point_template = ConfirmTemplate(
text='Ingin melihat pilihan penukaran Travel Point?',
actions=[
MessageTemplateAction(
label='Iya', text='Iya, bukakan pilihan penukaran Travel Point'),
MessageTemplateAction(
label='Tidak', text='Tidak'),
])
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(
text="Travel Point Anda sekarang {point}".format(
point=findUser.travel_point
)),
TemplateSendMessage(
alt_text='Konfirmasi Pilihan Penukaran Point', template=point_template
)
])
elif ('web' in msg):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Ini website buatan Digibot Solution http://location-linebot.herokuapp.com"
))
elif ('pandu' in msg and 'id' in msg):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Pandu ID : {id}\nTolong di rahasiakan.".format(
id=(findUser.id)[:12]
)))
elif ('pasar' in msg and 'limbah' in msg):
thumbnail_image = feature_thumbnail[2]
market_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Plastik', data='waste_market=plastik')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Kertas', data='waste_market=kertas')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Kardus', data='waste_market=kardus')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Kayu', data='waste_market=kayu')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Gelas', data='waste_market=gelas')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Beling', data='waste_market=beling')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Baterai', data='waste_market=baterai')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='E-Waste', data='waste_market=electronic waste')),
ImageCarouselColumn(image_url=thumbnail_image,
action=PostbackTemplateAction(
label='Lainnya', data='waste_market=lainnya')),
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text="Silahkan cari dari kategori yang kami sediakan!"),
TemplateSendMessage(
alt_text='Pilihan Kategori Pasar Limbah', template=market_option_template),
TextSendMessage(text="Ingin membuka pasar sendiri? Tinggal buka link ini http://location-linebot.herokuapp.com/store/add")
])
else :
# Interaction
interaction_response = None
with open('data/speech.json', 'r') as speechwords:
speech = json.load(speechwords)
for key, value in keyword['interaction'].items():
for word in value:
if (word in msg.split()):
interaction_response = (random.choice(speech['speech'][key]['answer']).format(
name = findUser.name,
baseball = 'baseball'
))
break
if (interaction_response != None):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text=interaction_response))
else :
if (('iya' not in msg.split() and 'tidak' not in msg.split()) or 'guide' in msg.split()):
if ('guide' in msg.split()):
guide_string = "Ini adalah semua fitur yang Pandu bisa lakukan saat ini, jika ingin bertanya apapun sama Pandu, silahkan tanya saja."
else :
guide_string = "Pandu tidak mengenal kata-kata dalam percakapan, mungkin ada yang bisa Pandu bantu?"
image_option_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url=feature_thumbnail[0],
action=PostbackTemplateAction(
label='Cari Lokasi', data='location_unregistered=list=None')),
ImageCarouselColumn(image_url=feature_thumbnail[1],
action=MessageTemplateAction(
label='Cuaca Kini', text='Hari ini cuaca nya seperti apa Pan?')),
ImageCarouselColumn(image_url=feature_thumbnail[2],
action=MessageTemplateAction(
label='Pasar Limbah', text='Pan, tolong buka Pasar Limbah')),
ImageCarouselColumn(image_url=feature_thumbnail[3],
action=MessageTemplateAction(
label='Travel Point', text='Pandu, tolong cek deh travel point')),
ImageCarouselColumn(image_url=feature_thumbnail[4],
action=MessageTemplateAction(
label='Go Green', text='Tips and tricks dong untuk jaga lingkungan kita!')),
ImageCarouselColumn(image_url=feature_thumbnail[5],
action=MessageTemplateAction(
label='Buka Web', text='Pandu buka website Official dari Digibot Solution'))
])
line_bot_api.reply_message(
event.reply_token,[
TextSendMessage(text=guide_string),
TemplateSendMessage(
alt_text='Guide Pandu Bot', template=image_option_template)
])
else :
if ('iya' not in msg and 'tidak' not in msg):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Sepertinya Anda belum registrasi, silahkan registrasi terlebih dahulu"))
@handler.default()
def default(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text="Jenis obrolan tidak didukung oleh ..."))
|
the-stack_106_16702
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the nearest matching hex color lookup"""
import time
from random import randint
from colored import fg, bg, attr
from colored.hex import HEX, _xterm_colors
def compare_with_expected( in_hex, expected ):
nearest = HEX( in_hex )
# Look up the matched hex value (e.g. xterm colors 10 and 46 are
# the same hex value)
match = _xterm_colors[nearest] == _xterm_colors[expected]
e_str = '%s%s##%s' % (fg( expected ), bg( expected ), attr('reset'))
n_str = '%s%s##%s' % (fg( nearest ), bg( nearest ), attr('reset'))
print( "%s: %s => %s = %s" % ( 'pass' if match else 'FAIL', in_hex, n_str, e_str) )
return match
def main():
print( ' Nearest Expected' )
test_set = {
'1': ('#7f0000', '#800000', '#810000'),
'2': ('#007f00', '#008000', '#008100'),
'4': ('#00007f', '#000080', '#000081'),
'10': ('#00fe00', '#00ff00', '#01ff00'),
}
all_ok = True
for expected, hexes in test_set.items():
for hex in hexes:
ok = compare_with_expected( hex, expected )
all_ok = all_ok and ok
try:
T_sta = time.perf_counter()
print( '-'*78 )
for y in range(0,0xF):
r_row = ''
g_row = ''
b_row = ''
i_row = ''
for x in range(0,0xF):
c = x + y*0xF
hex = '#%02x0000' % (c,)
r_row += '%s%s#' % (fg(hex),bg(hex))
hex = '#00%02x00' % (c,)
g_row += '%s%s#' % (fg(hex),bg(hex))
hex = '#0000%02x' % (c,)
b_row += '%s%s#' % (fg(hex),bg(hex))
hex = '#'+('%02x' % (c,))*3
i_row += '%s%s#' % (fg(hex),bg(hex))
print( '%s%s %s%s %s%s %s%s' % (r_row,attr('reset'), g_row,attr('reset'), b_row,attr('reset'), i_row,attr('reset')) )
dT = time.perf_counter() - T_sta
print( 'Lookup time: %0.4f s => %0.4f s/lookup' % (dT, dT / (2*4*0xF*0xF) ) )
print( '-'*78 )
except Exception as e:
print( 'Whopsie, something %s-ish went wrong: %s' % (e.__class__.__name__, e) )
import traceback
traceback.print_exc()
all_ok = False
# This is just for fun, almost... let's call it a
# "non-deterministic check that it doesn't throw any exceptions"
try:
T_sta = time.perf_counter()
from random import randint
for y in range(0,20):
for x in range(0,30):
rnd = randint(0,0xffffff)
hex = '#%06x' % (rnd,)
hexinv = '#%06x' % (0xffffff-rnd,)
print( '%s%s::' % (fg(hexinv),bg(hex)), end='')
print( attr('reset') )
dT = time.perf_counter() - T_sta
print( 'Lookup time: %0.4f s => %0.4f s/lookup' % (dT, dT / (2*30*20) ) )
except Exception as e:
print( 'Whopsie, something %s-ish went wrong: %s' % (e.__class__.__name__, e) )
all_ok = False
return all_ok
if __name__ == "__main__":
ok = main()
exit( 0 if ok else 1 )
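# Usage note (added, not part of the original test): HEX() maps an '#rrggbb' string to
# the nearest xterm-256 color index, and _xterm_colors maps an index back to its hex
# value, e.g. roughly:
#
#   idx = HEX('#7f0000')   # -> an index such as '1' (nearest to #800000)
#   _xterm_colors[idx]     # -> '#800000'
#
# The concrete return values above are illustrative assumptions based on the test_set
# table in main().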
|
the-stack_106_16703
|
import copy
import datetime
import logging
from math import ceil
import os
from typing import Any, Dict, List, Optional, Text
import rasa.nlu
from rasa.shared.exceptions import RasaException
import rasa.shared.utils.io
import rasa.utils.io
from rasa.constants import MINIMUM_COMPATIBLE_VERSION, NLU_MODEL_NAME_PREFIX
from rasa.nlu import components, utils
from rasa.nlu.classifiers.classifier import IntentClassifier
from rasa.nlu.components import Component, ComponentBuilder
from rasa.nlu.config import RasaNLUModelConfig, component_config_from_pipeline
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.persistor import Persistor
from rasa.shared.nlu.constants import (
TEXT,
ENTITIES,
INTENT,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
)
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.utils import write_json_to_file
from rasa.utils.tensorflow.constants import EPOCHS
logger = logging.getLogger(__name__)
class InvalidModelError(RasaException):
"""Raised when a model failed to load.
Attributes:
message -- explanation of why the model is invalid
"""
def __init__(self, message: Text) -> None:
self.message = message
super(InvalidModelError, self).__init__()
def __str__(self) -> Text:
return self.message
class UnsupportedModelError(RasaException):
"""Raised when a model is too old to be loaded.
Attributes:
message -- explanation of why the model is invalid
"""
def __init__(self, message: Text) -> None:
self.message = message
super(UnsupportedModelError, self).__init__()
def __str__(self) -> Text:
return self.message
class Metadata:
"""Captures all information about a model to load and prepare it."""
@staticmethod
def load(model_dir: Text):
"""Loads the metadata from a models directory.
Args:
model_dir: the directory where the model is saved.
Returns:
Metadata: A metadata object describing the model
"""
try:
metadata_file = os.path.join(model_dir, "metadata.json")
data = rasa.shared.utils.io.read_json_file(metadata_file)
return Metadata(data, model_dir)
except Exception as e:
abspath = os.path.abspath(os.path.join(model_dir, "metadata.json"))
raise InvalidModelError(
f"Failed to load model metadata from '{abspath}'. {e}"
)
def __init__(self, metadata: Dict[Text, Any], model_dir: Optional[Text]):
self.metadata = metadata
self.model_dir = model_dir
def get(self, property_name: Text, default: Any = None) -> Any:
return self.metadata.get(property_name, default)
@property
def component_classes(self):
if self.get("pipeline"):
return [c.get("class") for c in self.get("pipeline", [])]
else:
return []
@property
def number_of_components(self):
return len(self.get("pipeline", []))
def for_component(self, index: int, defaults: Any = None) -> Dict[Text, Any]:
return component_config_from_pipeline(index, self.get("pipeline", []), defaults)
@property
def language(self) -> Optional[Text]:
"""Language of the underlying model"""
return self.get("language")
def persist(self, model_dir: Text):
"""Persists the metadata of a model to a given directory."""
metadata = self.metadata.copy()
metadata.update(
{
"trained_at": datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
"rasa_version": rasa.__version__,
}
)
filename = os.path.join(model_dir, "metadata.json")
write_json_to_file(filename, metadata, indent=4)
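# Illustrative sketch (added): the metadata.json that Metadata.load()/persist() read
# and write is, as used above, roughly of the form
#
#   {"language": "en",
#    "pipeline": [{"name": "SomeComponent", "class": "rasa.nlu....SomeComponent"}, ...],
#    "trained_at": "20210101-120000",
#    "rasa_version": "2.x.y"}
#
# Only "pipeline", "language", "trained_at" and "rasa_version" are implied by this
# file; the component entries shown are assumed examples.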
class Trainer:
"""Trainer will load the data and train all components.
Requires a pipeline specification and configuration to use for
the training.
"""
def __init__(
self,
cfg: RasaNLUModelConfig,
component_builder: Optional[ComponentBuilder] = None,
skip_validation: bool = False,
model_to_finetune: Optional["Interpreter"] = None,
) -> None:
self.config = cfg
self.skip_validation = skip_validation
self.training_data = None # type: Optional[TrainingData]
if component_builder is None:
# If no builder is passed, every interpreter creation will result in
# a new builder. hence, no components are reused.
component_builder = components.ComponentBuilder()
# Before instantiating the component classes, lets check if all
# required packages are available
if not self.skip_validation:
components.validate_requirements(cfg.component_names)
if model_to_finetune:
self.pipeline = model_to_finetune.pipeline
else:
self.pipeline = self._build_pipeline(cfg, component_builder)
def _build_pipeline(
self, cfg: RasaNLUModelConfig, component_builder: ComponentBuilder
) -> List[Component]:
"""Transform the passed names of the pipeline components into classes."""
pipeline = []
# Transform the passed names of the pipeline components into classes
for index, pipeline_component in enumerate(cfg.pipeline):
component_cfg = cfg.for_component(index)
component = component_builder.create_component(component_cfg, cfg)
components.validate_component_keys(component, pipeline_component)
pipeline.append(component)
if not self.skip_validation:
components.validate_pipeline(pipeline)
return pipeline
def train(self, data: TrainingData, **kwargs: Any) -> "Interpreter":
"""Trains the underlying pipeline using the provided training data."""
self.training_data = data
self.training_data.validate()
context = kwargs
for component in self.pipeline:
updates = component.provide_context()
if updates:
context.update(updates)
# Before the training starts: check that all arguments are provided
if not self.skip_validation:
components.validate_required_components_from_data(
self.pipeline, self.training_data
)
# data gets modified internally during the training - hence the copy
working_data: TrainingData = copy.deepcopy(data)
for i, component in enumerate(self.pipeline):
logger.info(f"Starting to train component {component.name}")
component.prepare_partial_processing(self.pipeline[:i], context)
updates = component.train(working_data, self.config, **context)
logger.info("Finished training component.")
if updates:
context.update(updates)
return Interpreter(self.pipeline, context)
@staticmethod
def _file_name(index: int, name: Text) -> Text:
return f"component_{index}_{name}"
def persist(
self,
path: Text,
persistor: Optional[Persistor] = None,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
) -> Text:
"""Persist all components of the pipeline to the passed path.
Returns the directory of the persisted model."""
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
metadata = {"language": self.config["language"], "pipeline": []}
if fixed_model_name:
model_name = fixed_model_name
else:
model_name = NLU_MODEL_NAME_PREFIX + timestamp
path = os.path.abspath(path)
dir_name = os.path.join(path, model_name)
rasa.shared.utils.io.create_directory(dir_name)
if self.training_data and persist_nlu_training_data:
metadata.update(self.training_data.persist(dir_name))
for i, component in enumerate(self.pipeline):
file_name = self._file_name(i, component.name)
update = component.persist(file_name, dir_name)
component_meta = component.component_config
if update:
component_meta.update(update)
component_meta["class"] = utils.module_path_from_object(component)
metadata["pipeline"].append(component_meta)
Metadata(metadata, dir_name).persist(dir_name)
if persistor is not None:
persistor.persist(dir_name, model_name)
logger.info(
"Successfully saved model into '{}'".format(os.path.abspath(dir_name))
)
return dir_name
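# Hedged usage sketch (added, not part of rasa.nlu.model): a typical train-and-persist
# round trip with the classes in this module would look roughly like
#
#   trainer = Trainer(RasaNLUModelConfig({"language": "en", "pipeline": [...]}))
#   interpreter = trainer.train(training_data)   # TrainingData loaded elsewhere
#   model_dir = trainer.persist("models/")
#
# The config contents and paths are assumptions; the supported pipeline components are
# documented elsewhere in Rasa.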
class Interpreter:
"""Use a trained pipeline of components to parse text messages."""
# Defines all attributes (& default values)
# that will be returned by `parse`
@staticmethod
def default_output_attributes() -> Dict[Text, Any]:
return {
TEXT: "",
INTENT: {INTENT_NAME_KEY: None, PREDICTED_CONFIDENCE_KEY: 0.0},
ENTITIES: [],
}
@staticmethod
def ensure_model_compatibility(
metadata: Metadata, version_to_check: Optional[Text] = None
) -> None:
from packaging import version
if version_to_check is None:
version_to_check = MINIMUM_COMPATIBLE_VERSION
model_version = metadata.get("rasa_version", "0.0.0")
if version.parse(model_version) < version.parse(version_to_check):
raise UnsupportedModelError(
f"The model version is trained using Rasa Open Source {model_version} "
f"and is not compatible with your current installation ({rasa.__version__}). "
f"This means that you either need to retrain your model "
f"or revert back to the Rasa version that trained the model "
f"to ensure that the versions match up again."
)
@staticmethod
def load(
model_dir: Text,
component_builder: Optional[ComponentBuilder] = None,
skip_validation: bool = False,
new_config: Optional[Dict] = None,
finetuning_epoch_fraction: float = 1.0,
) -> "Interpreter":
"""Create an interpreter based on a persisted model.
Args:
skip_validation: If set to `True`, does not check that all
required packages for the components are installed
before loading them.
model_dir: The path of the model to load
component_builder: The
:class:`rasa.nlu.components.ComponentBuilder` to use.
new_config: Optional new config to use for the new epochs.
finetuning_epoch_fraction: Value to multiply all epochs by.
Returns:
An interpreter that uses the loaded model.
"""
model_metadata = Metadata.load(model_dir)
if new_config:
Interpreter._update_metadata_epochs(
model_metadata, new_config, finetuning_epoch_fraction
)
Interpreter.ensure_model_compatibility(model_metadata)
return Interpreter.create(
model_metadata,
component_builder,
skip_validation,
should_finetune=new_config is not None,
)
@staticmethod
def _get_default_value_for_component(name: Text, key: Text) -> Any:
from rasa.nlu.registry import get_component_class
return get_component_class(name).defaults[key]
@staticmethod
def _update_metadata_epochs(
model_metadata: Metadata,
new_config: Optional[Dict] = None,
finetuning_epoch_fraction: float = 1.0,
) -> Metadata:
for old_component_config, new_component_config in zip(
model_metadata.metadata["pipeline"], new_config["pipeline"]
):
if EPOCHS in old_component_config:
new_epochs = new_component_config.get(
EPOCHS,
Interpreter._get_default_value_for_component(
old_component_config["class"], EPOCHS
),
)
old_component_config[EPOCHS] = ceil(
new_epochs * finetuning_epoch_fraction
)
return model_metadata
@staticmethod
def create(
model_metadata: Metadata,
component_builder: Optional[ComponentBuilder] = None,
skip_validation: bool = False,
should_finetune: bool = False,
) -> "Interpreter":
"""Create model and components defined by the provided metadata.
Args:
model_metadata: The metadata describing each component.
component_builder: The
:class:`rasa.nlu.components.ComponentBuilder` to use.
skip_validation: If set to `True`, does not check that all
required packages for the components are installed
before loading them.
should_finetune: Indicates if the model components will be fine-tuned.
Returns:
An interpreter that uses the created model.
"""
context = {"should_finetune": should_finetune}
if component_builder is None:
# If no builder is passed, every interpreter creation will result
# in a new builder. hence, no components are reused.
component_builder = components.ComponentBuilder()
pipeline = []
# Before instantiating the component classes,
# lets check if all required packages are available
if not skip_validation:
components.validate_requirements(model_metadata.component_classes)
for i in range(model_metadata.number_of_components):
component_meta = model_metadata.for_component(i)
component = component_builder.load_component(
component_meta, model_metadata.model_dir, model_metadata, **context
)
try:
updates = component.provide_context()
if updates:
context.update(updates)
pipeline.append(component)
except components.MissingArgumentError as e:
raise Exception(
"Failed to initialize component '{}'. "
"{}".format(component.name, e)
)
return Interpreter(pipeline, context, model_metadata)
def __init__(
self,
pipeline: List[Component],
context: Optional[Dict[Text, Any]],
model_metadata: Optional[Metadata] = None,
) -> None:
self.pipeline = pipeline
self.context = context if context is not None else {}
self.model_metadata = model_metadata
def parse(
self,
text: Text,
time: Optional[datetime.datetime] = None,
only_output_properties: bool = True,
) -> Dict[Text, Any]:
"""Parse the input text, classify it and return pipeline result.
The pipeline result usually contains intent and entities."""
if not text:
# Not all components are able to handle empty strings. So we need
# to prevent that... This default return will not contain all
# output attributes of all components, but in the end, no one
# should pass an empty string in the first place.
output = self.default_output_attributes()
output["text"] = ""
return output
data = self.default_output_attributes()
data[TEXT] = text
message = Message(data=data, time=time)
for component in self.pipeline:
component.process(message, **self.context)
output = self.default_output_attributes()
output.update(message.as_dict(only_output_properties=only_output_properties))
return output
def featurize_message(self, message: Message) -> Message:
"""
Tokenize and featurize the input message
Args:
message: message storing text to process;
Returns:
message: it contains the tokens and features which are the output of the
NLU pipeline;
"""
for component in self.pipeline:
if not isinstance(component, (EntityExtractor, IntentClassifier)):
component.process(message, **self.context)
return message
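# Hedged usage sketch (added): loading a persisted model and parsing a message with the
# Interpreter defined above would look roughly like
#
#   interpreter = Interpreter.load(model_dir)
#   result = interpreter.parse("hello there")
#   # result is a dict with at least "text", "intent" and "entities",
#   # per default_output_attributes(), plus whatever the pipeline components add.
#
# model_dir is whatever Trainer.persist() returned; the exact extra keys depend on the
# pipeline that was trained.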
|
the-stack_106_16705
|
import json
from collections import defaultdict
class Role(object):
"""
Role class
"""
def __init__(self, Id, Name, Parent):
self.Id = Id
self.Name = Name
self.Parent = Parent
def __roleObjDecoder(obj):
"""
Role decoder
:param obj: object
"""
return Role(obj['Id'], obj['Name'], obj['Parent'])
def setRoles(rolesData, loadDefault=False):
"""
Set roles data
:param rolesData: roles json string
:param loadDefault: if True, read the data from roles.json in the /data directory
:return: list of role objects
"""
roles = []
if loadDefault == True:
with open('data/roles.json') as role:
roles = json.loads(role.read(), object_hook=__roleObjDecoder)
else:
roles = json.loads(rolesData, object_hook=__roleObjDecoder)
# validate if no more than 1 Admin role entered
if(sum(u.Parent == 0 for u in roles) > 1):
raise ValueError(
"Invalid value, there should not be more than one Admin role in the system")
# validate if every role has valid parent [except Admin]
for role in roles:
if role.Parent == 0:
continue
if(sum(x.Id == role.Parent for x in roles) == 0):
raise ValueError(
f"Invalid value, role_Id: {role.Id} has invalid parent")
# convert roles to graph
__convertRolesToGraph(roles)
# set global roles variable
globals()["roles"] = roles
return roles
# local variables
graph = defaultdict(list) # graph dictionary
def __convertRolesToGraph(roles):
lastNodes = []
for role in roles:
if role.Parent == 0:
continue
graph[str(role.Parent)].append(str(role.Id))
lastNodes.append(role.Id)
if role.Parent in lastNodes:
lastNodes.remove(role.Parent)
for item in lastNodes:
graph[str(item)] = []
globals()["graph"] = graph
def getRoles():
if "roles" not in globals():
return None
return globals()["roles"]
|
the-stack_106_16706
|
import asyncio
import logging
import pickle
import aiohttp
from aiohttp import web
import sys
from harmonicIO.stream_connector.stream_connector import StreamConnector
from haste.cloud_gateway.auth import is_valid_login
_secret = None
# std_idle_time is in seconds
HIO_MASTER_HOST = '192.168.1.24'
HIO_MASTER_PORT = 8080
# Looks like max_try is broken -- 0 => never try!
sc = StreamConnector(HIO_MASTER_HOST, HIO_MASTER_PORT, max_try=2, std_idle_time=1)
async def handle(request):
# TODO: for security, this should be a constant-time equality compare
if not is_valid_login(request.headers.get('Authorization'), _secret):
return await _401_unauthorized()
text = "Hello!"
return web.Response(text=text)
async def handle_blob(request):
if not is_valid_login(request.headers.get('Authorization'), _secret):
return await _401_unauthorized()
logging.info('blob received!')
original_filename = request.headers['X-HASTE-original_filename']
tag = request.headers['X-HASTE-tag']
original_timestamp = request.headers['X-HASTE-unixtime']
stream_id = request.match_info.get('stream_id')
file = await request.content.read()
metadata = {
'timestamp': original_timestamp,
'original_filename': original_filename,
'tag': tag,
'stream_id': stream_id,
'image_length_bytes': len(file)}
logging.info(metadata)
if tag == 'vironova':
config = {
'container_name': 'benblamey/haste-image-proc:latest',
'container_os': 'ubuntu'}
logging.info(f'accepted tag:{tag}, config:{config}')
# The format of this binary blob is specific to the image analysis code.
# TODO: add link!
pickled_metadata = bytearray(pickle.dumps(metadata))
message_bytes = pickled_metadata + file
logging.info('sending data to HIO...')
sc.send_data(config['container_name'],
config['container_os'],
message_bytes)
elif tag == 'discard':
# This tag simply discards on the server-side. For benchmarking.
logging.info(f'tag {tag} -- discarding blob.')
pass
else:
logging.info(f'rejected tag:{tag}')
return await _412_tag_unknown()
return web.json_response({'result': 'OK!'})
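# Illustrative client call (added, not part of the gateway): a blob upload that this
# handler accepts is a POST to /stream/<stream_id> with HTTP Basic auth and the
# X-HASTE-* headers read above, e.g. roughly
#
#   curl -u user:password \
#        -H 'X-HASTE-original_filename: img_0001.tif' \
#        -H 'X-HASTE-tag: vironova' \
#        -H 'X-HASTE-unixtime: 1546300800' \
#        --data-binary @img_0001.tif \
#        http://<gateway-host>:8080/stream/my_stream
#
# The header names and route come from the code above; the credentials, host and file
# names are placeholders.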
async def _401_unauthorized():
# TODO: this could be an exception?
return web.Response(status=401, # Unauthorized
body='The request has not been applied because it lacks valid authentication credentials for the target resource.',
headers={'WWW-Authenticate': 'Basic realm="HASTE Cloud"'})
async def _412_tag_unknown():
# TODO: this could be an exception?
return web.Response(status=412, # Precondition Failed
body='The request has not been applied because it lacks valid X-HASTE-tag.')
if __name__ == '__main__':
_secret = sys.argv[1]
app = web.Application()
app.add_routes([web.get('/', handle),
web.get('/{name}', handle),
web.post('/stream/{stream_id}', handle_blob)
])
web.run_app(app,
port=8080,
host='0.0.0.0')
|
the-stack_106_16710
|
from __future__ import absolute_import
import json
import os
import re
AUTOMATIC = u"automatic"
MANUAL = u"manual"
TEST_TYPES = [AUTOMATIC, MANUAL]
class TestLoader(object):
def initialize(
self,
exclude_list_file_path,
include_list_file_path,
results_manager,
api_titles
):
self._exclude_list_file_path = exclude_list_file_path
self._include_list_file_path = include_list_file_path
self._results_manager = results_manager
self._tests = {}
self._tests[AUTOMATIC] = {}
self._tests[MANUAL] = {}
self._api_titles = api_titles
def load_tests(self, manifest_file_path):
manifest_file_handle = open(manifest_file_path)
manifest_file = manifest_file_handle.read()
manifest = json.loads(manifest_file)
tests = manifest[u"items"]
include_list = self._load_test_list(self._include_list_file_path)
exclude_list = self._load_test_list(self._exclude_list_file_path)
if u"testharness" in tests:
self._tests[AUTOMATIC] = self._load_tests(
tests=tests[u"testharness"],
exclude_list=exclude_list
)
if u"manual" in tests:
self._tests[MANUAL] = self._load_tests(
tests=tests[u"manual"],
include_list=include_list
)
for api in self._tests[AUTOMATIC]:
for test_path in self._tests[AUTOMATIC][api][:]:
if u"manual" not in test_path:
continue
self._tests[AUTOMATIC][api].remove(test_path)
if not self._is_valid_test(test_path,
include_list=include_list):
continue
if api not in self._tests[MANUAL]:
self._tests[MANUAL][api] = []
self._tests[MANUAL][api].append(test_path)
def _load_tests(self, tests, exclude_list=None, include_list=None):
loaded_tests = {}
for test in tests:
test_path = tests[test][0][0]
if not test_path.startswith("/"):
test_path = "/" + test_path
if self._is_valid_test(test_path, exclude_list, include_list):
api_name = self._parse_api_name(test_path)
if api_name not in loaded_tests:
loaded_tests[api_name] = []
loaded_tests[api_name].append(test_path)
return loaded_tests
def _parse_api_name(self, test_path):
for part in test_path.split(u"/"):
if part == u"":
continue
return part
def _is_valid_test(self, test_path, exclude_list=None, include_list=None):
is_valid = True
if include_list is not None and len(include_list) > 0:
is_valid = False
for include_test in include_list:
pattern = re.compile(u"^" + include_test)
if pattern.match(test_path) is not None:
is_valid = True
break
if not is_valid:
return is_valid
if exclude_list is not None and len(exclude_list) > 0:
is_valid = True
for exclude_test in exclude_list:
pattern = re.compile(u"^" + exclude_test)
if pattern.match(test_path) is not None:
is_valid = False
break
return is_valid
def _load_test_list(self, file_path):
tests = []
if not os.path.isfile(file_path):
return tests
file_handle = open(file_path)
file_content = file_handle.read()
for line in file_content.split():
line = line.replace(u"\n", u"")
line = re.sub(r"^#", u"", line)
if line == u"":
continue
tests.append(line)
return tests
def get_tests(
self,
types=[AUTOMATIC, MANUAL],
include_list=[],
exclude_list=[],
reference_tokens=[]
):
loaded_tests = {}
reference_results = self._results_manager.read_common_passed_tests(
reference_tokens)
for test_type in types:
if test_type not in TEST_TYPES:
continue
for api in self._tests[test_type]:
for test_path in self._tests[test_type][api]:
if not self._is_valid_test(test_path, exclude_list,
include_list):
continue
if reference_results is not None and \
test_path not in reference_results[api]:
continue
if api not in loaded_tests:
loaded_tests[api] = []
loaded_tests[api].append(test_path)
return loaded_tests
def get_apis(self):
apis = []
for test_type in TEST_TYPES:
for api in self._tests[test_type]:
in_list = False
for item in apis:
if item["path"] == "/" + api:
in_list = True
break
if in_list:
continue
title = None
for item in self._api_titles:
if item["path"] == "/" + api:
title = item["title"]
break
if title is None:
apis.append({"title": api, "path": "/" + api})
else:
apis.append({"title": title, "path": "/" + api})
return apis
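# Added note: load_tests() consumes a WPT-style MANIFEST.json; the only parts this
# class touches are manifest["items"]["testharness"] and manifest["items"]["manual"],
# where each entry maps a test to a list whose first element holds the test path,
# e.g. (shape assumed from the tests[test][0][0] access above):
#
#   {"items": {"testharness": {"dom/test.html": [["dom/test.html", {}]]},
#              "manual":      {"fs/x-manual.html": [["fs/x-manual.html", {}]]}}}
#
# Include/exclude list files are plain text with one path prefix per line, matched as
# a regex prefix by _is_valid_test().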
|
the-stack_106_16711
|
from unittest.mock import patch
import pytest
from ariane.apps.core import Ariane
class TestAriane:
"""Test the ariane core class."""
@pytest.mark.parametrize(
('intents', 'error'),
[
[[], None],
[['intent1'], None],
[['intent1', 'intent2'], None],
[['intent1', 'intent1'], AttributeError], # intent already exists
]
)
def test_register(
self, clean_ariane, wit_access_token, language_code, intents, error):
"""Test that register works as expectet.
The register function of ariane should return a AttributeError, if a keyword is already
used or the provided language is not supported.
"""
def test_func():
pass
if error:
with pytest.raises(error):
for intent in intents:
Ariane.register(intent, test_func)
else:
for intent in intents:
Ariane.register(intent, test_func)
ariane = Ariane(language_code)
for intent in intents:
assert ariane.actions[intent] == test_func
def test_handle(self, ariane_with_intent, wit_access_token, wit_response, language_code):
"""Test that handle is able to take care of a Wit response."""
with patch('wit.Wit.message', lambda x, y: wit_response):
ariane = Ariane(language_code)
assert ariane.handle('') == 'Success!'
|
the-stack_106_16713
|
"""deCONZ binary sensor platform tests."""
from unittest.mock import patch
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DOMAIN as DECONZ_DOMAIN,
)
from homeassistant.components.deconz.services import SERVICE_DEVICE_REFRESH
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import (
ATTR_DEVICE_CLASS,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_registry import async_entries_for_config_entry
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_request,
setup_deconz_integration,
)
async def test_no_binary_sensors(hass, aioclient_mock):
"""Test that no sensors in deconz results in no sensor entities."""
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
async def test_binary_sensors(hass, aioclient_mock, mock_deconz_websocket):
"""Test successful creation of binary sensor entities."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"dark": False, "presence": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "Temperature sensor",
"type": "ZHATemperature",
"state": {"temperature": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"name": "CLIP presence sensor",
"type": "CLIPPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"name": "Vibration sensor",
"type": "ZHAVibration",
"state": {
"orientation": [1, 2, 3],
"tiltangle": 36,
"vibration": True,
"vibrationstrength": 10,
},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 5
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == STATE_OFF
assert (
presence_sensor.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.MOTION
)
presence_temp = hass.states.get("sensor.presence_sensor_temperature")
assert presence_temp.state == "0.1"
assert presence_temp.attributes[ATTR_DEVICE_CLASS] == SensorDeviceClass.TEMPERATURE
assert hass.states.get("binary_sensor.temperature_sensor") is None
assert hass.states.get("binary_sensor.clip_presence_sensor") is None
vibration_sensor = hass.states.get("binary_sensor.vibration_sensor")
assert vibration_sensor.state == STATE_ON
assert (
vibration_sensor.attributes[ATTR_DEVICE_CLASS]
== BinarySensorDeviceClass.VIBRATION
)
vibration_temp = hass.states.get("sensor.vibration_sensor_temperature")
assert vibration_temp.state == "0.1"
assert vibration_temp.attributes[ATTR_DEVICE_CLASS] == SensorDeviceClass.TEMPERATURE
event_changed_sensor = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"presence": True},
}
await mock_deconz_websocket(data=event_changed_sensor)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_ON
await hass.config_entries.async_unload(config_entry.entry_id)
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_UNAVAILABLE
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
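# Note (added for readability, not part of the upstream test file): the
# mock_deconz_websocket payloads used in these tests mirror the deCONZ push API, i.e.
# dicts with "t" (message type), "e" (event: "added"/"changed"), "r" (resource, here
# "sensors"), "id" and either a "state"/"config" delta or a full "sensor" object, as
# seen in event_changed_sensor above and event_added_sensor further below.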
async def test_tampering_sensor(hass, aioclient_mock, mock_deconz_websocket):
"""Verify tampering sensor works."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {
"dark": False,
"lowbattery": False,
"presence": False,
"tampered": False,
},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(hass, aioclient_mock)
ent_reg = er.async_get(hass)
assert len(hass.states.async_all()) == 4
hass.states.get("binary_sensor.presence_sensor_low_battery").state == STATE_OFF
assert (
ent_reg.async_get("binary_sensor.presence_sensor_low_battery").entity_category
is EntityCategory.DIAGNOSTIC
)
presence_tamper = hass.states.get("binary_sensor.presence_sensor_tampered")
assert presence_tamper.state == STATE_OFF
assert (
presence_tamper.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.TAMPER
)
assert (
ent_reg.async_get("binary_sensor.presence_sensor_tampered").entity_category
is EntityCategory.DIAGNOSTIC
)
event_changed_sensor = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"tampered": True},
}
await mock_deconz_websocket(data=event_changed_sensor)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.presence_sensor_tampered").state == STATE_ON
await hass.config_entries.async_unload(config_entry.entry_id)
assert (
hass.states.get("binary_sensor.presence_sensor_tampered").state
== STATE_UNAVAILABLE
)
await hass.config_entries.async_remove(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_allow_clip_sensor(hass, aioclient_mock):
"""Test that CLIP sensors can be allowed."""
data = {
"sensors": {
"1": {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"name": "CLIP presence sensor",
"type": "CLIPPresence",
"state": {"presence": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"3": {
"config": {"on": True, "reachable": True},
"etag": "fda064fca03f17389d0799d7cb1883ee",
"manufacturername": "Philips",
"modelid": "CLIPGenericFlag",
"name": "Clip Flag Boot Time",
"state": {"flag": True, "lastupdated": "2021-09-30T07:09:06.281"},
"swversion": "1.0",
"type": "CLIPGenericFlag",
"uniqueid": "/sensors/3",
},
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(
hass, aioclient_mock, options={CONF_ALLOW_CLIP_SENSOR: True}
)
assert len(hass.states.async_all()) == 3
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_OFF
assert hass.states.get("binary_sensor.clip_presence_sensor").state == STATE_OFF
assert hass.states.get("binary_sensor.clip_flag_boot_time").state == STATE_ON
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert not hass.states.get("binary_sensor.clip_presence_sensor")
assert not hass.states.get("binary_sensor.clip_flag_boot_time")
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
assert hass.states.get("binary_sensor.clip_presence_sensor").state == STATE_OFF
assert hass.states.get("binary_sensor.clip_flag_boot_time").state == STATE_ON
async def test_add_new_binary_sensor(hass, aioclient_mock, mock_deconz_websocket):
"""Test that adding a new binary sensor works."""
event_added_sensor = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": {
"id": "Presence sensor id",
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
}
await setup_deconz_integration(hass, aioclient_mock)
assert len(hass.states.async_all()) == 0
await mock_deconz_websocket(data=event_added_sensor)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.presence_sensor").state == STATE_OFF
async def test_add_new_binary_sensor_ignored(
hass, aioclient_mock, mock_deconz_websocket
):
"""Test that adding a new binary sensor is not allowed."""
sensor = {
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"presence": False},
"config": {"on": True, "reachable": True},
"uniqueid": "00:00:00:00:00:00:00:00-00",
}
event_added_sensor = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": sensor,
}
config_entry = await setup_deconz_integration(
hass,
aioclient_mock,
options={CONF_MASTER_GATEWAY: True, CONF_ALLOW_NEW_DEVICES: False},
)
assert len(hass.states.async_all()) == 0
await mock_deconz_websocket(data=event_added_sensor)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
assert not hass.states.get("binary_sensor.presence_sensor")
entity_registry = er.async_get(hass)
assert (
len(async_entries_for_config_entry(entity_registry, config_entry.entry_id)) == 0
)
aioclient_mock.clear_requests()
data = {"groups": {}, "lights": {}, "sensors": {"1": sensor}}
mock_deconz_request(aioclient_mock, config_entry.data, data)
await hass.services.async_call(DECONZ_DOMAIN, SERVICE_DEVICE_REFRESH)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.presence_sensor")
|
the-stack_106_16714
|
print('Arithmetic Progression Generator (Advanced)')
print('-=' * 8)
pt = int(input('First term: '))
r = int(input('Common difference: '))
c = 10
count = 0
while c != 0:
d = c
while d != 0:
count += 1
d -= 1
print('{} -> '.format(pt), end='')
pt += r
    print('PAUSE')
    c = int(input('How many more terms do you want? '))
print('Program finished with {} terms of the progression counted, have a nice day!'.format(count))
|
the-stack_106_16715
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2018. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
import datetime
import tempfile
import os
import io
import mimetypes
import logging
import resilient
from bs4 import BeautifulSoup
from six import string_types
from cachetools import cached, TTLCache
try:
from HTMLParser import HTMLParser as htmlparser
except:
from html.parser import HTMLParser as htmlparser
INCIDENT_FRAGMENT = '#incidents'
PAYLOAD_VERSION = "1.0"
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
LOG.addHandler(logging.StreamHandler())
def build_incident_url(url, incidentId):
"""
    build the url to link to a Resilient incident
:param url: base url
:param incidentId:
:return: full url
"""
return '/'.join([url, INCIDENT_FRAGMENT, str(incidentId)])
def build_resilient_url(host, port):
"""
build basic url to resilient instance
:param host: host name
:param port: port
:return: base url
"""
if host.lower().startswith("http"):
return "{0}:{1}".format(host, port)
return "https://{0}:{1}".format(host, port)
def clean_html(html_fragment):
"""
Resilient textarea fields return html fragments. This routine will remove the html and insert any code within <div></div>
with a linefeed
:param html_fragment: str presenting the html to clean up
    :return: cleaned up text. This may not format well because line feeds are not preserved in the way supported by
      tags such as <br> or <ol>, <ul>, etc. See html2markdown for a better way to translate html input to markdown.
"""
if not html_fragment or not isinstance(html_fragment, string_types):
return html_fragment
s = BeautifulSoup(unescape(html_fragment), "html.parser")
return ' '.join(s.strings)
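# Illustrative sketch (not part of the original module): a minimal example of what
# clean_html does to a Resilient-style rich text fragment. The sample HTML is hypothetical.
def _example_clean_html():
    fragment = "<div>First line</div><div>Second &amp; last line</div>"
    # Tags are stripped, entities unescaped, and the text nodes joined with spaces,
    # giving "First line Second & last line".
    return clean_html(fragment)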
def unescape(data):
""" Return unescaped data such as > -> >, " -> ', etc.
:param data: text to convert
"""
if data is None:
return None
h = htmlparser()
return h.unescape(data)
def validate_fields(field_list, kwargs):
"""
Ensure each mandatory field in field_list is present in kwargs.
Throw ValueError if not.
field_list can be a list/tuple of strings where each string is
a field name or it can be a list/tuple of dicts where each item
has the attributes 'name' (required) and 'placeholder' (optional).
If the value of the item in kwargs is equal to its placeholder
defined in field_list, a ValueError is raised.
If an item in kwargs is a Resilient Select Function Input, its
value will be a dict that has a 'name' attribute. This returns
the value of 'name'.
If an item in kwargs is a Resilient Multi-Select Function Input, its
value will be a list of dicts that have the 'name' attribute. This
returns a list of the 'name' values for that item.
:param field_list: list/tuple of the mandatory fields. Can be an empty list if no mandatory fields.
:param kwargs: dict of all the fields to search.
:return: a Dictionary of all fields with Select/Multi-Select fields handled.
"""
mandatory_fields = field_list
provided_fields = kwargs
return_fields = {}
mandatory_err_msg = "'{0}' is mandatory and is not set. You must set this value to run this function"
# This is needed to handle something like: validate_fields(('incident_id'), kwargs)
# In this case field_list will be a string and not a tuple
if isinstance(mandatory_fields, string_types):
mandatory_fields = [mandatory_fields]
if not isinstance(mandatory_fields, list) and not isinstance(mandatory_fields, tuple):
raise ValueError("'field_list' must be of type list/tuple, not {0}".format(type(mandatory_fields)))
if not isinstance(provided_fields, dict):
raise ValueError("'kwargs' must be of type dict, not {0}".format(type(provided_fields)))
# Validate that mandatory fields exist + are not equal to their placeholder values
for field in mandatory_fields:
placeholder_value = None
if isinstance(field, dict):
placeholder_value = field.get("placeholder")
field = field.get("name")
# If the field value is a defined empty str, raise an error
if isinstance(provided_fields.get(field), string_types):
if not provided_fields.get(field):
raise ValueError(mandatory_err_msg.format(field))
if provided_fields.get(field) is None:
raise ValueError(mandatory_err_msg.format(field))
if placeholder_value and provided_fields.get(field) == placeholder_value:
raise ValueError(
"'{0}' is mandatory and still has its placeholder value of '{1}'. You must set this value correctly to run this function".format(
field, placeholder_value))
# Loop provided fields and get their value
for field_name, field_value in provided_fields.items():
# Handle if Select Function Input type
if isinstance(field_value, dict) and field_value.get("name"):
field_value = field_value.get("name")
# Handle if 'Text with value string Input' type
elif isinstance(field_value, dict) and field_value.get("content"):
field_value = field_value.get("content")
# Handle if Multi-Select Function Input type
elif isinstance(field_value, list):
field_value = [f.get("name") for f in field_value]
return_fields[field_name] = field_value
return return_fields
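# Illustrative sketch (not part of the original module): shows the three behaviours of
# validate_fields -- mandatory-field checking, placeholder detection, and unwrapping of
# Select / Multi-Select inputs. All field names and values here are hypothetical.
def _example_validate_fields():
    inputs = {
        "incident_id": 2095,
        "query": {"name": "malware"},                     # Select input -> unwrapped to "malware"
        "tags": [{"name": "phishing"}, {"name": "apt"}],  # Multi-Select -> ["phishing", "apt"]
    }
    # Raises ValueError if "incident_id" were missing, empty, or equal to its placeholder.
    return validate_fields([{"name": "incident_id", "placeholder": "<incident id>"}], inputs)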
def get_file_attachment(res_client, incident_id, artifact_id=None, task_id=None, attachment_id=None):
"""
call the Resilient REST API to get the attachment or artifact data
:param res_client: required for communication back to resilient
:param incident_id: required
:param artifact_id: optional
:param task_id: optional
:param attachment_id: optional
:return: byte string of attachment
"""
if incident_id and artifact_id:
data_uri = "/incidents/{}/artifacts/{}/contents".format(incident_id, artifact_id)
elif attachment_id:
if task_id:
data_uri = "/tasks/{}/attachments/{}/contents".format(task_id, attachment_id)
elif incident_id:
data_uri = "/incidents/{}/attachments/{}/contents".format(incident_id, attachment_id)
else:
raise ValueError("task_id or incident_id must be specified with attachment")
else:
raise ValueError("artifact or attachment or incident id must be specified")
# Get the data
return res_client.get_content(data_uri)
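# Illustrative sketch (not part of the original module): fetching an incident attachment
# and its display name. `res_client` is assumed to be an already-authenticated Resilient
# REST client; the incident and attachment ids are made up.
def _example_download_attachment(res_client):
    contents = get_file_attachment(res_client, incident_id=2095, attachment_id=7)
    name = get_file_attachment_name(res_client, incident_id=2095, attachment_id=7)
    return name, contents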
def get_file_attachment_metadata(res_client, incident_id, artifact_id=None, task_id=None, attachment_id=None):
"""
call the Resilient REST API to get the attachment or artifact attachment metadata
:param res_client: required for communication back to resilient
:param incident_id: required
:param artifact_id: optional
:param task_id: optional
:param attachment_id: optional
:return: file attachment metadata
"""
if incident_id and artifact_id:
metadata_url = "/incidents/{}/artifacts/{}".format(incident_id, artifact_id)
return res_client.get(metadata_url)["attachment"]
if attachment_id:
if task_id:
metadata_url = "/tasks/{}/attachments/{}".format(task_id, attachment_id)
elif incident_id:
metadata_url = "/incidents/{}/attachments/{}".format(incident_id, attachment_id)
else:
raise ValueError("If attachment_id is defined, you must specify task_id OR incident_id")
return res_client.get(metadata_url)
raise ValueError("artifact_id AND incident_id, OR attachment_id AND (task_id OR incident_id) must be specified")
def get_file_attachment_name(res_client, incident_id=None, artifact_id=None, task_id=None, attachment_id=None):
"""
call the Resilient REST API to get the attachment or artifact attachment name
:param res_client: required for communication back to resilient
:param incident_id: required
:param artifact_id: optional
:param task_id: optional
:param attachment_id: optional
:return: file attachment name
"""
name = ""
if incident_id and artifact_id:
name_url = "/incidents/{}/artifacts/{}".format(incident_id, artifact_id)
name = res_client.get(name_url)["attachment"]["name"]
elif attachment_id:
if task_id:
name_url = "/tasks/{}/attachments/{}".format(task_id, attachment_id)
name = res_client.get(name_url)["name"]
elif incident_id:
name_url = "/incidents/{}/attachments/{}".format(incident_id, attachment_id)
name = res_client.get(name_url)["name"]
else:
raise ValueError("task_id or incident_id must be specified with attachment")
else:
raise ValueError("artifact or attachment or incident id must be specified")
# Return name string
return name
def write_file_attachment(res_client, file_name, datastream, incident_id, task_id=None, content_type=None):
"""
call the Resilient REST API to create the attachment on incident or task
:param res_client: required for communication back to resilient
:param file_name: required, name of the attachment
    :param datastream: required, stream of bytes
:param incident_id: required
:param task_id: optional
:param content_type: optional, MIME type of attachment
:return: new attachment -dictionary of attachment metadata
"""
content_type = content_type \
or mimetypes.guess_type(file_name or "")[0] \
or "application/octet-stream"
attachment = datastream.read()
"""
Writing to temp path so that the REST API client can use this file path
to read and POST the attachment
"""
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
try:
temp_file.write(attachment)
temp_file.close()
# Create a new attachment by calling resilient REST API
if task_id:
attachment_uri = "/tasks/{}/attachments".format(task_id)
else:
attachment_uri = "/incidents/{}/attachments".format(incident_id)
new_attachment = res_client.post_attachment(attachment_uri,
temp_file.name,
filename=file_name,
mimetype=content_type)
finally:
os.unlink(temp_file.name)
if isinstance(new_attachment, list):
new_attachment = new_attachment[0]
return new_attachment
def readable_datetime(timestamp, milliseconds=True, rtn_format='%Y-%m-%dT%H:%M:%SZ'):
"""
convert an epoch timestamp to a string using a format
:param timestamp:
    :param milliseconds: True = epoch timestamp is in milliseconds
    :param rtn_format: format of the resultant string
:return: string representation of timestamp
"""
if milliseconds:
ts = int(timestamp / 1000)
else:
ts = timestamp
return datetime.datetime.utcfromtimestamp(ts).strftime(rtn_format)
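# Illustrative sketch (not part of the original module): converting an epoch timestamp in
# milliseconds (the Resilient default) into the default ISO-like string.
def _example_readable_datetime():
    # 1600000000000 ms -> "2020-09-13T12:26:40Z"
    return readable_datetime(1600000000000)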
def str_to_bool(value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = str(value).lower()
return value in ('1', 'true', 'yes', 'on')
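# Illustrative sketch (not part of the original module): str_to_bool only treats the
# strings '1', 'true', 'yes' and 'on' (case-insensitively) as True; anything else is False.
def _example_str_to_bool():
    return str_to_bool("Yes"), str_to_bool("0"), str_to_bool("off")   # (True, False, False)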
def write_to_tmp_file(data, tmp_file_name=None, path_tmp_dir=None):
"""Writes data to a file in a safely created temp directory. If no
`tmp_file_name` is provided, a temp name will be given. If no `path_tmp_dir`
is provided a temp directory is created with the prefix `resilient-lib-tmp-`.
When used within a Resilient Function, ensure you safely remove the created temp
directory in the `finally` block of the FunctionComponent code.
Example:
import os
import shutil
try:
path_tmp_file, path_tmp_dir = write_to_tmp_file(attachment_contents, tmp_file_name=attachment_metadata.get("name"))
except Exception:
yield FunctionError()
finally:
if path_tmp_dir and os.path.isdir(path_tmp_dir):
shutil.rmtree(path_tmp_dir)
:param data: bytes to be written to the file
:type data: `bytes`
:param tmp_file_name: name to be given to the file.
:type tmp_file_name: `str`
:param path_tmp_dir: path to an existing directory to use as the temp dir
:type path_tmp_dir: `str`
:return: a tuple (path_tmp_file, path_tmp_dir)
:rtype: tuple
"""
# If no tmp_file_name provided use next tempfile candidate name
if not tmp_file_name:
tmp_file_name = next(tempfile._get_candidate_names())
# If no path_tmp_dir provided, create one
if not path_tmp_dir:
path_tmp_dir = tempfile.mkdtemp(prefix="resilient-lib-tmp-")
elif not os.path.isdir(path_tmp_dir):
raise IOError("Path does not exist: {0}".format(path_tmp_dir))
# Generate path to tmp file
path_tmp_file = os.path.join(path_tmp_dir, tmp_file_name)
# Write the file
with io.open(path_tmp_file, mode="wb") as temp_file:
temp_file.write(data)
return (path_tmp_file, path_tmp_dir)
def close_incident(res_client, incident_id, kwargs):
"""
:param res_client: required for communication back to resilient
:param incident_id: required
:param kwargs: required field_name:new_value pairs dict
:return: response object
"""
if not incident_id:
raise ValueError("'incident_id' must be specified")
    # Query the incident type REST endpoint for fields marked "required": "close"; raise an error if any are missing from kwargs
required_fields = _get_required_fields(res_client)
missing_fields = [field for field in required_fields if field not in kwargs]
if missing_fields:
raise ValueError("Missing mandatory field(s) to close an incident: {0}".format(missing_fields))
    # check for the known mandatory field "plan_status"; if it is not in kwargs, add it
mandatory_fields = kwargs.copy()
if "plan_status" not in mandatory_fields:
mandatory_fields["plan_status"] = "C"
# API call to the Resilient REST API to patch the incident data (close incident)
response = _patch_to_close_incident(res_client, incident_id, mandatory_fields)
return response
def _get_required_fields(res_client):
"""
:param res_client: required for communication back to resilient
:return: list
"""
fields = _get_incident_fields(res_client)
fields_required = [field for field in fields if fields[field].get("required") == "close"]
return fields_required
@cached(cache=TTLCache(maxsize=10, ttl=600))
def _get_incident_fields(res_client):
"""
call the Resilient REST API to get list of fields required to close an incident
this call is cached for multiple calls
:param res_client: required for communication back to resilient
:return: json
"""
uri = "/types/incident"
response = res_client.get(uri)
incident_fields = response.get("fields")
return incident_fields
def _patch_to_close_incident(res_client, incident_id, close_fields):
"""
call the Resilient REST API to patch incident
:param res_client: required for communication back to resilient
:param incident_id: required
:param close_fields: required
:return: response object
"""
uri = "/incidents/{}".format(incident_id)
previous_object = res_client.get(uri)
patch = resilient.Patch(previous_object)
for field in close_fields:
patch.add_value(field, close_fields[field])
response = res_client.patch(uri, patch)
return response
|
the-stack_106_16716
|
import json
from flask import Flask, jsonify, request
from github import Github
from googlesearch import search
try:
from dotenv import load_dotenv
except ImportError:
print("No module named 'google' found")
from os import environ as env
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
app = Flask(__name__)
message_1 = "Here's what I found on the web for **"
message_2 = "Oops ! An error occured while processing data. \
Please follow the guidelines about how to use this bot \
--> https://github.com/FirePing32/Autolinks"
@app.route("/github/callback", methods=["POST"])
def issue():
secret = env["GH_TOKEN"]
g = Github(secret)
data = json.loads(request.data)
comment = data["comment"]["body"]
print(comment)
if comment.split()[0] == '!help' and data["action"] == "created":
try:
num = int(comment[-1])
query = comment[6:-2]
links = []
for j in search(query, tld="com", num=10, stop=num, pause=2):
links.append(j)
user_name = data["issue"]["user"]["login"]
post_url = data["comment"]["issue_url"] + "/comments"
repo = data["repository"]["name"]
issue_no = data["issue"]["number"]
print("\n" + post_url)
comment_body = message_1 + query + "** - \n\n"
for site_url in links:
comment_body = comment_body + "- " + site_url + "\n"
comment_body = comment_body + "\n" + f"Triggered by @{data['sender']['login']}"
print("\n" + comment_body)
g.get_user(user_name).get_repo(repo).get_issue(
issue_no
).create_comment(
comment_body
)
except Exception as e:
print(e)
user_name = data["issue"]["user"]["login"]
post_url = data["comment"]["issue_url"] + "/comments"
repo = data["repository"]["name"]
issue_no = data["issue"]["number"]
g.get_user(user_name).get_repo(repo).get_issue(
issue_no
).create_comment(
message_2
)
return jsonify("Method not allowed")
if __name__ == "__main__":
app.run(debug=True)
|
the-stack_106_16717
|
from matplotlib.pyplot import show
import streamlit as st
import datetime
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
from plots import plot_history, plot_candles, plot_forecast
symbols = [
'FLRY3.SA',
'ITSA4.SA',
'ARZZ3.SA',
'BPAC11.SA',#'BPAC11F.SA',
'SQIA3.SA',#'SQIA3F.SA',
'TAEE11.SA',#'TAEE11F.SA',
'VIIA3.SA',
'LAME4.SA',#'LAME4F.SA',
'PETR4.SA',#'PETR4F.SA',
'BRKM5.SA',#'BRKM5F.SA',
'LREN3.SA',#'LREN3F.SA',
'RADL3.SA',# 'RADL3F.SA',
'ENBR3.SA',#'ENBR3F.SA',
'EQTL3.SA',
'WEGE3.SA'#'WEGE3F.SA'
]
def get_stock_data(symbol,
start = None,
end = None,
t_delta_days = None,
verbose = False):
if t_delta_days is None:
t_delta = datetime.timedelta(days=2000)
else:
t_delta = datetime.timedelta(days=t_delta_days)
if start is None:
start = (datetime.datetime.now() - t_delta).strftime('%Y-%m-%d')
if end is None:
end = datetime.datetime.now().strftime('%Y-%m-%d')
print('symbol: {}, start: {}, end: {}, delta: {}'.format(symbol, start, end, t_delta))
df = yf.download(symbol, start=start,
end=end)
df['Date'] = df.index
df.reset_index(drop=True, inplace = True)
return df
def prophet_forecast(
data,
n_years = 1
):
period = n_years * 365
df_train = data[['Date', 'Close']]
df_train = df_train.rename(
columns = {'Date': 'ds',
'Close': 'y'}
)
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods = period)
y = m.predict(future)
return y, m
st.title('Brazilian Stock Forecast')
selected_stock = st.selectbox('Select a symbol for prediction',
symbols)
# slider selection of number of years
n_years = st.slider('Years of prediction: ', 1 ,4)
# getting the data for the selected symbol
df = get_stock_data(selected_stock)
st.subheader('Raw data')
st.write(df.head())
# candlestick
st.write('Candlestick for {}'.format(selected_stock))
fig1 = plot_candles(df, selected_stock)
st.plotly_chart(fig1)
# apply model
y, m = prophet_forecast(data=df,
n_years=n_years)
# Forecast
st.write('Forecast for {}'.format(selected_stock))
fig2 = plot_forecast(y,
period = 365*n_years)
st.pyplot(fig2)
st.write(y)
|
the-stack_106_16718
|
import requests
import datetime
import urllib.request
import urllib.error
import os
import io
import csv
import pandas as pd
class Job(object):
class Costant(object):
""" An innner class that stores all the constants. """
def __init__(self):
self.DAY_ZERO = datetime.datetime(2020, 1, 22) # 2020-1-23 will be day one.
self.STATES = self.load_states()
self.STATE_MAPPING = self.load_state_mapping()
def load_states(self):
""" Return a list of states. """
states = []
with open("./us_states_list.txt") as f:
for line in f:
states.append(line.strip())
return states
def load_state_mapping(self):
""" Return a mapping of <state id, state name>. """
MAPPING_CSV_URL = "https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master/data-locations/locations.csv"
f = io.StringIO(urllib.request.urlopen(MAPPING_CSV_URL).read().decode('utf-8'))
reader = csv.reader(f)
state_mapping = {}
# Skip first two lines
next(reader)
next(reader)
for row in reader:
state_id = int(row[1])
state_name = row[2]
state_mapping[state_id] = state_name
return state_mapping
""" Job class """
def __init__(self):
self.costant = self.Costant()
self.input_directory = "" # The directory of input reports.
self.output_directory = "" # The directory of output reports.
self.source = ""
def set_input_directory(self, input_directory):
self.input_directory = input_directory
def set_output_directory(self, output_directory):
self.output_directory = output_directory
def set_source(self, source):
self.source = source
def fetch_truth_cumulative_cases(self):
dataset = {}
URL = "https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master/data-truth/truth-Cumulative%20Cases.csv"
f = io.StringIO(urllib.request.urlopen(URL).read().decode('utf-8'))
reader = csv.reader(f)
header = next(reader, None)
location_col = -1
date_col = -1
value_col = -1
for i in range(0, len(header)):
if (header[i] == "location"):
location_col = i
elif (header[i] == "date"):
date_col = i
elif (header[i] == "value"):
value_col = i
for row in reader:
# Skip US' country level report.
if row[location_col] == "US" or row[location_col] == "NA":
continue
state_id = int(row[location_col])
if state_id not in self.costant.STATE_MAPPING:
continue
state = self.costant.STATE_MAPPING[state_id]
date = row[date_col]
val = int(row[value_col])
if state not in dataset:
dataset[state] = {}
dataset[state][date] = val
return dataset
def fetch_forecast_inc_cases(self, file_dir):
dataset = {}
with open(file_dir) as f:
reader = csv.reader(f)
header = next(reader, None)
# Because different csv files have different column arrangements,
# find out the index the columns containing different data fields first.
location_col = -1
date_col = -1
target_col = -1
type_col = -1
value_col = -1
for i in range(0, len(header)):
if (header[i] == "location"):
location_col = i
elif (header[i] == "target_end_date"):
date_col = i
elif (header[i] == "target"):
target_col = i
elif (header[i] == "type"):
type_col = i
elif (header[i] == "value"):
value_col = i
for row in reader:
if (row[type_col] == "point" \
and "inc case" in row[target_col] \
and row[location_col] != "US"):
state_id = int(row[location_col])
state = self.costant.STATE_MAPPING[state_id]
date = row[date_col]
val = int(float(row[value_col]))
if state not in dataset:
dataset[state] = {}
# Skip duplicate predictions on the same date.
if date in dataset[state]:
continue
dataset[state][date] = val
return dataset
def write_report(self, model_name, forecast_date, observed, predicted, output_model_dir):
columns = ['State']
columns.append((forecast_date - self.costant.DAY_ZERO).days)
for date_str in predicted[self.costant.STATES[0]]:
date = datetime.datetime.strptime(date_str,"%Y-%m-%d")
# Skip if the target end day is not Saturday.
if (date.weekday() != 5):
continue
columns.append((datetime.datetime.strptime(date_str,"%Y-%m-%d") - self.costant.DAY_ZERO).days)
dataframe = pd.DataFrame(columns=columns)
for state in self.costant.STATES:
new_row = {}
new_row["State"] = state
if state in observed and forecast_date.strftime("%Y-%m-%d") in observed[state]:
new_row[(forecast_date - self.costant.DAY_ZERO).days] = observed[state][forecast_date.strftime("%Y-%m-%d")]
else:
new_row[(forecast_date - self.costant.DAY_ZERO).days] = "NaN"
for date_str in predicted[self.costant.STATES[0]]:
date = datetime.datetime.strptime(date_str,"%Y-%m-%d")
# Skip if the target end day is not Saturday.
if (date.weekday() != 5):
continue
if state in predicted and date_str in predicted[state]:
new_row[(date - self.costant.DAY_ZERO).days] = predicted[state][date_str]
else:
new_row[(date - self.costant.DAY_ZERO).days] = "NaN"
dataframe = dataframe.append(new_row, ignore_index=True)
output_name = model_name + '_' + str((forecast_date - self.costant.DAY_ZERO).days) + ".csv"
output_name = output_name.replace('-', '_')
dataframe.to_csv(output_model_dir + output_name)
print(output_name + " has been written.")
def run(self):
"""
After data source, input, output directory have been set.
Read "{source}.txt" to fetch the forecast reports' filenames.
Generate the truth data set and forecast data set,
and write down formatted forecast reports into csv.
"""
forecasts = []
with open(self.source + ".txt") as f:
for line in f:
forecasts.append(line.strip())
observed = self.fetch_truth_cumulative_cases()
for forecast_filename in forecasts:
try:
forecast_date = datetime.datetime.strptime(forecast_filename[:10],"%Y-%m-%d")
model_name = forecast_filename[11:-4]
predicted = self.fetch_forecast_inc_cases(self.input_directory + forecast_filename)
                # Create the model_name output directory if it does not exist.
output_model_dir = (self.output_directory + model_name + '/').replace("-", "_")
if not os.path.exists(output_model_dir):
os.mkdir(output_model_dir)
self.write_report(model_name, forecast_date, observed, predicted, output_model_dir)
except:
# print("fail to read file " + forecast_filename + ".")
pass
if __name__ == "__main__":
job = Job()
job.set_input_directory("./input/")
job.set_output_directory("./output/")
job.set_source("state_case")
job.run()
|
the-stack_106_16720
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def eval(x,y):
dat=[]
c = 0
for i in [0.3,0.5,0.7,1.0]:
for j in [1.3,1.5,1.7,2.0]:
c+=1
dat.append([c,i,j,x,y,(i-x)*(j-y)])
return dat
def run(xin):
    inx = open(xin,'r')
for line in inx:
if line.startswith('x =' ):
x=float(line.split('=')[1])
elif line.startswith('case =' ):
case=line.split('=')[1].strip()
elif line.startswith('auxfile ='):
aux=line.split('=')[1].strip()
            iny = open(aux,'r')
for line in iny:
if line.startswith('y ='):
y=float(line.split('=')[1])
dat = eval(x,y)
    outf = open(case+'.csv','w')
outf.writelines('step,i,j,x,y,poly\n')
for e in dat:
outf.writelines(','.join(str(i) for i in e)+'\n')
outf.close()
if __name__=='__main__':
import sys
args = sys.argv
inp1 = args[args.index('-i')+1] if '-i' in args else None
run(inp1)
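# Illustrative note (not part of the original script): run() expects a plain-text input
# file with "key = value" lines; a hypothetical example would look like:
#
#   x = 0.5
#   case = poly_out
#   auxfile = aux.txt
#
# where the referenced aux file contains a line such as "y = 1.5". Invoking the script as
# `python <script>.py -i input.txt` would then write poly_out.csv with the sampled grid.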
|
the-stack_106_16721
|
# -*- coding: utf-8 -*-
"""SparkMonitor Jupyter Web Server Extension
This module adds a custom request handler to the Jupyter web server.
It proxies the Spark Web UI (by default running at 127.0.0.1:4040)
to the endpoint notebook_base_url/sparkmonitor.
TODO Create unique endpoints for different kernels or spark applications.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import os
import re
from bs4 import BeautifulSoup
from notebook.base.handlers import IPythonHandler
from tornado import httpclient
proxy_root = '/sparkmonitor'
class SparkMonitorHandler(IPythonHandler):
"""A custom tornado request handler to proxy Spark Web UI requests."""
async def get(self):
"""Handles get requests to the Spark UI
Fetches the Spark Web UI from the configured ports
"""
# print("SPARKMONITOR_SERVER: Handler GET")
        port = self.get_argument("port")
baseurl = os.environ.get('SPARKMONITOR_UI_HOST', '127.0.0.1')
port = os.environ.get('SPARKMONITOR_UI_PORT', port)
url = 'http://' + baseurl + ':' + port
# print("SPARKMONITOR_SERVER: Request URI" + self.request.uri)
# print("SPARKMONITOR_SERVER: Getting from " + url)
request_path = self.request.uri[(
self.request.uri.index(proxy_root) + len(proxy_root) + 1):]
self.replace_path = self.request.uri[:self.request.uri.index(
proxy_root) + len(proxy_root)]
# print("SPARKMONITOR_SERVER: Request_path " + request_path +
# " \n Replace_path:" + self.replace_path)
backendurl = url_path_join(url, request_path)
self.debug_url = url
self.backendurl = backendurl
http = httpclient.AsyncHTTPClient()
try:
response = await http.fetch(backendurl)
except Exception as e:
print('SPARKMONITOR_SERVER: Spark UI Error ', e)
else:
self.handle_response(response)
def handle_response(self, response):
"""Sends the fetched page as response to the GET request"""
if response.error:
content_type = 'application/json'
content = json.dumps({'error': 'SPARK_UI_NOT_RUNNING',
'url': self.debug_url,
'backendurl': self.backendurl,
'replace_path': self.replace_path
})
print('SPARKMONITOR_SERVER: Spark UI not running')
else:
content_type = response.headers['Content-Type']
# print("SPARKSERVER: CONTENT TYPE: "+ content_type + "\n")
if 'text/html' in content_type:
content = replace(response.body, self.replace_path)
elif 'javascript' in content_type:
body = "location.origin +'" + self.replace_path + "' "
content = response.body.replace(
b'location.origin', body.encode())
else:
# Probably binary response, send it directly.
content = response.body
self.set_header('Content-Type', content_type)
self.write(content)
self.finish()
def load_jupyter_server_extension(nb_server_app):
"""
Called when the Jupyter server extension is loaded.
Args:
nb_server_app (NotebookWebApplication): handle
to the Notebook webserver instance.
"""
print('SPARKMONITOR_SERVER: Loading Server Extension')
web_app = nb_server_app.web_app
host_pattern = '.*$'
route_pattern = url_path_join(
web_app.settings['base_url'], proxy_root + '.*')
web_app.add_handlers(host_pattern, [(route_pattern, SparkMonitorHandler)])
try:
import lxml # noqa
except ImportError:
BEAUTIFULSOUP_BUILDER = 'html.parser'
else:
BEAUTIFULSOUP_BUILDER = 'lxml'
# a regular expression to match paths against the Spark on EMR proxy paths
PROXY_PATH_RE = re.compile(r'\/proxy\/application_\d+_\d+\/(.*)')
# a tuple of tuples with tag names and their attribute to automatically fix
PROXY_ATTRIBUTES = (
(('a', 'link'), 'href'),
(('img', 'script'), 'src'),
)
def replace(content, root_url):
"""Replace all the links with our prefixed handler links,
e.g.:
/proxy/application_1467283586194_0015/static/styles.css" or
/static/styles.css
with
/spark/static/styles.css
"""
soup = BeautifulSoup(content, BEAUTIFULSOUP_BUILDER)
for tags, attribute in PROXY_ATTRIBUTES:
for tag in soup.find_all(tags, **{attribute: True}):
value = tag[attribute]
match = PROXY_PATH_RE.match(value)
if match is not None:
value = match.groups()[0]
tag[attribute] = url_path_join(root_url, value)
return str(soup)
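# Illustrative sketch (not part of the original module): what replace() does to a link in a
# fetched Spark UI page. The HTML fragment and the /sparkmonitor prefix are hypothetical.
def _example_replace():
    html = b'<a href="/proxy/application_1467283586194_0015/static/styles.css">css</a>'
    # The EMR-style proxy prefix is stripped and the handler root is prepended, giving
    # '<a href="/sparkmonitor/static/styles.css">css</a>'.
    return replace(html, "/sparkmonitor")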
def url_path_join(*pieces):
"""Join components of url into a relative url
Use to prevent double slash when joining subpath. This will leave the
initial and final / in place
"""
initial = pieces[0].startswith('/')
final = pieces[-1].endswith('/')
stripped = [s.strip('/') for s in pieces]
result = '/'.join(s for s in stripped if s)
if initial:
result = '/' + result
if final:
result = result + '/'
if result == '//':
result = '/'
return result
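# Illustrative sketch (not part of the original module): url_path_join collapses duplicate
# slashes while preserving a leading or trailing one. The paths below are made up.
def _example_url_path_join():
    return (
        url_path_join("/sparkmonitor/", "/jobs/"),             # -> "/sparkmonitor/jobs/"
        url_path_join("http://127.0.0.1:4040", "api", "v1"),   # -> "http://127.0.0.1:4040/api/v1"
    )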
|
the-stack_106_16723
|
from rest_framework import status
from rest_framework.mixins import CreateModelMixin
from rest_framework.response import Response
class BulkCreateModelMixin(CreateModelMixin):
"""
Either create a single or many model instances in bulk by using the
Serializers ``many=True``.
Example:
class ContactViewSet(StandartizedModelViewSet):
...
allow_bulk_create = True
...
"""
allow_bulk_create = False
def create(self, request, *args, **kwargs):
bulk = isinstance(request.data, list)
if not bulk:
return super().create(request, *args, **kwargs)
if not self.allow_bulk_create:
self.permission_denied(
request,
message='You do not have permission to create multiple objects'
)
serializer = self.get_serializer(data=request.data, many=True)
serializer.is_valid(raise_exception=True)
self.perform_bulk_create(serializer)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def perform_bulk_create(self, serializer):
return self.perform_create(serializer)
|
the-stack_106_16725
|
"""
Defines functions for double precision 16 character field writing.
"""
import sys
import warnings
from typing import List, Union
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.cards.utils import wipe_empty_fields
def print_scientific_double(value: float) -> str:
"""
Prints a value in 16-character scientific double precision.
Scientific Notation: 5.0E+1
Double Precision Scientific Notation: 5.0D+1
"""
if value < 0:
fmt = "%16.9e"
else:
fmt = "%16.10e"
svalue = fmt % value
field = svalue.replace('e', 'D')
if field == '-0.0000000000D+00':
field = '0.0000000000D+00'
#assert len(field) == 16, ('value=%r field=%r is not 16 characters '
# 'long, its %s' % (value, field, len(field)))
return field
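# Illustrative sketch (not part of the original module): the 16-character double-precision
# field uses Nastran's 'D' exponent marker, e.g.
#   print_scientific_double(1000.)  -> '1.0000000000D+03'
#   print_scientific_double(-.5)    -> '-5.000000000D-01'
def _example_print_scientific_double():
    return print_scientific_double(1000.), print_scientific_double(-.5)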
def print_field_double(value: Union[int, float, str, None]) -> str:
"""
Prints a 16-character width field
:param value: the value to print
    :returns field: a 16-character string
"""
if isinstance(value, integer_types):
field = "%16s" % value
elif isinstance(value, float):
field = print_scientific_double(value)
elif value is None:
field = " "
else:
field = "%16s" % value
if len(field) != 16:
msg = 'field=%r is not 16 characters long...rawValue=%r' % (field, value)
raise RuntimeError(msg)
return field
def print_card_double(fields: List[Union[int, float, str, None]], wipe_fields: bool=True) -> str:
"""
Prints a nastran-style card with 16-character width fields.
Parameters
----------
fields : List[varies]
all the fields in the BDF card (no trailing Nones)
wipe_fields : bool; default=True
some cards (e.g. PBEAM) have ending fields
that need to be there, others cannot have them.
Returns
-------
card : str
string representation of the card in small field format
.. note:: An internal field value of None or '' will be treated as
a blank field
.. note:: A large field format follows the 8-16-16-16-16-8 = 80
format where the first 8 is the card name or
blank (continuation). The last 8-character field indicates
an optional continuation, but because it's a left-justified
              unnecessary field, print_card doesn't use it.
.. code-block:: python
>>> fields = ['DUMMY', 1, 2, 3, None, 4, 5, 6, 7, 8.]
>>> print_card_double(fields)
DUMMY* 1 2 3
* 4 5 6 7
* 8.0000000000D+00
*
"""
if wipe_fields:
fields = wipe_empty_fields(fields)
nfields_main = len(fields) - 1 # chop off the card name
nbdf_lines = nfields_main // 8
if nfields_main % 8 != 0:
nbdf_lines += 1
nextra_fields = 8 * nbdf_lines - nfields_main
fields += [None] * nextra_fields
try:
out = '%-8s' % (fields[0] + '*')
except:
warnings.warn("ERROR! fields=%s" % fields)
sys.stdout.flush()
raise
for i in range(1, len(fields)):
field = fields[i]
try:
out += print_field_double(field)
except:
warnings.warn("bad fields = %s" % fields)
raise
if i % 4 == 0: # allow 1+4 fields per line
out = out.rstrip(' ')
if out[-1] == '\n': # empty line
out += '*'
out += '\n* '
out = out.rstrip(' *') # removes one continuation star
if not out.endswith('\n'):
out += '\n'
return out
|
the-stack_106_16726
|
from tkinter import *
from tkinter.tix import Tk, Control, ComboBox  # upgraded Tix widget package
from tkinter.messagebox import showinfo, showwarning, showerror  # message boxes of various types
from tkinter import filedialog
from PIL import Image, ImageTk
from tkinter.messagebox import *
import os
import operator
from numpy import *
import cv2
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
# from sklearn.externals import joblib
import joblib
# Algorithm section
# Load the dataset
def loadDataSet(k):  # k = how many of each person's 10 images go into the training set
    dataSetDir = 'ORL_Faces'
    # choose a random order for each person's images
    choose = random.permutation(10) + 1  # random permutation of 1-10: (0-9)+1
train_face = zeros((40 * k, 112 * 92))
train_face_number = zeros(40 * k)
test_face = zeros((40 * (10 - k), 112 * 92))
test_face_number = zeros(40 * (10 - k))
    for i in range(40):  # 40 people
        people_num = i + 1
        for j in range(10):  # each person has 10 different face images
            if j < k:  # training set
filename = dataSetDir + '/s' + str(people_num) + '/' + str(choose[j]) + '.pgm'
img = img2vector(filename)
train_face[i * k + j, :] = img
train_face_number[i * k + j] = people_num
else:
filename = dataSetDir + '/s' + str(people_num) + '/' + str(choose[j]) + '.pgm'
img = img2vector(filename)
test_face[i * (10 - k) + (j - k), :] = img
test_face_number[i * (10 - k) + (j - k)] = people_num
return train_face, train_face_number, test_face, test_face_number
# Convert an image into a matrix (row vector)
def img2vector(filename):
    img = cv2.imread(filename, 0)  # read as grayscale
    print(filename)
    rows, cols = img.shape
    imgVector = zeros((1, rows * cols))
    imgVector = reshape(img, (1, rows * cols))  # flatten 2-D into 1-D
return imgVector
def facefind():
    # Get the training and test sets
train_face, train_face_number, test_face, test_face_number = loadDataSet(8)
print(train_face)
print(train_face_number)
print(test_face)
print(test_face_number)
    # Fit PCA on the training set, reducing the data to 30 dimensions
pca = PCA(n_components=30).fit(train_face)
    # Project the training and test sets into the reduced space
x_train_pca = pca.transform(train_face)
x_test_pca = pca.transform(test_face)
    # Train a logistic regression classifier
classirfier = LogisticRegression()
lr = classirfier.fit(x_train_pca, train_face_number)
    # Save the model
joblib.dump(lr, 'lr.model')
    # Compute accuracy and recall
accuray = classirfier.score(x_test_pca, test_face_number)
    recall = accuray * 0.7  # rough placeholder estimate, not a true recall computation
return accuray, recall, pca
# GUI section
def choosePic():  # image-selection callback
    file_path = filedialog.askopenfilename()  # load a file
path.set(file_path)
img_open = Image.open(file.get())
img = ImageTk.PhotoImage(img_open)
pic_label.config(image=img)
pic_label.image = img
string = str(file.get())
    # the face to predict
    predict = img2vector(string)
    # load the saved model
LR = joblib.load('lr.model')
predict_people = LR.predict(pca.transform(predict))
    string1 = str("ID: %s  Accuracy: %f  Recall: %f" % (predict_people, accuray, recall))
    showinfo(title='Image analysis', message=string1)
# Initialize Tk()
accuray, recall, pca = facefind()
root = Tk()  # root is the root node of the layout; everything else is placed on top of it
root.geometry('260x140')
root.title("Face Recognition System")  # set the window title
root.resizable(width=False, height=False)  # fix the window size
root.tk.eval('package require Tix')  # load the Tix package so the upgraded composite widgets can be used
path = StringVar()  # tracks changes to the variable's value
Button(root, text='Select Image', command=choosePic, width=1, height=1).grid(row=1, column=1, sticky=W + E + N + S, padx=40,
       pady=20)  # command sets the callback function
file = Entry(root, state='readonly', text=path)
file.grid(row=0, column=1, sticky=W + E + S + N, padx=6, pady=20)  # used as a text entry
pic_label = Label(root, text='Image', padx=30, pady=10)
pic_label.grid(row=0, column=2, rowspan=4, sticky=W + E + N + S)
root.mainloop()
|
the-stack_106_16729
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from os.path import expanduser, dirname, join
from glob import glob
from itertools import chain
from subprocess import check_output, CalledProcessError
import sys
import distutils.unixccompiler
__version__ = '1.4.0'
###############################################################################
# Monkey-patch setuptools to compile in parallel (copied from pytorch)
###############################################################################
original_link = distutils.unixccompiler.UnixCCompiler.link
def parallelCCompile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# compile using a thread pool
import multiprocessing.pool
def _single_compile(obj):
src, ext = build[obj]
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
num_jobs = multiprocessing.cpu_count()
multiprocessing.pool.ThreadPool(num_jobs).map(_single_compile, objects)
return objects
def patched_link(self, *args, **kwargs):
_cxx = self.compiler_cxx
self.compiler_cxx = None
result = original_link(self, *args, **kwargs)
self.compiler_cxx = _cxx
return result
distutils.ccompiler.CCompiler.compile = parallelCCompile
distutils.unixccompiler.UnixCCompiler.link = patched_link
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
sources = list(chain(
glob('py/*.cpp'),
glob('replayer/*.cpp'),
glob('client/*.cpp'),
))
print(sources)
ext_modules = [
Extension(
'torchcraft',
sources,
include_dirs=[
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True),
"include",
"replayer",
".",
"BWEnv/fbs",
],
# TODO Search for ZSTD and define this if it exists
define_macros=[('WITH_ZSTD', None)],
libraries=['zstd', 'zmq'],
language='c++'
),
]
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
if sys.platform == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append("-std=c++11")
elif ct == 'msvc':
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = opts
build_ext.build_extensions(self)
setup(
name='torchcraft',
version=__version__,
author='Zeming Lin',
author_email='[email protected]',
url='',
description='Torchcraft',
long_description='',
ext_modules=ext_modules,
install_requires=['pybind11>=2.1'],
cmdclass={'build_ext': BuildExt},
zip_safe=False,
)
|
the-stack_106_16730
|
# _*_ coding:utf-8 _*_
'''
Vectorindexer
'''
from pyspark.sql import SparkSession
from pyspark.ml.feature import VectorIndexer
spark = SparkSession.builder.appName("vectorindexer").getOrCreate()
paths="/export/home/ry/spark-2.2.1-bin-hadoop2.7/data/mllib/"
data=spark.read.format("libsvm").load(paths+"sample_isotonic_regression_libsvm_data.txt")
indexer=VectorIndexer(inputCol="features",outputCol="indexed",maxCategories=10)
indexerModel=indexer.fit(data)
indexedData=indexerModel.transform(data)
indexedData.show()
|
the-stack_106_16731
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections.abc import MutableMapping
import glob as local_glob
from gzip import GzipFile
from urllib.parse import urlparse
from pyarrow import FileSystem
from pyarrow import LocalFileSystem as ArrowLocalFileSystem
from pyarrow import HadoopFileSystem
from pyarrow.util import implements
try:
import lz4
import lz4.frame
except ImportError: # pragma: no cover
lz4 = None
compressions = {
'gzip': lambda f: GzipFile(fileobj=f)
}
if lz4:
compressions['lz4'] = lz4.frame.open
FileSystem = FileSystem
class LocalFileSystem(ArrowLocalFileSystem):
_fs_instance = None
@classmethod
def get_instance(cls):
if cls._fs_instance is None:
cls._fs_instance = LocalFileSystem()
return cls._fs_instance
@implements(FileSystem.delete)
def delete(self, path, recursive=False):
if os.path.isfile(path):
os.remove(path)
elif not recursive:
os.rmdir(path)
else:
shutil.rmtree(path)
def stat(self, path):
os_stat = os.stat(path)
stat = dict(name=path, size=os_stat.st_size, created=os_stat.st_ctime)
if os.path.isfile(path):
stat['type'] = 'file'
elif os.path.isdir(path):
stat['type'] = 'directory'
else:
stat['type'] = 'other'
return stat
@staticmethod
def glob(path):
return local_glob.glob(path)
file_systems = {
'file': LocalFileSystem,
'hdfs': HadoopFileSystem
}
def _parse_from_path(uri):
parsed_uri = urlparse(uri)
options = dict()
options['host'] = parsed_uri.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0]
if parsed_uri.port:
options["port"] = parsed_uri.port
if parsed_uri.username:
options["user"] = parsed_uri.username
if parsed_uri.password:
options["password"] = parsed_uri.password
return options
def get_fs(path, storage_options):
if os.path.exists(path) or local_glob.glob(path):
scheme = 'file'
else:
scheme = urlparse(path).scheme
if scheme == '' or len(scheme) == 1: # len == 1 for windows
scheme = 'file'
if scheme == 'file':
return file_systems[scheme].get_instance()
else:
options = _parse_from_path(path)
storage_options = storage_options or dict()
storage_options.update(options)
return file_systems[scheme](**storage_options)
def open_file(path, mode='rb', compression=None, storage_options=None):
fs = get_fs(path, storage_options)
f = fs.open(path, mode=mode)
if compression is not None:
compress = compressions[compression]
f = compress(f)
return f
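# Illustrative sketch (not part of the original module): open_file dispatches on the path
# scheme and optionally wraps the stream in a decompressor. The path below is hypothetical.
def _example_read_compressed_csv():
    # Reads a gzip-compressed local file through the LocalFileSystem instance.
    with open_file('/tmp/data.csv.gz', mode='rb', compression='gzip') as f:
        return f.read()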
def glob(path, storage_options=None):
if '*' in path:
fs = get_fs(path, storage_options)
return fs.glob(path)
else:
return [path]
def file_size(path, storage_options=None):
fs = get_fs(path, storage_options)
return fs.stat(path)['size']
class FSMap(MutableMapping):
"""Wrap a FileSystem instance as a mutable wrapping.
The keys of the mapping become files under the given root, and the
values (which must be bytes) the contents of those files.
Parameters
----------
root: string
prefix for all the files
fs: FileSystem instance
check: bool (=True)
performs a touch at the location, to check for write access.
"""
def __init__(self, root, fs, check=False, create=False):
self.fs = fs
self.root = self._get_path(fs, root)
if create:
if not self.fs.exists(root):
self.fs.mkdir(root)
if check:
if not self.fs.exists(root):
raise ValueError(
"Path %s does not exist. Create "
" with the ``create=True`` keyword" % root
)
with self.fs.open(fs.pathsep.join([root, "a"]), 'w'):
pass
self.fs.rm(fs.pathsep.join([root, "a"]))
@staticmethod
def _get_path(fs, path):
return path if isinstance(fs, LocalFileSystem) else urlparse(path).path
@staticmethod
def _normalize_path(fs, path, lstrip=False, rstrip=False):
if fs.pathsep != '/':
path = path.replace('/', fs.pathsep)
if lstrip:
path = path.lstrip(fs.pathsep)
if rstrip:
path = path.rstrip(fs.pathsep)
return path
@staticmethod
def _join_path(fs, paths):
if fs.pathsep == '/':
return '/'.join(paths)
new_paths = []
for i, path in enumerate(paths):
path = FSMap._normalize_path(fs, path, lstrip=i > 0,
rstrip=i < len(paths) - 1)
new_paths.append(path)
return fs.pathsep.join(new_paths)
def clear(self):
"""Remove all keys below root - empties out mapping
"""
try:
self.fs.rm(self.root, True)
self.fs.mkdir(self.root)
except: # noqa: E722
pass
def _key_to_str(self, key):
"""Generate full path for the key"""
if isinstance(key, (tuple, list)):
key = str(tuple(key))
else:
key = str(key)
return self._join_path(self.fs, [self.root, key]) if self.root else key
def _str_to_key(self, s):
"""Strip path of to leave key name"""
key = self._normalize_path(self.fs, s[len(self.root):], lstrip=True)
if self.fs.pathsep != '/':
key = key.replace(self.fs.pathsep, '/')
return key
def __getitem__(self, key, default=None):
"""Retrieve data"""
key = self._key_to_str(key)
try:
result = self.fs.cat(key)
except: # noqa: E722
if default is not None:
return default
raise KeyError(key)
return result
def pop(self, key, default=None):
result = self.__getitem__(key, default)
try:
del self[key]
except KeyError:
pass
return result
@staticmethod
def _parent(fs, path):
path = FSMap._get_path(fs, path.rstrip(fs.pathsep))
if fs.pathsep in path:
return path.rsplit(fs.pathsep, 1)[0]
else:
return ''
def __setitem__(self, key, value):
"""Store value in key"""
key = self._key_to_str(key)
try:
self.fs.mkdir(self._parent(self.fs, key))
except FileExistsError:
pass
with self.fs.open(key, "wb") as f:
f.write(value)
@staticmethod
def _find(fs, path):
out = set()
for path, dirs, files in fs.walk(path):
out.update(fs.pathsep.join([path, f]) for f in files)
if fs.isfile(path) and path not in out:
# walk works on directories, but find should also return [path]
# when path happens to be a file
out.add(path)
return sorted(out)
def __iter__(self):
return (self._str_to_key(x) for x in self._find(self.fs, self.root))
def __len__(self):
return len(self._find(self.fs, self.root))
def __delitem__(self, key):
"""Remove key"""
try:
self.fs.rm(self._key_to_str(key))
except: # noqa: E722
raise KeyError
def __contains__(self, key):
"""Does key exist in mapping?"""
return self.fs.exists(self._key_to_str(key))
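# Illustrative sketch (not part of the original module): using FSMap to treat a local
# directory as a dict of byte strings. The directory path is hypothetical.
def _example_fsmap():
    store = FSMap('/tmp/mars-kv-store', LocalFileSystem.get_instance(), create=True)
    store['chunks/0'] = b'hello'        # written to /tmp/mars-kv-store/chunks/0
    return store['chunks/0'], list(store)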
|
the-stack_106_16733
|
from keras.models import Sequential
from keras.optimizers import Adam
from ntm_keras.ntm import NeuralTuringMachine as NTM
from ntm_keras.ntm import controller_input_output_shape as controller_shape
from keras.layers.recurrent import LSTM
import numpy as np
def generator():
input_file = "input.npy"
target_file = "target.npy"
inp = np.load(input_file)
target = np.load(target_file)
while True:
example_id = np.random.randint(0, 142)
yield (np.array([inp[example_id]]), np.array([target[example_id]]))
def model():
    input_dim = 128  # or so; the number of piano keys, or a bit fewer
output_dim = 128
batch_size = 1
m_depth = 250 #memory depth
n_slots = 128 #number of slots
shift_range = 3
read_heads = 1
write_heads = 1
controller_input_dim, controller_output_dim = controller_shape(input_dim, output_dim, m_depth, n_slots, shift_range, read_heads, write_heads)
print("Creating a following NTM architecture")
print("Every single one-hot encoding is %d"%input_dim)
print("Two LSTM layers with input dimenstion %d, output dimenstion %d"%(controller_input_dim,controller_output_dim) )
print("NTM memory depth is %d, number of slots %d, head shift range %d"%(m_depth,n_slots,shift_range))
controller = Sequential()
controller.name="Two layer LSTM"
controller.add(LSTM(units= 1024,
stateful=True,return_sequences=True,
implementation=2, # best for gpu. other ones also might not work.
batch_input_shape=(batch_size, None, controller_input_dim)))
controller.add(LSTM(units=controller_output_dim,
activation='linear', #this has to be linear if I understand it correctly
stateful=True,
implementation=2)) # best for gpu. other ones also might not work.
lr = 5e-4
clipnorm = 10
sgd = Adam(lr=lr, clipnorm=clipnorm)
controller.compile(loss='binary_crossentropy', optimizer=sgd,
metrics = ['binary_accuracy'], sample_weight_mode="temporal")
model = Sequential()
model.name = "NTM_-_" + controller.name
ntm = NTM(output_dim, n_slots=n_slots, m_depth=m_depth, shift_range=shift_range,
controller_model=controller,
return_sequences=True,
input_shape=(250, input_dim),
batch_size = batch_size)
model.add(ntm)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics = ['binary_accuracy'], sample_weight_mode="temporal")
model.summary()
return model
num_epochs = 10
sample_generator = generator()
model = model()
model.fit_generator(sample_generator, steps_per_epoch=10, epochs=num_epochs)#, callbacks=callbacks)
|
the-stack_106_16734
|
"""Webserver example."""
from aiohttp import web
from aries_staticagent import StaticConnection, utils
from common import config
def main():
"""Create StaticConnection and start web server."""
keys, target, args = config()
conn = StaticConnection(keys, target)
@conn.route("https://didcomm.org/basicmessage/1.0/message")
async def basic_message(msg, conn):
"""Respond to a basic message."""
await conn.send_async({
"@type": "https://didcomm.org/"
"basicmessage/1.0/message",
"~l10n": {"locale": "en"},
"sent_time": utils.timestamp(),
"content": "You said: {}".format(msg['content'])
})
async def handle(request):
"""aiohttp handle POST."""
response = []
with conn.session(response.append) as session:
await conn.handle(await request.read(), session)
if response:
return web.Response(text=response.pop())
raise web.HTTPAccepted()
app = web.Application()
app.add_routes([web.post('/', handle)])
web.run_app(app, port=args.port)
if __name__ == '__main__':
main()
|
the-stack_106_16735
|
import re
import time
import threading
from .utils import is_windows, encode_attr
from .event import Event
from .control import Control
class Connection:
def __init__(self, conn_id):
self.conn_id = conn_id
self.lock = threading.Lock()
self.win_command_pipe = None
self.win_event_pipe = None
self.event_available = threading.Event()
self.last_event = None
self._event_handlers = {}
if is_windows():
self.__init_windows()
else:
self.__init_linux()
self.__start_event_loop()
self.on_event = self.__on_event
def __on_event(self, evt):
pass
def send_batch(self, commands):
with self.lock:
self.__send("begin")
for command in commands:
self.__send(command)
result = self.__send("end")
if result == "":
return []
else:
return result.split('\n')
def send(self, command):
with self.lock:
return self.__send(command)
def __send(self, command):
fire_and_forget = False
cmdName = command.split(' ', 1)[0].strip()
if cmdName[len(cmdName) - 1] == 'f' or cmdName.lower() == 'close':
fire_and_forget = True
if is_windows():
return self.__send_windows(command, fire_and_forget)
else:
return self.__send_linux(command, fire_and_forget)
def wait_event(self):
self.event_available.clear()
self.event_available.wait()
return self.last_event
def wait_close(self):
while True:
e = self.wait_event()
if e.target == "page" and e.name == "close":
break
def __start_event_loop(self):
thread = threading.Thread(target=self.__event_loop, daemon=True)
thread.start()
def __event_loop(self):
while True:
if is_windows():
evts = self.__wait_events_windows()
else:
evts = self.__wait_events_linux()
for e in evts:
if e == None:
return
if self.on_event != None:
self.on_event(e)
if e.target == "page" and e.name == "close":
self.close()
return
elif e.target != "page" or e.name != "change":
self.last_event = e
self.event_available.set()
def __init_windows(self):
self.win_command_pipe = open(rf'\\.\pipe\{self.conn_id}', 'r+b', buffering=0)
self.win_event_pipe = open(rf'\\.\pipe\{self.conn_id}.events', 'r+b', buffering=0)
def __send_windows(self, command, fire_and_forget):
# send command
self.win_command_pipe.write(command.encode('utf-8'))
if fire_and_forget:
return
# wait for result
r = self.win_command_pipe.readline().decode('utf-8').strip('\n')
result_parts = re.split(r"\s", r, 1)
if result_parts[0] == "error":
raise Exception(result_parts[1])
result = result_parts[1]
extra_lines = int(result_parts[0])
for _ in range(extra_lines):
line = self.win_command_pipe.readline().decode('utf-8').strip('\n')
result = result + "\n" + line
return result
def __wait_events_windows(self):
r = self.win_event_pipe.readline().decode('utf-8').strip('\n')
yield self.__parse_event_line(r)
def __init_linux(self):
pass
def __send_linux(self, command, fire_and_forget):
# send command
pipe = open(rf'{self.conn_id}', "w")
pipe.write(command)
pipe.close()
if fire_and_forget:
return
# wait for result
pipe = open(rf'{self.conn_id}', "r")
r = pipe.readline().strip('\n')
result_parts = re.split(r"\s", r, 1)
if result_parts[0] == "error":
raise Exception(result_parts[1])
result = result_parts[1]
extra_lines = int(result_parts[0])
for _ in range(extra_lines):
line = pipe.readline().strip('\n')
result = result + "\n" + line
pipe.close()
return result
def __wait_events_linux(self):
for line in open(rf'{self.conn_id}.events', "r"):
yield self.__parse_event_line(line.strip('\n'))
def __parse_event_line(self, line):
if line == "":
return None
result_parts = re.split(r"\s", line, 2)
return Event(result_parts[0], result_parts[1], result_parts[2])
def close(self):
        if self.win_command_pipe is not None:
            self.win_command_pipe.close()
        if self.win_event_pipe is not None:
            self.win_event_pipe.close()
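# --- Illustrative usage sketch (not part of the original module) ---
# Minimal driver for the Connection class, assuming a UI server is already
# listening on the named pipe identified by `conn_id` below (both the id and
# the command string are hypothetical placeholders).
def _usage_sketch():
    conn = Connection("my_page_pipe_id")          # hypothetical pipe id
    result = conn.send("add text value='Hello'")  # hypothetical protocol command
    print("server replied:", result)
    conn.wait_close()                             # blocks until a page "close" event
    conn.close()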
|
the-stack_106_16739
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import distutils.util
from typing import Optional, Text, List, Dict, Any, Union
from absl import logging
from tfx import types
from tfx.components.example_gen import component, base_example_gen_executor
from tfx.extensions.google_cloud_big_query import utils
from tfx.dsl.components.base import executor_spec
from tfx.proto import example_gen_pb2
import apache_beam as beam
import tensorflow as tf
from google.cloud import bigquery
from src.beam_components.window import window_elements
from src.serialisers.tfexample import TFExampleSerialiser, TFSequenceExampleSerialiser
class _BigQueryConverter(object):
def __init__(
self,
query: Text,
use_sequenceexample: bool = False,
project_id: Optional[Text] = None,
):
client = bigquery.Client(project=project_id)
# Dummy query to get the type information for each field.
query_job = client.query("SELECT * FROM ({}) LIMIT 0".format(query))
results = query_job.result()
schema = {f.name: f.field_type for f in results.schema}
if use_sequenceexample:
self._serialiser = TFSequenceExampleSerialiser(schema)
else:
self._serialiser = TFExampleSerialiser(schema)
def Convert(
self, window: List[Dict[Text, Any]]
) -> Union[tf.train.Example, tf.train.SequenceExample]:
return self._serialiser.from_json(window)
class _BigQueryTimestampParser(beam.DoFn):
def __init__(self, timestamp_column):
super().__init__()
self._timestamp_column = timestamp_column
def process(self, bigquery_item):
# Extract the numeric Unix seconds-since-epoch timestamp to be
# associated with the current log entry.
timestamp = bigquery_item[self._timestamp_column]
# Wrap and emit the current entry and new timestamp in a
# TimestampedValue.
yield beam.window.TimestampedValue(bigquery_item, timestamp.timestamp())
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
def _BigQueryToExampleWithSlidingWindow(
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any], split_pattern: Text
) -> beam.pvalue.PCollection:
# TODO: retrieve the window_length property better:
custom_config = ast.literal_eval(exec_properties.get("custom_config"))
window_length = int(custom_config["window_length"])
bq_timestamp_attribute = custom_config["bq_timestamp_attribute"]
drop_irregular_windows = bool(
distutils.util.strtobool(custom_config["drop_irregular_windows"])
)
use_sequenceexample = bool(
distutils.util.strtobool(custom_config["use_sequenceexample"])
)
beam_pipeline_args = exec_properties["_beam_pipeline_args"]
pipeline_options = beam.options.pipeline_options.PipelineOptions(beam_pipeline_args)
# Try to parse the GCP project ID from the beam pipeline options.
project = pipeline_options.view_as(
beam.options.pipeline_options.GoogleCloudOptions
).project
if isinstance(project, beam.options.value_provider.ValueProvider):
project = project.get()
converter = _BigQueryConverter(split_pattern, use_sequenceexample, project)
if drop_irregular_windows:
logging.warning("ExampleGen will silently drop windows with irregular lengths")
windowed_rows = (
pipeline
| "QueryTable" >> utils.ReadFromBigQuery(query=split_pattern)
| "ParseTimestamp"
>> beam.ParDo(_BigQueryTimestampParser(bq_timestamp_attribute))
| "WindowElements"
>> window_elements(
window_length=window_length,
drop_irregular_windows=drop_irregular_windows,
sort_windows_by=bq_timestamp_attribute,
)
)
if use_sequenceexample:
logging.warning("ExampleGen will output tf.train.SequenceExample")
return windowed_rows | "MapToTFSequenceExample" >> beam.Map(
converter.Convert
).with_output_types(tf.train.SequenceExample)
else:
logging.warning("ExampleGen will output tf.train.Example")
return windowed_rows | "MapToTFExample" >> beam.Map(
converter.Convert
).with_output_types(tf.train.Example)
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
"""TFX BigQueryExampleGen executor extended with sliding window."""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for BigQuery to TF examples."""
return _BigQueryToExampleWithSlidingWindow
class BigQueryExampleWithSlidingWindowGen(component.QueryBasedExampleGen):
"""UNOfficial TFX BigQueryExampleGen component with sliding windows.
The BigQuery examplegen component takes a query, and generates train
and eval examples for downsteam components.
"""
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(Executor)
def __init__(
self,
window_length: int,
bq_timestamp_attribute: str,
drop_irregular_windows: bool = True,
use_sequenceexample: bool = False,
input_config: Optional[example_gen_pb2.Input] = None,
output_config: Optional[example_gen_pb2.Output] = None,
example_artifacts: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
):
"""Constructs a BigQueryExampleWithSlidingWindowGen component.
Args:
window_length: The length of the sliding window to generate.
Unit is both seconds and elements, as the underlying elements are
expected to be timestamped at a rate of 1Hz.
bq_timestamp_attribute: The attribute in bigquery to use for the timestamp
of the elements.
drop_irregular_windows: Flag whether to drop windows that do not have the
specified window_length. This can happen if the underlying timestamps
have a frequency other than 1Hz, as well as at the boundaries of the
bigquery query. Default True.
use_sequenceexample: Flag whether to return sequenceexamples.
If True will return elements of type tf.train.SequenceExample
If False will return elements of type tf.train.Example
Defaults to False.
input_config: An example_gen_pb2.Input instance with Split.pattern as
BigQuery sql string. If set, it overwrites the 'query' arg, and allows
different queries per split. If any field is provided as a
RuntimeParameter, input_config should be constructed as a dict with the
same field names as Input proto message.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1. If any field is provided as a RuntimeParameter,
input_config should be constructed as a dict with the same field names
as Output proto message.
example_artifacts: Optional channel of 'ExamplesPath' for output train and
eval examples.
instance_name: Optional unique instance name. Necessary if multiple
BigQueryExampleGen components are declared in the same pipeline.
Raises:
RuntimeError: Only one of query and input_config should be set.
"""
super(BigQueryExampleWithSlidingWindowGen, self).__init__(
input_config=input_config,
custom_config={
"window_length": str(window_length),
"bq_timestamp_attribute": str(bq_timestamp_attribute),
"drop_irregular_windows": str(drop_irregular_windows),
"use_sequenceexample": str(use_sequenceexample),
},
output_config=output_config,
example_artifacts=example_artifacts,
instance_name=instance_name,
)
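# --- Illustrative usage sketch (not part of the original module) ---
# How this component might be wired into a TFX pipeline. The query string, the
# timestamp column name and the window length are placeholder assumptions.
def _example_gen_sketch() -> BigQueryExampleWithSlidingWindowGen:
    query = "SELECT * FROM `my_project.my_dataset.my_table`"  # hypothetical query
    return BigQueryExampleWithSlidingWindowGen(
        window_length=60,                    # 60 elements == 60 s at the assumed 1 Hz rate
        bq_timestamp_attribute="timestamp",  # hypothetical timestamp column
        drop_irregular_windows=True,
        use_sequenceexample=True,
        input_config=example_gen_pb2.Input(splits=[
            example_gen_pb2.Input.Split(name="single_split", pattern=query),
        ]),
    )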
|
the-stack_106_16740
|
"""
A Spawner for JupyterHub that runs each user's server in a separate docker container
"""
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
import os
from pprint import pformat
import string
from tarfile import TarFile, TarInfo
from textwrap import dedent
from urllib.parse import urlparse
import warnings
import docker
from docker.errors import APIError
from docker.utils import kwargs_from_env
from tornado import gen, web
from escapism import escape
from jupyterhub.spawner import Spawner
from traitlets import (
Any,
Bool,
CaselessStrEnum,
Dict,
List,
Int,
Unicode,
Union,
default,
observe,
validate,
)
from .volumenamingstrategy import default_format_volume_name
class UnicodeOrFalse(Unicode):
info_text = "a unicode string or False"
def validate(self, obj, value):
if value is False:
return value
return super(UnicodeOrFalse, self).validate(obj, value)
import jupyterhub
_jupyterhub_xy = "%i.%i" % (jupyterhub.version_info[:2])
class DockerSpawner(Spawner):
"""A Spawner for JupyterHub that runs each user's server in a separate docker container"""
_executor = None
_deprecated_aliases = {
"container_ip": ("host_ip", "0.9.*"),
"container_port": ("port", "0.9.*"),
"container_image": ("image", "0.9.*"),
"container_prefix": ("prefix", "0.10.0"),
"container_name_template": ("name_template", "0.10.0*"),
"remove_containers": ("remove", "0.10.0"),
"image_whitelist": ("allowed_images", "0.12.0"),
}
@observe(*list(_deprecated_aliases))
def _deprecated_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in DockerSpawner {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
@property
def executor(self):
"""single global executor"""
cls = self.__class__
if cls._executor is None:
cls._executor = ThreadPoolExecutor(1)
return cls._executor
_client = None
@property
def client(self):
"""single global client instance"""
cls = self.__class__
if cls._client is None:
kwargs = {"version": "auto"}
if self.tls_config:
kwargs["tls"] = docker.tls.TLSConfig(**self.tls_config)
kwargs.update(kwargs_from_env())
kwargs.update(self.client_kwargs)
client = docker.APIClient(**kwargs)
cls._client = client
return cls._client
# notice when user has set the command
# default command is that of the container,
# but user can override it via config
_user_set_cmd = False
@observe("cmd")
def _cmd_changed(self, change):
self._user_set_cmd = True
object_id = Unicode()
# the type of object we create
object_type = "container"
# the field containing the object id
object_id_key = "Id"
@property
def container_id(self):
"""alias for object_id"""
return self.object_id
@property
def container_name(self):
"""alias for object_name"""
return self.object_name
# deprecate misleading container_ip, since
# it is not the ip in the container,
# but the host ip of the port forwarded to the container
# when use_internal_ip is False
container_ip = Unicode("127.0.0.1", help="Deprecated, use `DockerSpawner.host_ip`", config=True)
host_ip = Unicode(
"127.0.0.1",
help="""The ip address on the host on which to expose the container's port
Typically 127.0.0.1, but can be public interfaces as well
in cases where the Hub and/or proxy are on different machines
from the user containers.
Only used when use_internal_ip = False.
""",
config=True,
)
@default('host_ip')
def _default_host_ip(self):
docker_host = os.getenv('DOCKER_HOST')
if docker_host:
urlinfo = urlparse(docker_host)
if urlinfo.scheme == 'tcp':
return urlinfo.hostname
return '127.0.0.1'
# unlike container_ip, container_port is the internal port
# on which the server is bound.
container_port = Int(8888, min=1, max=65535, help="Deprecated, use `DockerSpawner.port.`", config=True)
# fix default port to 8888, used in the container
@default("port")
def _port_default(self):
return 8888
# default to listening on all-interfaces in the container
@default("ip")
def _ip_default(self):
return "0.0.0.0"
container_image = Unicode(
"jupyterhub/singleuser:%s" % _jupyterhub_xy,
help="Deprecated, use `DockerSpawner.image.`",
config=True
)
image = Unicode(
"jupyterhub/singleuser:%s" % _jupyterhub_xy,
config=True,
help="""The image to use for single-user servers.
This image should have the same version of jupyterhub as
the Hub itself installed.
If the default command of the image does not launch
jupyterhub-singleuser, set `c.Spawner.cmd` to
launch jupyterhub-singleuser, e.g.
Any of the jupyter docker-stacks should work without additional config,
as long as the version of jupyterhub in the image is compatible.
""",
)
image_whitelist = Union([Any(), Dict(), List()], help="Deprecated, use `DockerSpawner.allowed_images`.", config=True,)
allowed_images = Union(
[Any(), Dict(), List()],
default_value={},
config=True,
help="""
List or dict of images that users can run.
If specified, users will be presented with a form
from which they can select an image to run.
If a dictionary, the keys will be the options presented to users
and the values the actual images that will be launched.
If a list, will be cast to a dictionary where keys and values are the same
(i.e. a shortcut for presenting the actual images directly to users).
If a callable, will be called with the Spawner instance as its only argument.
The user is accessible as spawner.user.
The callable should return a dict or list as above.
.. versionchanged:: 0.12.0
`DockerSpawner.image_whitelist` renamed to `allowed_images`
""",
)
@validate('allowed_images')
def _allowed_images_dict(self, proposal):
"""cast allowed_images to a dict
If passing a list, cast it to a {item:item}
dict where the keys and values are the same.
"""
allowed_images = proposal.value
if isinstance(allowed_images, list):
allowed_images = {item: item for item in allowed_images}
return allowed_images
def _get_allowed_images(self):
"""Evaluate allowed_images callable
Or return the list as-is if it's already a dict
"""
if callable(self.allowed_images):
allowed_images = self.allowed_images(self)
if not isinstance(allowed_images, dict):
# always return a dict
allowed_images = {item: item for item in allowed_images}
return allowed_images
return self.allowed_images
@default('options_form')
def _options_form_default(self):
allowed_images = self._get_allowed_images()
if len(allowed_images) <= 1:
# default form only when there are images to choose from
return ''
# form derived from wrapspawner.ProfileSpawner
option_t = '<option value="{image}" {selected}>{image}</option>'
options = [
option_t.format(
image=image, selected='selected' if image == self.image else ''
)
for image in allowed_images
]
return """
<label for="image">Select an image:</label>
<select class="form-control" name="image" required autofocus>
{options}
</select>
""".format(
            options="\n".join(options)
)
def options_from_form(self, formdata):
"""Turn options formdata into user_options"""
options = {}
if 'image' in formdata:
options['image'] = formdata['image'][0]
return options
pull_policy = CaselessStrEnum(
["always", "ifnotpresent", "never"],
default_value="ifnotpresent",
config=True,
help="""The policy for pulling the user docker image.
Choices:
- ifnotpresent: pull if the image is not already present (default)
- always: always pull the image to check for updates, even if it is present
- never: never perform a pull
"""
)
container_prefix = Unicode(config=True, help="Deprecated, use `DockerSpawner.prefix`.")
container_name_template = Unicode(
config=True, help="Deprecated, use `DockerSpawner.name_template`."
)
prefix = Unicode(
"jupyter",
config=True,
help=dedent(
"""
Prefix for container names. See name_template for full container name for a particular
user's server.
"""
),
)
name_template = Unicode(
config=True,
help=dedent(
"""
Name of the container or service: with {username}, {imagename}, {prefix}, {servername} replacements.
{raw_username} can be used for the original, not escaped username
(may contain uppercase, special characters).
It is important to include {servername} if JupyterHub's "named
servers" are enabled (JupyterHub.allow_named_servers = True).
If the server is named, the default name_template is
"{prefix}-{username}__{servername}". If it is unnamed, the default
name_template is "{prefix}-{username}".
Note: when using named servers,
it is important that the separator between {username} and {servername}
is not a character that can occur in an escaped {username},
and also not the single escape character '_'.
"""
),
)
@default('name_template')
def _default_name_template(self):
if self.name:
return "{prefix}-{username}__{servername}"
else:
return "{prefix}-{username}"
client_kwargs = Dict(
config=True,
help="Extra keyword arguments to pass to the docker.Client constructor.",
)
volumes = Dict(
config=True,
help=dedent(
"""
Map from host file/directory to container (guest) file/directory
mount point and (optionally) a mode. When specifying the
guest mount point (bind) for the volume, you may use a
dict or str. If a str, then the volume will default to a
read-write (mode="rw"). With a dict, the bind is
identified by "bind" and the "mode" may be one of "rw"
(default), "ro" (read-only), "z" (public/shared SELinux
volume label), and "Z" (private/unshared SELinux volume
label).
If format_volume_name is not set,
default_format_volume_name is used for naming volumes.
In this case, if you use {username} in either the host or guest
file/directory path, it will be replaced with the current
user's name.
"""
),
)
move_certs_image = Unicode(
"busybox:1.30.1",
config=True,
help="""The image used to stage internal SSL certificates.
Busybox is used because we just need an empty container
that waits while we stage files into the volume via .put_archive.
"""
)
@gen.coroutine
def move_certs(self, paths):
self.log.info("Staging internal ssl certs for %s", self._log_name)
yield self.pull_image(self.move_certs_image)
# create the volume
volume_name = self.format_volume_name(self.certs_volume_name, self)
# create volume passes even if it already exists
self.log.info("Creating ssl volume %s for %s", volume_name, self._log_name)
yield self.docker('create_volume', volume_name)
# create a tar archive of the internal cert files
# docker.put_archive takes a tarfile and a running container
# and unpacks the archive into the container
nb_paths = {}
tar_buf = BytesIO()
archive = TarFile(fileobj=tar_buf, mode='w')
for key, hub_path in paths.items():
fname = os.path.basename(hub_path)
nb_paths[key] = '/certs/' + fname
with open(hub_path, 'rb') as f:
content = f.read()
tarinfo = TarInfo(name=fname)
tarinfo.size = len(content)
tarinfo.mtime = os.stat(hub_path).st_mtime
tarinfo.mode = 0o644
archive.addfile(tarinfo, BytesIO(content))
archive.close()
tar_buf.seek(0)
# run a container to stage the certs,
# mounting the volume at /certs/
host_config = self.client.create_host_config(
binds={
volume_name: {"bind": "/certs", "mode": "rw"},
},
)
container = yield self.docker('create_container',
self.move_certs_image,
volumes=["/certs"],
host_config=host_config,
)
container_id = container['Id']
self.log.debug(
"Container %s is creating ssl certs for %s",
container_id[:12], self._log_name,
)
# start the container
yield self.docker('start', container_id)
# stage the archive to the container
try:
yield self.docker(
'put_archive',
container=container_id,
path='/certs',
data=tar_buf,
)
finally:
yield self.docker('remove_container', container_id)
return nb_paths
certs_volume_name = Unicode(
"{prefix}ssl-{username}",
config=True,
help="""Volume name
The same string-templating applies to this
as other volume names.
"""
)
read_only_volumes = Dict(
config=True,
help=dedent(
"""
Map from host file/directory to container file/directory.
Volumes specified here will be read-only in the container.
If format_volume_name is not set,
default_format_volume_name is used for naming volumes.
In this case, if you use {username} in either the host or guest
file/directory path, it will be replaced with the current
user's name.
"""
),
)
format_volume_name = Any(
help="""Any callable that accepts a string template and a DockerSpawner instance as parameters in that order and returns a string.
Reusable implementations should go in dockerspawner.VolumeNamingStrategy, tests should go in ...
"""
).tag(config=True)
@default("format_volume_name")
def _get_default_format_volume_name(self):
return default_format_volume_name
use_docker_client_env = Bool(
True,
config=True,
help="Deprecated. Docker env variables are always used if present.",
)
@observe("use_docker_client_env")
def _client_env_changed(self):
self.log.warning(
"DockerSpawner.use_docker_client_env is deprecated and ignored."
" Docker environment variables are always used if defined."
)
tls_config = Dict(
config=True,
help="""Arguments to pass to docker TLS configuration.
See docker.client.TLSConfig constructor for options.
""",
)
tls = tls_verify = tls_ca = tls_cert = tls_key = tls_assert_hostname = Any(
config=True,
help="""Deprecated, use `DockerSpawner.tls_config` dict to set any TLS options.""",
)
@observe(
"tls", "tls_verify", "tls_ca", "tls_cert", "tls_key", "tls_assert_hostname"
)
def _tls_changed(self, change):
self.log.warning(
"%s config ignored, use %s.tls_config dict to set full TLS configuration.",
change.name,
self.__class__.__name__,
)
remove_containers = Bool(
False, config=True, help="Deprecated, use `DockerSpawner.remove`."
)
remove = Bool(
False,
config=True,
help="""
If True, delete containers when servers are stopped.
This will destroy any data in the container not stored in mounted volumes.
""",
)
@property
def will_resume(self):
# indicate that we will resume,
# so JupyterHub >= 0.7.1 won't cleanup our API token
return not self.remove
extra_create_kwargs = Dict(
config=True, help="Additional args to pass for container create"
)
extra_host_config = Dict(
config=True, help="Additional args to create_host_config for container create"
)
_docker_safe_chars = set(string.ascii_letters + string.digits + "-")
_docker_escape_char = "_"
hub_ip_connect = Unicode(
config=True,
help=dedent(
"""
If set, DockerSpawner will configure the containers to use
the specified IP to connect the hub api. This is useful
when the hub_api is bound to listen on all ports or is
running inside of a container.
"""
),
)
@observe("hub_ip_connect")
def _ip_connect_changed(self, change):
if jupyterhub.version_info >= (0, 8):
warnings.warn(
"DockerSpawner.hub_ip_connect is no longer needed with JupyterHub 0.8."
" Use JupyterHub.hub_connect_ip instead.",
DeprecationWarning,
)
use_internal_ip = Bool(
False,
config=True,
help=dedent(
"""
Enable the usage of the internal docker ip. This is useful if you are running
jupyterhub (as a container) and the user containers within the same docker network.
E.g. by mounting the docker socket of the host into the jupyterhub container.
Default is True if using a docker network, False if bridge or host networking is used.
"""
),
)
@default("use_internal_ip")
def _default_use_ip(self):
# setting network_name to something other than bridge or host implies use_internal_ip
if self.network_name not in {"bridge", "host"}:
return True
else:
return False
use_internal_hostname = Bool(
False,
config=True,
help=dedent(
"""
            Use the docker hostname for connecting,
instead of an IP address.
This should work in general when using docker networks,
and must be used when internal_ssl is enabled.
It is enabled by default if internal_ssl is enabled.
"""
),
)
@default("use_internal_hostname")
def _default_use_hostname(self):
# FIXME: replace getattr with self.internal_ssl
# when minimum jupyterhub is 1.0
return getattr(self, 'internal_ssl', False)
links = Dict(
config=True,
help=dedent(
"""
Specify docker link mapping to add to the container, e.g.
links = {'jupyterhub': 'jupyterhub'}
If the Hub is running in a Docker container,
this can simplify routing because all traffic will be using docker hostnames.
"""
),
)
network_name = Unicode(
"bridge",
config=True,
help=dedent(
"""
Run the containers on this docker network.
If it is an internal docker network, the Hub should be on the same network,
as internal docker IP addresses will be used.
For bridge networking, external ports will be bound.
"""
),
)
post_start_cmd = UnicodeOrFalse(
False,
config=True,
help=""" If specified, the command will be executed inside the container
after starting.
Similar to using 'docker exec'
"""
)
@gen.coroutine
def post_start_exec(self):
"""
Execute additional command inside the container after starting it.
e.g. calling 'docker exec'
"""
container = yield self.get_object()
container_id = container[self.object_id_key]
exec_kwargs = {
'cmd': self.post_start_cmd,
'container': container_id
}
exec_id = yield self.docker("exec_create", **exec_kwargs)
return self.docker("exec_start", exec_id=exec_id)
@property
def tls_client(self):
"""A tuple consisting of the TLS client certificate and key if they
have been provided, otherwise None.
"""
if self.tls_cert and self.tls_key:
return (self.tls_cert, self.tls_key)
return None
@property
def volume_mount_points(self):
"""
Volumes are declared in docker-py in two stages. First, you declare
all the locations where you're going to mount volumes when you call
create_container.
Returns a sorted list of all the values in self.volumes or
self.read_only_volumes.
"""
return sorted([value["bind"] for value in self.volume_binds.values()])
@property
def volume_binds(self):
"""
The second half of declaring a volume with docker-py happens when you
actually call start(). The required format is a dict of dicts that
looks like::
{
host_location: {'bind': container_location, 'mode': 'rw'}
}
Mode may be 'ro', 'rw', 'z', or 'Z'.
"""
binds = self._volumes_to_binds(self.volumes, {})
read_only_volumes = {}
# FIXME: replace getattr with self.internal_ssl
# when minimum jupyterhub is 1.0
if getattr(self, 'internal_ssl', False):
# add SSL volume as read-only
read_only_volumes[self.certs_volume_name] = '/certs'
read_only_volumes.update(self.read_only_volumes)
return self._volumes_to_binds(read_only_volumes, binds, mode="ro")
_escaped_name = None
@property
def escaped_name(self):
"""Escape the username so it's safe for docker objects"""
if self._escaped_name is None:
self._escaped_name = self._escape(self.user.name)
return self._escaped_name
def _escape(self, s):
"""Escape a string to docker-safe characters"""
return escape(
s,
safe=self._docker_safe_chars,
escape_char=self._docker_escape_char,
)
object_id = Unicode(allow_none=True)
def template_namespace(self):
escaped_image = self.image.replace("/", "_")
server_name = getattr(self, "name", "")
safe_server_name = self._escape(server_name.lower())
return {
"username": self.escaped_name,
"safe_username": self.escaped_name,
"raw_username": self.user.name,
"imagename": escaped_image,
"servername": safe_server_name,
"raw_servername": server_name,
"prefix": self.prefix,
}
object_name = Unicode()
@default("object_name")
def _object_name_default(self):
"""Render the name of our container/service using name_template"""
return self.name_template.format(**self.template_namespace())
def load_state(self, state):
super(DockerSpawner, self).load_state(state)
if "container_id" in state:
# backward-compatibility for dockerspawner < 0.10
self.object_id = state.get("container_id")
else:
self.object_id = state.get("object_id", "")
# override object_name from state if defined
# to avoid losing track of running servers
self.object_name = state.get("object_name", None) or self.object_name
def get_state(self):
state = super(DockerSpawner, self).get_state()
if self.object_id:
state["object_id"] = self.object_id
# persist object_name if running
# so that a change in the template doesn't lose track of running servers
state["object_name"] = self.object_name
return state
def _public_hub_api_url(self):
proto, path = self.hub.api_url.split("://", 1)
ip, rest = path.split(":", 1)
return "{proto}://{ip}:{rest}".format(
proto=proto, ip=self.hub_ip_connect, rest=rest
)
def _env_keep_default(self):
"""Don't inherit any env from the parent process"""
return []
def get_args(self):
args = super().get_args()
if self.hub_ip_connect:
# JupyterHub 0.7 specifies --hub-api-url
# on the command-line, which is hard to update
for idx, arg in enumerate(list(args)):
if arg.startswith("--hub-api-url="):
args.pop(idx)
break
args.append("--hub-api-url=%s" % self._public_hub_api_url())
return args
def get_env(self):
env = super().get_env()
env['JUPYTER_IMAGE_SPEC'] = self.image
return env
def _docker(self, method, *args, **kwargs):
"""wrapper for calling docker methods
to be passed to ThreadPoolExecutor
"""
m = getattr(self.client, method)
return m(*args, **kwargs)
def docker(self, method, *args, **kwargs):
"""Call a docker method in a background thread
returns a Future
"""
return self.executor.submit(self._docker, method, *args, **kwargs)
@gen.coroutine
def poll(self):
"""Check for my id in `docker ps`"""
container = yield self.get_object()
if not container:
self.log.warning("Container not found: %s", self.container_name)
return 0
container_state = container["State"]
self.log.debug(
"Container %s status: %s", self.container_id[:7], pformat(container_state)
)
if container_state["Running"]:
return None
else:
return (
"ExitCode={ExitCode}, "
"Error='{Error}', "
"FinishedAt={FinishedAt}".format(**container_state)
)
@gen.coroutine
def get_object(self):
self.log.debug("Getting %s '%s'", self.object_type, self.object_name)
try:
obj = yield self.docker("inspect_%s" % self.object_type, self.object_name)
self.object_id = obj[self.object_id_key]
except APIError as e:
if e.response.status_code == 404:
self.log.info(
"%s '%s' is gone", self.object_type.title(), self.object_name
)
obj = None
# my container is gone, forget my id
self.object_id = ""
elif e.response.status_code == 500:
self.log.info(
"%s '%s' is on unhealthy node",
self.object_type.title(),
self.object_name,
)
obj = None
# my container is unhealthy, forget my id
self.object_id = ""
else:
raise
return obj
@gen.coroutine
def get_command(self):
"""Get the command to run (full command + args)"""
if self._user_set_cmd:
cmd = self.cmd
else:
image_info = yield self.docker("inspect_image", self.image)
cmd = image_info["Config"]["Cmd"]
return cmd + self.get_args()
@gen.coroutine
def remove_object(self):
self.log.info("Removing %s %s", self.object_type, self.object_id)
# remove the container, as well as any associated volumes
try:
yield self.docker("remove_" + self.object_type, self.object_id, v=True)
except docker.errors.APIError as e:
if e.status_code == 409:
self.log.debug("Already removing %s: %s", self.object_type, self.object_id)
else:
raise
@gen.coroutine
def check_allowed(self, image):
allowed_images = self._get_allowed_images()
if not allowed_images:
return image
if image not in allowed_images:
raise web.HTTPError(
400,
"Image %s not in allowed list: %s" % (image, ', '.join(allowed_images)),
)
# resolve image alias to actual image name
return allowed_images[image]
@default('ssl_alt_names')
def _get_ssl_alt_names(self):
return ['DNS:' + self.internal_hostname]
@gen.coroutine
def create_object(self):
"""Create the container/service object"""
create_kwargs = dict(
image=self.image,
environment=self.get_env(),
volumes=self.volume_mount_points,
name=self.container_name,
command=(yield self.get_command()),
)
# ensure internal port is exposed
create_kwargs["ports"] = {"%i/tcp" % self.port: None}
create_kwargs.update(self.extra_create_kwargs)
# build the dictionary of keyword arguments for host_config
host_config = dict(binds=self.volume_binds, links=self.links)
if getattr(self, "mem_limit", None) is not None:
# If jupyterhub version > 0.7, mem_limit is a traitlet that can
# be directly configured. If so, use it to set mem_limit.
# this will still be overriden by extra_host_config
host_config["mem_limit"] = self.mem_limit
if not self.use_internal_ip:
host_config["port_bindings"] = {self.port: (self.host_ip,)}
host_config.update(self.extra_host_config)
host_config.setdefault("network_mode", self.network_name)
self.log.debug("Starting host with config: %s", host_config)
host_config = self.client.create_host_config(**host_config)
create_kwargs.setdefault("host_config", {}).update(host_config)
# create the container
obj = yield self.docker("create_container", **create_kwargs)
return obj
@gen.coroutine
def start_object(self):
"""Actually start the container/service
e.g. calling `docker start`
"""
return self.docker("start", self.container_id)
@gen.coroutine
def stop_object(self):
"""Stop the container/service
e.g. calling `docker stop`. Does not remove the container.
"""
return self.docker("stop", self.container_id)
@gen.coroutine
def pull_image(self, image):
"""Pull the image, if needed
- pulls it unconditionally if pull_policy == 'always'
- otherwise, checks if it exists, and
- raises if pull_policy == 'never'
- pulls if pull_policy == 'ifnotpresent'
"""
# docker wants to split repo:tag
# the part split("/")[-1] allows having an image from a custom repo
# with port but without tag. For example: my.docker.repo:51150/foo would not
# pass this test, resulting in image=my.docker.repo:51150/foo and tag=latest
if ':' in image.split("/")[-1]:
# rsplit splits from right to left, allowing to have a custom image repo with port
repo, tag = image.rsplit(':', 1)
else:
repo = image
tag = 'latest'
if self.pull_policy.lower() == 'always':
# always pull
self.log.info("pulling %s", image)
yield self.docker('pull', repo, tag)
# done
return
try:
# check if the image is present
yield self.docker('inspect_image', image)
except docker.errors.NotFound:
if self.pull_policy == "never":
# never pull, raise because there is no such image
raise
elif self.pull_policy == "ifnotpresent":
# not present, pull it for the first time
self.log.info("pulling image %s", image)
yield self.docker('pull', repo, tag)
@gen.coroutine
def start(self, image=None, extra_create_kwargs=None, extra_host_config=None):
"""Start the single-user server in a docker container.
Additional arguments to create/host config/etc. can be specified
via .extra_create_kwargs and .extra_host_config attributes.
If the container exists and `c.DockerSpawner.remove` is true, then
the container is removed first. Otherwise, the existing containers
will be restarted.
"""
if image:
self.log.warning("Specifying image via .start args is deprecated")
self.image = image
if extra_create_kwargs:
self.log.warning(
"Specifying extra_create_kwargs via .start args is deprecated"
)
self.extra_create_kwargs.update(extra_create_kwargs)
if extra_host_config:
self.log.warning(
"Specifying extra_host_config via .start args is deprecated"
)
self.extra_host_config.update(extra_host_config)
# image priority:
# 1. user options (from spawn options form)
# 2. self.image from config
image_option = self.user_options.get('image')
if image_option:
# save choice in self.image
self.image = yield self.check_allowed(image_option)
image = self.image
yield self.pull_image(image)
obj = yield self.get_object()
if obj and self.remove:
self.log.warning(
"Removing %s that should have been cleaned up: %s (id: %s)",
self.object_type,
self.object_name,
self.object_id[:7],
)
yield self.remove_object()
obj = None
if obj is None:
obj = yield self.create_object()
self.object_id = obj[self.object_id_key]
self.log.info(
"Created %s %s (id: %s) from image %s",
self.object_type,
self.object_name,
self.object_id[:7],
self.image,
)
else:
self.log.info(
"Found existing %s %s (id: %s)",
self.object_type,
self.object_name,
self.object_id[:7],
)
# Handle re-using API token.
# Get the API token from the environment variables
# of the running container:
for line in obj["Config"]["Env"]:
if line.startswith(("JPY_API_TOKEN=", "JUPYTERHUB_API_TOKEN=")):
self.api_token = line.split("=", 1)[1]
break
# TODO: handle unpause
self.log.info(
"Starting %s %s (id: %s)",
self.object_type,
self.object_name,
self.container_id[:7],
)
# start the container
yield self.start_object()
if self.post_start_cmd:
yield self.post_start_exec()
ip, port = yield self.get_ip_and_port()
if jupyterhub.version_info < (0, 7):
# store on user for pre-jupyterhub-0.7:
self.user.server.ip = ip
self.user.server.port = port
# jupyterhub 0.7 prefers returning ip, port:
return (ip, port)
@property
def internal_hostname(self):
"""Return our hostname
used with internal SSL
"""
return self.container_name
@gen.coroutine
def get_ip_and_port(self):
"""Queries Docker daemon for container's IP and port.
If you are using network_mode=host, you will need to override
this method as follows::
@gen.coroutine
def get_ip_and_port(self):
return self.host_ip, self.port
You will need to make sure host_ip and port
are correct, which depends on the route to the container
and the port it opens.
"""
if self.use_internal_hostname:
# internal ssl uses hostnames,
# required for domain-name matching with internal SSL
# TODO: should we always do this?
# are there any cases where internal_ip works
# and internal_hostname doesn't?
ip = self.internal_hostname
port = self.port
elif self.use_internal_ip:
resp = yield self.docker("inspect_container", self.container_id)
network_settings = resp["NetworkSettings"]
if "Networks" in network_settings:
ip = self.get_network_ip(network_settings)
else: # Fallback for old versions of docker (<1.9) without network management
ip = network_settings["IPAddress"]
port = self.port
else:
resp = yield self.docker("port", self.container_id, self.port)
if resp is None:
raise RuntimeError("Failed to get port info for %s" % self.container_id)
ip = resp[0]["HostIp"]
port = int(resp[0]["HostPort"])
if ip == "0.0.0.0":
ip = urlparse(self.client.base_url).hostname
if ip == "localnpipe":
ip = "localhost"
return ip, port
def get_network_ip(self, network_settings):
networks = network_settings["Networks"]
if self.network_name not in networks:
raise Exception(
"Unknown docker network '{network}'."
" Did you create it with `docker network create <name>`?".format(
network=self.network_name
)
)
network = networks[self.network_name]
ip = network["IPAddress"]
return ip
@gen.coroutine
def stop(self, now=False):
"""Stop the container
Will remove the container if `c.DockerSpawner.remove` is `True`.
Consider using pause/unpause when docker-py adds support.
"""
self.log.info(
"Stopping %s %s (id: %s)",
self.object_type,
self.object_name,
self.object_id[:7],
)
yield self.stop_object()
if self.remove:
yield self.remove_object()
self.clear_state()
def _volumes_to_binds(self, volumes, binds, mode="rw"):
"""Extract the volume mount points from volumes property.
Returns a dict of dict entries of the form::
{'/host/dir': {'bind': '/guest/dir': 'mode': 'rw'}}
"""
def _fmt(v):
return self.format_volume_name(v, self)
for k, v in volumes.items():
m = mode
if isinstance(v, dict):
if "mode" in v:
m = v["mode"]
v = v["bind"]
binds[_fmt(k)] = {"bind": _fmt(v), "mode": m}
return binds
def _deprecated_method(old_name, new_name, version):
"""Create a deprecated method wrapper for a deprecated method name"""
def deprecated(self, *args, **kwargs):
warnings.warn(
(
"{cls}.{old_name} is deprecated in DockerSpawner {version}."
" Please use {cls}.{new_name} instead."
).format(
cls=self.__class__.__name__,
old_name=old_name,
new_name=new_name,
version=version,
),
DeprecationWarning,
stacklevel=2,
)
old_method = getattr(self, new_name)
return old_method(*args, **kwargs)
return deprecated
# deprecate white/blacklist method names
for _old_name, _new_name, _version in [
("check_image_whitelist", "check_allowed", "0.12.0")
]:
setattr(
DockerSpawner, _old_name, _deprecated_method(_old_name, _new_name, _version),
)
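# --- Illustrative configuration sketch (not part of the original module) ---
# Typical jupyterhub_config.py settings for this spawner; the image, network
# name and volume paths are placeholder assumptions. Shown as comments because
# the `c` config object only exists inside a JupyterHub config file.
#
#   c.JupyterHub.spawner_class = "dockerspawner.DockerSpawner"
#   c.DockerSpawner.image = "jupyterhub/singleuser:latest"
#   c.DockerSpawner.network_name = "jupyterhub"    # hypothetical docker network
#   c.DockerSpawner.remove = True                  # delete containers on stop
#   c.DockerSpawner.volumes = {
#       "jupyterhub-user-{username}": "/home/jovyan/work",
#   }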
|
the-stack_106_16742
|
__all__ = ["RCNNModelAdapter"]
from icevision.imports import *
from icevision.utils import *
from icevision.metrics import *
from icevision.engines.lightning.lightning_model_adapter import LightningModelAdapter
from icevision.models.torchvision_models.loss_fn import loss_fn
class RCNNModelAdapter(LightningModelAdapter, ABC):
def __init__(self, model: nn.Module, metrics: Sequence[Metric] = None):
super().__init__(metrics=metrics)
self.model = model
@abstractmethod
def convert_raw_predictions(self, raw_preds):
"""Convert raw predictions from the model to library standard."""
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def training_step(self, batch, batch_idx):
(xb, yb), records = batch
preds = self(xb, yb)
loss = loss_fn(preds, yb)
log = {"train/loss": loss}
return {"loss": loss, "log": log}
def validation_step(self, batch, batch_idx):
(xb, yb), records = batch
with torch.no_grad():
self.train()
train_preds = self(xb, yb)
loss = loss_fn(train_preds, yb)
self.eval()
raw_preds = self(xb)
preds = self.convert_raw_predictions(raw_preds=raw_preds)
self.accumulate_metrics(records=records, preds=preds)
return {"valid/loss": loss}
def validation_epoch_end(self, outs):
loss_log = {k: torch.stack(v).mean() for k, v in mergeds(outs).items()}
metrics_log = self.finalize_metrics()
log = {**loss_log, **metrics_log}
return {"val_loss": log["valid/loss"], "log": log}
|
the-stack_106_16743
|
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@io_bazel_rules_closure//closure/private:java_import_external.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/keras_applications_archive:workspace.bzl", keras_applications = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
aws()
flatbuffers()
highwayhash()
hwloc()
icu()
keras_applications()
kissfft()
jpeg()
nasm()
pasta()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo = "../arm_compiler",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "f4129843d5c2996419f96f10928edd02b2150998861a088dc7cfa1b6a058102a",
strip_prefix = "mklml_lnx_2019.0.3.20190220",
urls = [
"http://mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.18/mklml_lnx_2019.0.3.20190220.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.18/mklml_lnx_2019.0.3.20190220.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "eae0c49a7ed738f0ed97b897e952eaa881feddfa665017a8d5d9d79fd38964b4",
strip_prefix = "mklml_win_2019.0.3.20190220",
urls = [
"http://mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.18/mklml_win_2019.0.3.20190220.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.18/mklml_win_2019.0.3.20190220.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "53fdcd7e31c309bb6af869d82987d9c6414c1b957d63d10a9caa9ad077643d99",
strip_prefix = "mklml_mac_2019.0.3.20190220",
urls = [
"http://mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.18/mklml_mac_2019.0.3.20190220.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.18/mklml_mac_2019.0.3.20190220.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "38a1c02104ee9f630c1ad68164119cd58ad0aaf59e04ccbe7bd5781add7bfbea",
strip_prefix = "mkl-dnn-0.18",
urls = [
"http://mirror.tensorflow.org/github.com/intel/mkl-dnn/archive/v0.18.tar.gz",
"https://github.com/intel/mkl-dnn/archive/v0.18.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
sha256 = "027194c437d6843a3ef4dd2135186733dd12e95a60255aca15c9ac5ba7597378",
strip_prefix = "abseil-cpp-2c8421e1c6cef0da9e8a20b01c15256ec9ec116d",
urls = [
"http://mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/2c8421e1c6cef0da9e8a20b01c15256ec9ec116d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/2c8421e1c6cef0da9e8a20b01c15256ec9ec116d.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "048c7e33df1f1eab470e4b09f14c9ed508b91cb813750b4be2c012991363e735",
strip_prefix = "eigen-eigen-48dfc9c91096",
urls = [
"http://mirror.tensorflow.org/bitbucket.org/eigen/eigen/get/48dfc9c91096.tar.gz",
"https://bitbucket.org/eigen/eigen/get/48dfc9c91096.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "970285762565c7890c6c087d262b0a18286e7d0384f13a37786d8521773bc969",
strip_prefix = "tools-0e906ebc527eab1cdbf7adabff5b474da9562e9f/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf",
urls = [
"http://mirror.tensorflow.org/github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
# Please uncomment me, when the next upgrade happens. Then
# remove the whitelist entry in third_party/repo.bzl.
# "https://github.com/raspberrypi/tools/archive/0e906ebc527eab1cdbf7adabff5b474da9562e9f.tar.gz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "cd8532021352b4a0290d209f7f9bfd7c2411e08286a893af3577a43457287bfa",
strip_prefix = "libxsmm-1.9",
urls = [
"http://mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.9.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.9.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "a31397714a353587413d307337d0b58f8a2e20e2b9d02f2e24e3463fa4eeda81",
strip_prefix = "re2-2018-10-01",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/google/re2/archive/2018-10-01.tar.gz",
"https://github.com/google/re2/archive/2018-10-01.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "06bc735a117ec7ea92ea580e7f2ffa4b1cd7539e0e04f847bf500588d7f0fe90",
strip_prefix = "google-cloud-cpp-0.7.0",
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"http://mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v0.7.0.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v0.7.0.tar.gz",
],
)
tf_http_archive(
name = "com_github_googleapis_googleapis",
build_file = clean_dep("//third_party:googleapis.BUILD"),
sha256 = "824870d87a176f26bcef663e92051f532fac756d1a06b404055dc078425f4378",
strip_prefix = "googleapis-f81082ea1e2f85c43649bee26e0d9871d4b41cdb",
system_build_file = clean_dep("//third_party/systemlibs:googleapis.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
"https://github.com/googleapis/googleapis/archive/f81082ea1e2f85c43649bee26e0d9871d4b41cdb.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "dcf6e2aed522d74ac76b54038c19f0138565f4778a8821ab6679738755ebf6c2",
strip_prefix = "gemmlowp-dec2b7dd5f6f0043070af4587d2a9dc156f4ebab",
urls = [
"http://mirror.tensorflow.org/github.com/google/gemmlowp/archive/dec2b7dd5f6f0043070af4587d2a9dc156f4ebab.zip",
"https://github.com/google/gemmlowp/archive/dec2b7dd5f6f0043070af4587d2a9dc156f4ebab.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0",
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"http://mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png_archive",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "e45ce5f68b1d80e2cb9a2b601605b374bdf51e1798ef1c2c2bd62131dfcf9eef",
strip_prefix = "libpng-1.6.34",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.34.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "ad68c1216c3a474cf360c7581a4001e952515b3649342100f2d7ca7c8e313da6",
strip_prefix = "sqlite-amalgamation-3240000",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"http://mirror.tensorflow.org/www.sqlite.org/2018/sqlite-amalgamation-3240000.zip",
"https://www.sqlite.org/2018/sqlite-amalgamation-3240000.zip",
],
)
tf_http_archive(
name = "gif_archive",
build_file = clean_dep("//third_party:gif.BUILD"),
sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
strip_prefix = "giflib-5.1.4",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"http://mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "7068908321ecd2774f145193c4b34a11305bd104b4551b09273dfd1d6a374930",
strip_prefix = "gast-0.2.0",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
"https://pypi.python.org/packages/5c/78/ff794fcae2ce8aa6323e789d1f8b3b7765f601e7702726f430e814822b96/gast-0.2.0.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "3d0f39e0920379ff1393de04b573bca3484d82a5f8b939e9e83b20b6106c9bbe",
strip_prefix = "abseil-py-pypi-v0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
},
urls = [
"http://mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.7.1.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.7.1.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"http://mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"http://mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
PROTOBUF_URLS = [
"http://mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.6.1.2.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/v3.6.1.2.tar.gz",
]
PROTOBUF_SHA256 = "2244b0308846bb22b4ff0bcc675e99290ff9f1115553ae9671eba1030af31bc0"
PROTOBUF_STRIP_PREFIX = "protobuf-3.6.1.2"
tf_http_archive(
name = "protobuf_archive",
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
# We need to import the protobuf library under the names com_google_protobuf
# and com_google_protobuf_cc to enable proto_library support in bazel.
# Unfortunately there is no way to alias http_archives at the moment.
tf_http_archive(
name = "com_google_protobuf",
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
tf_http_archive(
name = "com_google_protobuf_cc",
sha256 = PROTOBUF_SHA256,
strip_prefix = PROTOBUF_STRIP_PREFIX,
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = PROTOBUF_URLS,
)
tf_http_archive(
name = "nsync",
sha256 = "704be7f58afa47b99476bbac7aafd1a9db4357cef519db361716f13538547ffd",
strip_prefix = "nsync-1.20.2",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/google/nsync/archive/1.20.2.tar.gz",
"https://github.com/google/nsync/archive/1.20.2.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"http://mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"http://mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"http://mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "swig",
build_file = clean_dep("//third_party:swig.BUILD"),
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
strip_prefix = "swig-3.0.8",
system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
urls = [
"http://mirror.tensorflow.org/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "e9c37986337743f37fd14fe8737f246e97aec94b39d1b71e8a5973f72a9fc4f5",
strip_prefix = "curl-7.60.0",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"http://mirror.tensorflow.org/curl.haxx.se/download/curl-7.60.0.tar.gz",
"https://curl.haxx.se/download/curl-7.60.0.tar.gz",
],
)
    # WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL that changes the rule below
tf_http_archive(
name = "grpc",
sha256 = "e1e3a9edbfbe4230bee174d4aa45a15c1ec2b203cedb02d20df3e6345d8fa63e",
strip_prefix = "grpc-62688b6a05cc85b47fb77dd408611734253e47e2",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/grpc/grpc/archive/62688b6a05cc85b47fb77dd408611734253e47e2.tar.gz",
"https://github.com/grpc/grpc/archive/62688b6a05cc85b47fb77dd408611734253e47e2.tar.gz",
],
)
tf_http_archive(
name = "com_github_nanopb_nanopb",
sha256 = "8bbbb1e78d4ddb0a1919276924ab10d11b631df48b657d960e0c795a25515735",
build_file = "@grpc//third_party:nanopb.BUILD",
strip_prefix = "nanopb-f8ac463766281625ad710900479130c7fcb4d63b",
urls = [
"http://mirror.tensorflow.org/github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
"https://github.com/nanopb/nanopb/archive/f8ac463766281625ad710900479130c7fcb4d63b.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"http://mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
# Switch to an official source of snapshots if/when possible.
tf_http_archive(
name = "llvm",
build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
sha256 = "40c6a11055250dd31fefca6748679282588d006ff5ffd17d54ff35a0cb0f0a1e",
strip_prefix = "llvm-98ccb482640a7340dcb299f029f92a17a796fc50",
urls = [
"https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/98ccb482640a7340dcb299f029f92a17a796fc50.tar.gz",
"https://github.com/llvm-mirror/llvm/archive/98ccb482640a7340dcb299f029f92a17a796fc50.tar.gz",
],
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "1188e29000013ed6517168600fc35a010d58c5d321846d6a6dfee74e4c788b45",
strip_prefix = "boringssl-7f634429a04abc48e2eb041c81c5235816c96514",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
"https://github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz",
],
)
tf_http_archive(
name = "zlib_archive",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"http://mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "52bb637c70b971958ec79c9c8752b1df5ff0218a4db4510e60826e0cb79b5296",
urls = [
"http://mirror.tensorflow.org/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
"http://www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/google/snappy/archive/1.1.7.tar.gz",
"https://github.com/google/snappy/archive/1.1.7.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
sha256 = "19132b5127fa8e02d95a09795866923f04064c8f1e0770b2b42ab551408882a4",
strip_prefix = "nccl-f93fe9bfd94884cec2ba711897222e0df5569a53",
urls = [
"http://mirror.tensorflow.org/github.com/nvidia/nccl/archive/f93fe9bfd94884cec2ba711897222e0df5569a53.tar.gz",
"https://github.com/nvidia/nccl/archive/f93fe9bfd94884cec2ba711897222e0df5569a53.tar.gz",
],
)
tf_http_archive(
name = "kafka",
build_file = clean_dep("//third_party:kafka/BUILD"),
patch_file = clean_dep("//third_party/kafka:config.patch"),
sha256 = "cc6ebbcd0a826eec1b8ce1f625ffe71b53ef3290f8192b6cae38412a958f4fd3",
strip_prefix = "librdkafka-0.11.5",
urls = [
"http://mirror.tensorflow.org/github.com/edenhill/librdkafka/archive/v0.11.5.tar.gz",
"https://github.com/edenhill/librdkafka/archive/v0.11.5.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"http://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"http://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"http://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"http://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "a17501717ef7c8dda4dba73ded50c0d7cde440fd721acfeacbf19786ceac1ed6",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
"http://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.4.0/checker-qual-2.4.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"http://mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"http://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"http://mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
sha256 = "6bfa06ab52a650ae7ee6963143a0bbc667d6504822cbd9670369b598f18c58c3",
strip_prefix = "cub-1.8.0",
urls = [
"http://mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.8.0.zip",
"https://github.com/NVlabs/cub/archive/1.8.0.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"http://mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"http://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_smartreply",
build_file = clean_dep("//third_party:tflite_smartreply.BUILD"),
sha256 = "8980151b85a87a9c1a3bb1ed4748119e4a85abd3cb5744d83da4d4bd0fbeef7c",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/smartreply_1.0_2017_11_01.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "21288dccc517acee47fa9648d4d3da28bf0fef5381911ed7b4d2ee36366ffa20",
strip_prefix = "ovic",
urls = [
"http://mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2018_10_23.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2018_10_23.zip",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"http://mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"http://mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"http://mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"http://mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"http://mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://mirror.bazel.build/github.com/pybind/pybind11/archive/v2.2.4.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.2.4.tar.gz",
],
sha256 = "b69e83658513215b8d1443544d0549b7d231b9f201f6fc787a2b2218b408181e",
strip_prefix = "pybind11-2.2.4",
build_file = clean_dep("//third_party:pybind11.BUILD"),
)
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
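    # (A bind() makes its "actual" target available as //external:<name>,
    # e.g. deps = ["//external:six"] resolves to @six_archive//:six.)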
    # gRPC wants a cares dependency, but its contents are not actually
    # important since we have set GRPC_ARES=0 in .bazelrc.
native.bind(
name = "cares",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@grpc//:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@grpc//:grpc++_unsecure",
)
# Needed by gRPC
native.bind(
name = "libssl",
actual = "@boringssl//:ssl",
)
# Needed by gRPC
native.bind(
name = "nanopb",
actual = "@com_github_nanopb_nanopb//:nanopb",
)
# Needed by gRPC
native.bind(
name = "protobuf",
actual = "@protobuf_archive//:protobuf",
)
# gRPC expects //external:protobuf_clib and //external:protobuf_compiler
# to point to Protobuf's compiler library.
native.bind(
name = "protobuf_clib",
actual = "@protobuf_archive//:protoc_lib",
)
# Needed by gRPC
native.bind(
name = "protobuf_headers",
actual = "@protobuf_archive//:protobuf_headers",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
# Needed by gRPC
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
|
the-stack_106_16745
|
import os
import csv
from vivarium_cell.data.spreadsheets import load_tsv
FLAT_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data", "flat")
LIST_OF_FLAT_FILENAMES = (
os.path.join("wcEcoli_genes.tsv"),
os.path.join("wcEcoli_proteins.tsv"),
os.path.join("wcEcoli_environment_molecules.tsv"),
os.path.join("timelines_def.tsv"),
os.path.join("media_recipes.tsv"),
os.path.join("media", "wcEcoli_base.tsv"),
os.path.join("media", "M9.tsv"),
os.path.join("media", "M9_GLC.tsv"),
os.path.join("media", "5X_supplement_EZ.tsv"),
os.path.join("media", "GLC_G6P.tsv"),
os.path.join("media", "GLC_LCT.tsv"),
os.path.join("media", "ecoli_core_GLC.tsv"),
os.path.join("media", "PURE_Fuji_2014.tsv"),
os.path.join("media", "PURE_Ueda_2010.tsv"),
)
class DataStore(object):
def __init__(self):
pass
class KnowledgeBase(object):
""" KnowledgeBase """
def __init__(self):
# Load raw data from TSV files
for filename in LIST_OF_FLAT_FILENAMES:
self._load_tsv(FLAT_DIR, filename)
self.genes = {
gene['symbol']: gene
for gene in self.wcEcoli_genes}
self.proteins = {
protein['geneId']: protein
for protein in self.wcEcoli_proteins}
def _load_tsv(self, dir_name, file_name):
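        # Mirror any sub-directories in file_name as nested DataStore
        # attributes (e.g. self.media), then attach the parsed rows under an
        # attribute named after the file's basename without its extension.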
path = self
steps = file_name.split(os.path.sep)
for subPath in steps[:-1]:
if not hasattr(path, subPath):
setattr(path, subPath, DataStore())
path = getattr(path, subPath)
attrName = steps[-1].split(".")[0]
setattr(path, attrName, [])
file_path = os.path.join(dir_name, file_name)
rows = load_tsv(file_path)
setattr(path, attrName, [row for row in rows])
def concatenate_sequences(self, units):
sequence = ''
for unit in units:
gene = self.genes[unit]
protein = self.proteins[gene['id']]
sequence += protein['seq']
return sequence
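# Minimal usage sketch (an illustration only: it assumes the TSV files listed in
# LIST_OF_FLAT_FILENAMES are present under data/flat and carry the 'symbol',
# 'id', 'geneId' and 'seq' columns that the constructor relies on):
if __name__ == "__main__":
    kb = KnowledgeBase()
    print("loaded {} genes and {} proteins".format(len(kb.genes), len(kb.proteins)))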
|
the-stack_106_16749
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange, range, zip
from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
import pandas.util.testing as tm
class TestSeriesAlterAxes(object):
def test_setindex(self, string_series):
# wrong type
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed")
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = ("Length mismatch: Expected axis has 30 elements, new"
" values have 29 elements")
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_rename(self, datetime_series):
ts = datetime_series
renamer = lambda x: x.strftime('%Y%m%d')
renamed = ts.rename(renamer)
assert renamed.index[0] == renamer(ts.index[0])
# dict
rename_dict = dict(zip(ts.index, renamed.index))
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
# index with name
renamer = Series(np.arange(4),
index=Index(['a', 'b', 'c', 'd'], name='name'),
dtype='int64')
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name='foo')
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list('abcd'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list('abc'))
for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(['a', 'b', 'c'], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_rename_axis_supported(self):
# Supporting axis for compatibility, detailed in GH-18589
s = Series(range(5))
s.rename({}, axis=0)
s.rename({}, axis='index')
with pytest.raises(ValueError, match='No axis named 5'):
s.rename({}, axis=5)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name='bar')
for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name('foo')
assert s2.name == 'foo'
assert s.name is None
assert s is not s2
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(datetime_series.index[0])
datetime_series.rename(renamer, inplace=True)
assert datetime_series.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
assert 'value' in df
df = ser.reset_index(name='value2')
assert 'value2' in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
tm.assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_name(self):
s = Series([1, 2, 3], index=Index(range(3), name='x'))
assert s.reset_index().index.name is None
assert s.reset_index(drop=True).index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]],
columns=['A', 'B', 'C'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
s = df.set_index(['A', 'B'])['C']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C']])
with pytest.raises(KeyError, match='Level E '):
s.reset_index(level=['A', 'E'])
# With single-level Index
s = df.set_index('A')['B']
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[['A', 'B']])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df['B'])
with pytest.raises(IndexError, match='Too many levels'):
s.reset_index(level=[0, 1, 2])
# Check that .reset_index([],drop=True) doesn't fail
result = Series(range(4)).reset_index([], drop=True)
expected = Series(range(4))
tm.assert_series_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
s = Series(range(2), name='A', dtype='int64')
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = DataFrame([[0, 0], [1, 1]],
columns=['index', 'A'],
index=RangeIndex(stop=2))
tm.assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
tm.assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
tm.assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
tm.assert_series_equal(result, expected)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
names=['ll', 'nn'])
s = Series([i for i in range(len(mi))], index=mi)
result = s.rename_axis(index={'ll': 'foo'})
assert result.index.names == ['foo', 'nn']
result = s.rename_axis(index=str.upper, axis=0)
assert result.index.names == ['LL', 'NN']
result = s.rename_axis(index=['foo', 'goo'])
assert result.index.names == ['foo', 'goo']
with pytest.raises(TypeError, match='unexpected'):
s.rename_axis(columns='wrong')
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis('foo')
result = datetime_series
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
def test_set_axis_inplace_axes(self, axis_series):
# GH14636
ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = ser.copy()
expected.index = list('abcd')
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in [(None, FutureWarning), (True, None)]:
result = ser.copy()
kwargs = {'inplace': inplace}
with tm.assert_produces_warning(warn):
result.set_axis(list('abcd'), axis=axis_series, **kwargs)
tm.assert_series_equal(result, expected)
def test_set_axis_inplace(self):
# GH14636
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
# inplace=False
result = s.set_axis(list('abcd'), axis=0, inplace=False)
tm.assert_series_equal(expected, result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = s.set_axis(list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
# wrong values for the "axis" parameter
for axis in [2, 'foo']:
with pytest.raises(ValueError, match='No axis named'):
s.set_axis(list('abcd'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
expected = s.copy()
expected.index = list('abcd')
for axis in [0, 'index']:
with tm.assert_produces_warning(FutureWarning):
                result = s.set_axis(axis, list('abcd'), inplace=False)
tm.assert_series_equal(result, expected)
def test_reset_index_drop_errors(self):
# GH 20925
# KeyError raised for series index when passed level name is missing
s = Series(range(4))
with pytest.raises(KeyError, match='must be same as name'):
s.reset_index('wrong', drop=True)
with pytest.raises(KeyError, match='must be same as name'):
s.reset_index('wrong')
# KeyError raised for series when level to be dropped is missing
s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))
with pytest.raises(KeyError, match='not found'):
s.reset_index('wrong', drop=True)
def test_droplevel(self):
# GH20342
ser = Series([1, 2, 3, 4])
ser.index = MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)],
names=['a', 'b'])
expected = ser.reset_index('b', drop=True)
result = ser.droplevel('b', axis='index')
tm.assert_series_equal(result, expected)
# test that droplevel raises ValueError on axis != 0
with pytest.raises(ValueError):
ser.droplevel(1, axis='columns')
|
the-stack_106_16752
|
#!/usr/bin/python
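# NOTE: this script uses raw_input(), so it targets Python 2.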
import LocalMachine
import BlockDeviceHandler
def hum_readable_list_devices(full_info=False):
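    # Summarise connected block devices using lsblk/blkid/fdisk output and
    # per-device details gathered through BlockDeviceHandler.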
if full_info:
cmd="lsblk"
exit_code, stdout, stderr = LocalMachine.run_command(cmd)
if exit_code == 0:
print("[CMD] " + str(cmd))
print(stdout)
print("\n")
cmd="blkid"
exit_code, stdout, stderr = LocalMachine.run_command(cmd)
if exit_code == 0:
print("[CMD] " + str(cmd))
print(stdout)
print("\n")
cmd="sudo fdisk -l | grep -v grep | grep /dev/sd"
exit_code, stdout, stderr = LocalMachine.run_command(cmd)
if exit_code == 0:
print("[CMD] " + str(cmd))
print(stdout)
devices_list = BlockDeviceHandler.list_connected_devices()
for device in devices_list:
device_info = BlockDeviceHandler.get_device_info_data(device)
for key, value in device_info.items():
sep_len = 12 - len(key)
sep = " "*sep_len
print("\t{}:{}{}".format(key, sep, value))
def main():
hum_readable_list_devices()
print("\nFormat your disk to ext4 filesystem:")
device = raw_input("Device (ex. /dev/sda1): ")
label = raw_input("Wanted label (ex. what wou want - ex. drive): ")
BlockDeviceHandler.format_ex4(device, label)
if __name__ == "__main__":
main()
|
the-stack_106_16753
|
# SPDX-FileCopyrightText: 2019 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
This demo shows the latest icons from a connected Apple device on a TFT Gizmo screen.
The A and B buttons on the CircuitPlayground Bluefruit can be used to scroll through all active
notifications. The screen's backlight will turn off after a certain number of seconds to save power.
New notifications or pressing the buttons should turn it back on.
"""
import time
import board
import digitalio
import displayio
import adafruit_ble
from adafruit_ble.advertising.standard import SolicitServicesAdvertisement
from adafruit_ble_apple_notification_center import AppleNotificationCenterService
from adafruit_gizmo import tft_gizmo
from audiocore import WaveFile
from audiopwmio import PWMAudioOut as AudioOut
# Enable the speaker
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.direction = digitalio.Direction.OUTPUT
speaker_enable.value = True
audio = AudioOut(board.SPEAKER)
# This is a whitelist of apps to show notifications from.
APP_ICONS = {
"com.tinyspeck.chatlyio": "/ancs_slack.bmp",
"com.basecamp.bc3-ios": "/ancs_basecamp.bmp",
"com.apple.MobileSMS": "/ancs_sms.bmp",
"com.hammerandchisel.discord": "/ancs_discord.bmp",
"com.apple.mobilecal": "/ancs_ical.bmp",
"com.apple.mobilephone": "/ancs_phone.bmp"
}
BLOCKLIST = []
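# App ids added to BLOCKLIST are skipped even if they appear in APP_ICONS.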
DELAY_AFTER_PRESS = 15
DEBOUNCE = 0.1
DIM_TIMEOUT = 20 # Amount of timeout to turn off backlight
DIM_LEVEL = 0.05
a = digitalio.DigitalInOut(board.BUTTON_A)
a.switch_to_input(pull=digitalio.Pull.DOWN)
b = digitalio.DigitalInOut(board.BUTTON_B)
b.switch_to_input(pull=digitalio.Pull.DOWN)
file = open("/triode_rise.wav", "rb")
wave = WaveFile(file)
def play_sound():
audio.play(wave)
time.sleep(1)
def find_connection():
for connection in radio.connections:
if AppleNotificationCenterService not in connection:
continue
if not connection.paired:
connection.pair()
return connection, connection[AppleNotificationCenterService]
return None, None
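# Dimmer lowers the display brightness to DIM_LEVEL once DIM_TIMEOUT seconds
# pass without an update() call or a button press, and restores it afterwards.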
class Dimmer:
def __init__(self):
self._update_time = time.monotonic()
self._level = DIM_LEVEL
self._timeout = DIM_TIMEOUT
def update(self):
self._update_time = time.monotonic()
def check_timeout(self):
if a.value or b.value:
self._update_time = time.monotonic()
if time.monotonic() - self._update_time > self._timeout:
if display.brightness > self._level:
display.brightness = self._level
else:
if display.brightness == self._level:
display.brightness = 1.0
dimmer = Dimmer()
# Start advertising before messing with the display so that we can connect immediately.
radio = adafruit_ble.BLERadio()
advertisement = SolicitServicesAdvertisement()
advertisement.complete_name = "CIRCUITPY"
advertisement.solicited_services.append(AppleNotificationCenterService)
def wrap_in_tilegrid(filename:str):
# CircuitPython 6 & 7 compatible
odb = displayio.OnDiskBitmap(open(filename, "rb"))
return displayio.TileGrid(
odb, pixel_shader=getattr(odb, 'pixel_shader', displayio.ColorConverter())
)
# # CircuitPython 7+ compatible
# odb = displayio.OnDiskBitmap(filename)
# return displayio.TileGrid(odb, pixel_shader=odb.pixel_shader)
display = tft_gizmo.TFT_Gizmo()
group = displayio.Group()
group.append(wrap_in_tilegrid("/ancs_connect.bmp"))
display.show(group)
current_notification = None
current_notifications = {}
all_ids = []
last_press = time.monotonic()
active_connection, notification_service = find_connection()
cleared = False
while True:
if not active_connection:
radio.start_advertising(advertisement)
while not active_connection:
active_connection, notification_service = find_connection()
dimmer.check_timeout()
# Connected
dimmer.update()
play_sound()
no_notifications = "/ancs_none.bmp"
group.append(wrap_in_tilegrid(no_notifications))
while active_connection.connected:
all_ids.clear()
current_notifications = notification_service.active_notifications
for notif_id in current_notifications:
notification = current_notifications[notif_id]
if notification.app_id not in APP_ICONS or notification.app_id in BLOCKLIST:
continue
all_ids.append(notif_id)
# pylint: disable=protected-access
all_ids.sort(key=lambda x: current_notifications[x]._raw_date)
# pylint: enable=protected-access
if current_notification and current_notification.removed:
# Stop showing the latest and show that there are no new notifications.
current_notification = None
if not current_notification and not all_ids and not cleared:
cleared = True
dimmer.update()
group[1] = wrap_in_tilegrid(no_notifications)
elif all_ids:
cleared = False
now = time.monotonic()
if current_notification and current_notification.id in all_ids and \
now - last_press < DELAY_AFTER_PRESS:
index = all_ids.index(current_notification.id)
else:
index = len(all_ids) - 1
if now - last_press >= DEBOUNCE:
if b.value and index > 0:
last_press = now
index += -1
if a.value and index < len(all_ids) - 1:
last_press = now
index += 1
notif_id = all_ids[index]
if not current_notification or current_notification.id != notif_id:
dimmer.update()
current_notification = current_notifications[notif_id]
# pylint: disable=protected-access
print(current_notification._raw_date, current_notification)
# pylint: enable=protected-access
group[1] = wrap_in_tilegrid(APP_ICONS[current_notification.app_id])
dimmer.check_timeout()
# Bluetooth Disconnected
group.pop()
dimmer.update()
active_connection = None
notification_service = None
|
the-stack_106_16756
|
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
import os
import sys
import shutil
import unittest
from pathlib import Path
from contextlib import contextmanager
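# The environment overrides below disable color/progress output and turn off
# most extractors so these CLI tests stay fast and deterministic.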
TEST_CONFIG = {
'USE_COLOR': 'False',
'SHOW_PROGRESS': 'False',
'OUTPUT_DIR': 'data.tests',
'SAVE_ARCHIVE_DOT_ORG': 'False',
'SAVE_TITLE': 'False',
'USE_CURL': 'False',
'USE_WGET': 'False',
'USE_GIT': 'False',
'USE_CHROME': 'False',
'USE_YOUTUBEDL': 'False',
}
OUTPUT_DIR = 'data.tests'
os.environ.update(TEST_CONFIG)
from ..main import init
from ..index import load_main_index
from ..config import (
SQL_INDEX_FILENAME,
JSON_INDEX_FILENAME,
HTML_INDEX_FILENAME,
)
from . import (
archivebox_init,
archivebox_add,
archivebox_remove,
)
HIDE_CLI_OUTPUT = True
test_urls = '''
https://example1.com/what/is/happening.html?what=1#how-about-this=1
https://example2.com/what/is/happening/?what=1#how-about-this=1
HTtpS://example3.com/what/is/happening/?what=1#how-about-this=1f
https://example4.com/what/is/happening.html
https://example5.com/
https://example6.com
<test>http://example7.com</test>
[https://example8.com/what/is/this.php?what=1]
[and http://example9.com?what=1&other=3#and-thing=2]
<what>https://example10.com#and-thing=2 "</about>
abc<this["https://subb.example11.com/what/is#and-thing=2?whoami=23&where=1"]that>def
sdflkf[what](https://subb.example12.com/who/what.php?whoami=1#whatami=2)?am=hi
example13.bada
and example14.badb
<or>htt://example15.badc</that>
'''
stdout = sys.stdout
stderr = sys.stderr
@contextmanager
def output_hidden(show_failing=True):
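    # Redirect stdout/stderr to temporary files for the duration of the block;
    # if the block raises and show_failing is True, replay the captured output
    # before re-raising.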
if not HIDE_CLI_OUTPUT:
yield
return
sys.stdout = open('stdout.txt', 'w+', encoding='utf-8')
sys.stderr = open('stderr.txt', 'w+', encoding='utf-8')
try:
yield
sys.stdout.close()
sys.stderr.close()
sys.stdout = stdout
sys.stderr = stderr
except Exception:
sys.stdout.close()
sys.stderr.close()
sys.stdout = stdout
sys.stderr = stderr
if show_failing:
with open('stdout.txt', 'r', encoding='utf-8') as f:
print(f.read())
with open('stderr.txt', 'r', encoding='utf-8') as f:
print(f.read())
raise
finally:
os.remove('stdout.txt')
os.remove('stderr.txt')
class TestInit(unittest.TestCase):
def setUp(self):
os.makedirs(OUTPUT_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
def test_basic_init(self):
with output_hidden():
archivebox_init.main([])
assert (Path(OUTPUT_DIR) / SQL_INDEX_FILENAME).exists()
assert (Path(OUTPUT_DIR) / JSON_INDEX_FILENAME).exists()
assert (Path(OUTPUT_DIR) / HTML_INDEX_FILENAME).exists()
assert len(load_main_index(out_dir=OUTPUT_DIR)) == 0
def test_conflicting_init(self):
with open(Path(OUTPUT_DIR) / 'test_conflict.txt', 'w+', encoding='utf-8') as f:
f.write('test')
try:
with output_hidden(show_failing=False):
archivebox_init.main([])
assert False, 'Init should have exited with an exception'
except SystemExit:
pass
assert not (Path(OUTPUT_DIR) / SQL_INDEX_FILENAME).exists()
assert not (Path(OUTPUT_DIR) / JSON_INDEX_FILENAME).exists()
assert not (Path(OUTPUT_DIR) / HTML_INDEX_FILENAME).exists()
try:
load_main_index(out_dir=OUTPUT_DIR)
assert False, 'load_main_index should raise an exception when no index is present'
except Exception:
pass
def test_no_dirty_state(self):
with output_hidden():
init()
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
with output_hidden():
init()
class TestAdd(unittest.TestCase):
def setUp(self):
os.makedirs(OUTPUT_DIR, exist_ok=True)
with output_hidden():
init()
def tearDown(self):
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
def test_add_arg_url(self):
with output_hidden():
archivebox_add.main(['https://getpocket.com/users/nikisweeting/feed/all'])
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 30
def test_add_arg_file(self):
test_file = Path(OUTPUT_DIR) / 'test.txt'
with open(test_file, 'w+', encoding='utf') as f:
f.write(test_urls)
with output_hidden():
archivebox_add.main([test_file])
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 12
os.remove(test_file)
def test_add_stdin_url(self):
with output_hidden():
archivebox_add.main([], stdin=test_urls)
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 12
class TestRemove(unittest.TestCase):
def setUp(self):
os.makedirs(OUTPUT_DIR, exist_ok=True)
with output_hidden():
init()
archivebox_add.main([], stdin=test_urls)
# def tearDown(self):
# shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
def test_remove_exact(self):
with output_hidden():
archivebox_remove.main(['--yes', '--delete', 'https://example5.com/'])
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 11
def test_remove_regex(self):
with output_hidden():
archivebox_remove.main(['--yes', '--delete', '--filter-type=regex', r'http(s)?:\/\/(.+\.)?(example\d\.com)'])
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 4
def test_remove_domain(self):
with output_hidden():
archivebox_remove.main(['--yes', '--delete', '--filter-type=domain', 'example5.com', 'example6.com'])
all_links = load_main_index(out_dir=OUTPUT_DIR)
assert len(all_links) == 10
def test_remove_none(self):
try:
with output_hidden(show_failing=False):
archivebox_remove.main(['--yes', '--delete', 'https://doesntexist.com'])
assert False, 'Should raise if no URLs match'
except Exception:
pass
if __name__ == '__main__':
if '--verbose' in sys.argv or '-v' in sys.argv:
HIDE_CLI_OUTPUT = False
unittest.main()
|
the-stack_106_16758
|
#!/usr/bin/env runaiida
#Not required by AiiDA
import os.path as op
import sys
#AiiDA classes and functions
from aiida.engine import submit
from aiida.orm import load_code, load_node
from aiida.orm import (Str, List, Dict, StructureData, KpointsData, Int, Float)
from aiida_pseudo.data.pseudo.psf import PsfData
from aiida_siesta.workflows.simplex_basis import SimplexBasisOptimization
from aiida_siesta.workflows.two_steps_optimization import TwoStepsBasisOpt
try:
codename = sys.argv[1]
except IndexError:
codename = 'SiestaHere@localhost'
#The code
code = load_code(codename)
#Structure
alat = 5.430 # angstrom
cell = [
[
0.5 * alat,
0.5 * alat,
0.,
],
[
0.,
0.5 * alat,
0.5 * alat,
],
[
0.5 * alat,
0.,
0.5 * alat,
],
]
#The atom positions were originally given in the "ScaledCartesian" format
#but standard for aiida structures is Cartesian in Angstrom
structure = StructureData(cell=cell)
structure.append_atom(position=(0.000 * alat, 0.000 * alat, 0.000 * alat),
symbols=['Si'])
structure.append_atom(position=(0.250 * alat, 0.250 * alat, 0.250 * alat),
symbols=['Si'])
#The parameters
parameters = Dict(
dict={
'meshcutoff': '100 Ry',
'xc-functional': 'GGA',
'xc-authors': 'PBE',
'max-scfiterations': 4000,
'scf-mixerhistory': 5,
'scf-mixerweight': 0.1,
'scf-dm-tolerance': 0.0001,
'Solution-method': 'diagon',
'electronic-temperature': '25 meV',
'write-forces': True,
})
#The basis set (a 'pao-split-tail-norm': "T" entry could optionally be added to the dict below)
basis = Dict(
dict={
'%block pao-basis': "\nSi 2\n n=3 0 2\n 4.99376 $sz2 \n n=3 1 2 P 1\n 6.2538 $pz2 \n%endblock pao-basis"
})
#The kpoints
kpoints = KpointsData()
kpoints.set_kpoints_mesh([8, 8, 8])
#Pseudos
pseudos_dict = {}
raw_pseudos = [("Si.psf", ['Si'])]
for fname, kinds in raw_pseudos:
absname = op.realpath(op.join(op.dirname(__file__), "../fixtures/sample_psf", fname))
pseudo = PsfData.get_or_create(absname)
if not pseudo.is_stored:
print("\nCreated the pseudo for {}".format(kinds))
else:
print("\nUsing the pseudo for {} from DB: {}".format(kinds, pseudo.pk))
for j in kinds:
pseudos_dict[j]=pseudo
#Resources
options = Dict(
dict={
"max_wallclock_seconds": 36000,
"resources": {
"num_machines": 1,
"num_mpiprocs_per_machine": 1,
}
})
#The submission
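# In 'variables_dict' below, each entry appears to map a $-placeholder used in
# the basis block above (e.g. $sz2, $pz2) to [lower bound, upper bound, initial
# value]; this reading is an assumption based on the values used here.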
inputs = {
'siesta_base': {
'structure': structure,
'parameters': parameters,
'code': code,
'basis': basis,
'kpoints': kpoints,
'pseudos': pseudos_dict,
'options': options
},
'simplex': {
# 'max_iters': Int(4),
'output_name': Str("basis_enthalpy"),
'variables_dict': Dict(dict={
"sz2":[2.0,4.8,3.0],
"pz2":[2.0,6.0,3.0]
}),
},
#'macrostep':{'lambda_scaling_factor': Float(0.2)},
}
process = submit(SimplexBasisOptimization, **inputs)
#process = submit(TwoStepsBasisOpt, **inputs)
print(f"Submitted workchain; ID={process.pk}")
print(f"For information about this workchain type: verdi process show {process.pk}")
print("For a list of running processes type: verdi process list")
|
the-stack_106_16759
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
import modeling
import tokenization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None, "")
flags.DEFINE_string("output_file", None, "")
flags.DEFINE_string("layers", "-1,-2,-3,-4", "")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string("master", None,
"If using a TPU, the address of the master.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"use_one_hot_embeddings", False,
"If True, tf.one_hot will be used for embedding lookups, otherwise "
"tf.nn.embedding_lookup will be used. On TPUs, this should be True "
"since it is much faster.")
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
def input_fn_builder(features, seq_length):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
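    # A rough sketch of that TFRecord-based alternative (not used here; the
    # feature spec is an assumption matching the four tensors built above):
    #   d = tf.data.TFRecordDataset("features.tfrecord")  # hypothetical path
    #   d = d.map(lambda rec: tf.parse_single_example(rec, {
    #       "unique_ids": tf.FixedLenFeature([], tf.int64),
    #       "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
    #       "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
    #       "input_type_ids": tf.FixedLenFeature([seq_length], tf.int64),
    #   }))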
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
scaffold_fn = None
(assignment_map,
initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (example.unique_id))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with tf.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
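  # FLAGS.layers is a comma-separated list of encoder layer indexes, e.g. "-1,-2,-3,-4" for the last four layers.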
layer_indexes = [int(x) for x in FLAGS.layers.split(",")]
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
master=FLAGS.master,
tpu_config=tf.contrib.tpu.TPUConfig(
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
examples = read_examples(FLAGS.input_file)
features = convert_examples_to_features(
examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
layer_indexes=layer_indexes,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
predict_batch_size=FLAGS.batch_size)
input_fn = input_fn_builder(
features=features, seq_length=FLAGS.max_seq_length)
with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
"w")) as writer:
for result in estimator.predict(input_fn, yield_single_examples=True):
unique_id = int(result["unique_id"])
feature = unique_id_to_feature[unique_id]
output_json = collections.OrderedDict()
output_json["linex_index"] = unique_id
all_features = []
for (i, token) in enumerate(feature.tokens):
all_layers = []
for (j, layer_index) in enumerate(layer_indexes):
layer_output = result["layer_output_%d" % j]
layers = collections.OrderedDict()
layers["index"] = layer_index
layers["values"] = [
round(float(x), 6) for x in layer_output[i:(i + 1)].flat
]
all_layers.append(layers)
features = collections.OrderedDict()
features["token"] = token
features["layers"] = all_layers
all_features.append(features)
output_json["features"] = all_features
writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("output_file")
tf.app.run()
|
the-stack_106_16760
|
from message import *
from database import Database
from global_manager import GlobalManger
from utils import *
from user import User
import socket
import traceback
import threading
class Transfer(threading.Thread):
"""
数据交互类
维持客户端与服务器的数据交互和客户端的消息转发
"""
def __init__(self, sock: socket.socket, name=None):
super().__init__(name=name)
self._func_map = {Message.Cmd.Logout: self._logout_msg,
Message.Cmd.Chat: self._chat_msg,
Message.Cmd.AddDelFriend: self._add_del_friend_msg,
Message.Cmd.AddDelGroup: self._add_del_group_msg,
Message.Cmd.AcceptDenyReq: self._accept_friend_msg,
Message.Cmd.QueryFriendInfo: self._get_friend_info_msg,
Message.Cmd.FindFriend: self._find_friend_msg,
Message.Cmd.SetInfo: self._set_user_info_msg,
Message.Cmd.ModifyPassword: self._modify_password_msg,
Message.Cmd.ReqUserInfo: self._req_user_info_msg}
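        # Once logged in, each incoming message is dispatched to the handler registered for its command above.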
self._sock = sock
self._peer = self._sock.getpeername()
self._user = None
self._req_friend_msg = []
self._global_info = GlobalManger()
self._db = Database.create_db()
self._be_quit = False
def run(self):
""" 主循环 """
retry = 100
while not self._be_quit:
try:
data = self._sock.recv(4096)
except Exception as e:
print(e)
break
if not data:
break
try:
msg = Message.create_msg(data.decode('utf-8'))
except Exception as e:
print(traceback.format_exc())
self._send_receipt(0, False, str(e))
                retry -= 1
                if retry == 0:
                    self.ready2exit()
continue
# print('%s recv : %s' % (threading.current_thread(), msg.msg))
            # Not logged in yet: handle the message with the no-login handler
if not self._user:
self.__nologin_process(msg)
else:
self.__has_logged_process(msg)
        # Remove ourselves from the global info manager before closing the socket,
        # so other threads cannot call this object's send() on a closed connection.
if self._user:
self._global_info.del_connect(self._user.ID)
self._sock.close()
print('%s----exit' % self.name)
@use_log
def _login_process(self, msg):
""" 处理用户登陆 """
assert type(msg) == LoginMsg
        # Check whether the user is already logged in
if self._global_info.is_login(msg.ID):
self._send_user_info(msg.cmd, False, '用户已经登陆')
self.ready2exit()
else:
            # Verify the user ID and password
if self._db.check_user_pwd(msg.ID, msg.pwd):
print(msg.ID, '登陆成功')
self._user = self._db.query_user(msg.ID)
self._global_info.add_connect(msg.ID, self)
self._user.online = True
self._notify_friend(True)
self._send_user_info(msg.cmd)
                # Rename the thread to "用户:" + ID
self.name = "用户:" + self._user.ID
else:
self._send_user_info(msg.cmd, False, 'id未注册或密码错误')
print(msg.ID, msg.pwd, '登陆失败')
@use_log
def _register_process(self, msg):
""" 处理用户注册 """
assert type(msg) == RegisterMsg
print(msg.nick_name, '注册成功')
        # Allocate an ID, store the user, and send back a receipt
ID = self._db.distribution_id()
user = User(ID=ID, nick_name=msg.nick_name)
self._db.add_user(user, msg.pwd)
self._send_receipt(msg.cmd, ID=ID)
# print(user)
@use_log
def _logout_msg(self, **kwargs):
""" 注销处理 """
msg = kwargs.get('msg')
assert type(msg) == LogoutMsg
self._send_receipt(msg.cmd)
self._user.online = False
self._notify_friend(False)
self.ready2exit()
@use_log
def _chat_msg(self, **kwargs):
""" 聊天消息转发 """
msg = kwargs.get('msg')
assert type(msg) == ChatMsg
        # Forward the message to the user identified by toID
toID = msg.friend_id
msg.friend_id = self._user.ID
msg.nick_name = self._user.nick_name
if toID == self._user.ID:
self._send_receipt(msg.cmd, False, '不能发送消息给自己')
elif self._user.has_friend(toID)[0]:
self._global_info.send_msg2id(toID, msg)
self._send_receipt(msg.cmd)
else:
self._send_receipt(msg.cmd, False, str('%s 不是您的朋友' % toID))
@use_log
def _add_del_friend_msg(self, **kwargs):
""" 添加或删除好友 """
msg = kwargs.get('msg')
assert type(msg) == AddDelFriendMsg
if msg.add:
fid = msg.friend_id
            # The ID must not be our own and the user must exist in the DB
if fid != self._user.ID and self._db.query_user(msg.friend_id):
                # Save the incoming friend-request message
self._save_req_friend_msg(msg)
                # Forward the friend request with our own ID and nickname.
                # A new object is needed here; otherwise changing the ID would corrupt the saved request data.
trans_msg = AddDelFriendMsg(AddDelFriendMsg.DefaultMsg)
trans_msg.friend_id = self._user.ID
trans_msg.nick_name = self._user.nick_name
trans_msg.group = None
self._global_info.send_msg2id(fid, trans_msg)
self._send_receipt(msg.cmd, True, 'success')
            else:  # User not found in the DB
                self._send_receipt(msg.cmd, False, '没有找到id为 %s 的用户' % fid)
        else:  # Delete a friend
if self._user.del_friend4group(msg.friend_id, msg.group):
self._db.data_changed()
self._send_receipt(msg.cmd)
else:
self._send_receipt(msg.cmd, False, '好友或分组不存在')
@use_log
def _accept_friend_msg(self, **kwargs):
""" 同意或拒绝好友添加 """
msg = kwargs.get('msg')
assert type(msg) == AcceptDenyReqMsg
if msg.accept:
            # Check whether the accepted ID is in the pending-request list
req_msg = self._find_save_req_msg(msg)
if not req_msg:
self._send_receipt(msg.cmd, False, str('%s 的用户没有添加您或消息已过期' % msg.friend_id))
return
            # IDs in the pending-request list are guaranteed to exist; _add_del_friend_msg already checked them
self._user.add_friend2group(msg.friend_id, msg.group)
self._send_receipt(msg.cmd)
self._remove_req_msg(msg)
        # For a denial, simply forward the reply to the requester (the forwarding below runs in both cases)
msg.group = None
fid = msg.friend_id
msg.friend_id = self._user.ID
msg.nick_name = self._user.nick_name
self._global_info.send_msg2id(fid, msg)
@use_log
def _get_friend_info_msg(self, **kwargs):
""" 获取好友信息 """
msg = kwargs.get('msg')
assert type(msg) == QueryFriendInfoMsg
if not self._user.find_friend(msg.friend_id):
            self._send_receipt(msg.cmd, False, 'not found friend %s' % msg.friend_id)
return
        # Look up the friend's info and send it back
ret_msg = RetFriendInfoMsg(RetFriendInfoMsg.DefaultMsg)
ret_msg.result = self._user.public_info()
self.send(ret_msg)
@use_log
def _find_friend_msg(self, **kwargs):
""" 查找朋友 """
msg = kwargs.get('msg')
assert type(msg) == FindFriendMsg
ret_msg = RetFindResultMsg(RetFindResultMsg.DefaultMsg)
ret_msg.result = []
        users = []  # Avoid an undefined `users` when both the ID and the nickname are None
if msg.friend_id:
            # Search by ID
users = self._db.find_user4id(msg.friend_id, msg.fuzzy)
elif msg.nick_name:
            # Search by nickname
users = self._db.find_user4nickname(msg.nick_name, msg.fuzzy)
for user in users:
            if user.allow_find:  # Only include users who allow themselves to be found
ret_msg.result.append(user.simple_info())
self.send(ret_msg)
@use_log
def _set_user_info_msg(self, **kwargs):
""" 设置个人信息 """
msg = kwargs.get('msg')
assert type(msg) == SetInfoMsg
self._user.nick_name = msg.nick_name
self._user.sex = msg.sex
self._user.birthday = msg.birthday
self._user.desc = msg.desc
self._user.ext_info = msg.ext_info
self._user.allow_find = msg.allow_find
self._send_receipt(msg.cmd, True, 'success')
self._db.data_changed()
@use_log
def _modify_password_msg(self, **kwargs):
""" 修改密码 """
msg = kwargs.get('msg')
assert type(msg) == ModifyPasswordMsg
if self._db.modify_pwd(self._user.ID, msg.old_pwd, msg.new_pwd):
self._send_receipt(msg.cmd)
else:
self._send_receipt(msg.cmd, False, 'password error')
@use_log
def _add_del_group_msg(self, **kwargs):
""" 添加或删除分组 """
msg = kwargs.get('msg')
assert type(msg) == AddDelGroupMsg
reason = None
if msg.add:
if not self._user.add_group(msg.group):
reason = '分组已存在'
else:
if not self._user.del_group(msg.group, msg.moveto):
reason = '分组不存在'
if reason:
self._send_receipt(msg.cmd, False, reason)
else:
self._db.data_changed()
self._send_receipt(msg.cmd)
@use_log
def _req_user_info_msg(self, **kwargs):
msg = kwargs.get('msg')
assert type(msg) == ReqUserInfoMsg
self._send_user_info(msg.cmd)
def _notify_friend(self, online: bool):
""" 通知好友上线/通知好友下线 """
msg = RetOnlineNotifyMsg(RetOnlineNotifyMsg.DefaultMsg)
        # This is our own ID, telling friends that we came online
msg.friend_id = self._user.ID
msg.nick_name = self._user.nick_name
msg.online = online
        # Walk the friend lists and notify every friend
for flist in self._user.groups.values():
for friend_id in flist:
self._global_info.send_msg2id(friend_id, msg)
def __nologin_process(self, msg: Message) -> bool:
""" 用户未登陆时的消息处理 """
if msg.cmd == Message.Cmd.Login:
self._login_process(msg)
elif msg.cmd == Message.Cmd.Register:
self._register_process(msg)
else:
self._send_receipt(msg.cmd, False, '请登录后操作')
return False
return True
def __has_logged_process(self, msg: Message) -> bool:
""" 用户登陆后的消息处理 """
ret = False
func = self._func_map.get(msg.cmd)
if not func:
self._send_receipt(msg.cmd, False, '命令错误')
else:
try:
func(msg=msg)
ret = True
except AssertionError:
                # Assertion error; normally impossible because messages are parsed according to their command
print(traceback.format_exc())
self.ready2exit()
self._send_receipt(msg.cmd, False, 'message format error')
except Exception as e:
print(traceback.format_exc())
self._send_receipt(msg.cmd, False, str(e))
return ret
def _send_user_info(self, execmd, succ=True, reason='success'):
""" 发送用户信息 """
msg = RetUserInfoMsg(RetUserInfoMsg.DefaultMsg)
msg.exe_cmd = execmd
msg.success = succ
msg.reason = reason
if succ:
msg.user = self._user.__dict__()
for key, group in self._user.groups.items():
for friend_id in group:
friend = self._db.query_user(friend_id)
if friend:
msg.user['groups'][key] = friend.simple_info()
else:
msg.user = {}
self.send(msg)
def _send_receipt(self, exec_cmd, succ=True, reason='success', ID='0'):
""" 发送消息回执 """
msg = ReceiptMsg(ReceiptMsg.DefaultMsg)
msg.success = succ
msg.reason = reason
msg.exe_cmd = exec_cmd
msg.ID = ID
self.send(msg)
def _save_req_friend_msg(self, msg: Message):
""" 将请求加好友的ID存储起来 """
self._req_friend_msg.append(msg)
        # Cap the number of pending requests (drop the oldest when the list grows too large)
if len(self._req_friend_msg) > 50:
del self._req_friend_msg[0]
def _find_save_req_msg(self, msg: Message):
for m in self._req_friend_msg:
if m.friend_id == msg.friend_id:
return m
return None
def _remove_req_msg(self, msg: Message):
reserved_msg = []
        # There may be several identical friend requests, so scan the whole list
for m in self._req_friend_msg:
if m.friend_id != msg.friend_id:
reserved_msg.append(m)
self._req_friend_msg = reserved_msg
def ready2exit(self):
""" 准备退出 """
self._be_quit = True
self._sock.shutdown(socket.SHUT_RD)
def send(self, msg: Message):
""" 消息发送函数 """
self._sock.send(msg.msg_crypto_bytes)
def recv_notify(self, msg: Message) -> bool:
""" 接受其他用户发来的消息 """
        # A friend-request message
if type(msg) == AddDelFriendMsg:
            # Save the requester's message, then forward it to our own client
self._save_req_friend_msg(msg)
self.send(msg)
return True
        # The friend accepted or denied our request
if type(msg) == AcceptDenyReqMsg:
if msg.accept:
                # Only our own original request carries the group info, so look up the request we sent earlier
req_msg = self._find_save_req_msg(msg)
self._user.add_friend2group(msg.friend_id, req_msg.group)
self._remove_req_msg(msg)
            self.send(msg)
            return True
if type(msg) == RetOnlineNotifyMsg:
self.send(msg)
return True
if type(msg) == ChatMsg:
self.send(msg)
return True
def ID(self):
return self.name
|
the-stack_106_16761
|
'''
A DQN (Deep Q-Network) agent implemented with PyTorch.
'''
import numpy as np
import torch.nn as nn
import torch
from torch.optim import Adam
from typing import Tuple
import os
class ReplayBuffer:
def __init__(self, state_dim, max_size=10000, device=torch.device('cpu')):
self.device = device
self.state_buffer = torch.empty((max_size, state_dim), dtype=torch.float32, device=device)
self.other_buffer = torch.empty((max_size, 3), dtype=torch.float32, device=device)
self.index = 0
self.max_size = max_size
self.total_len = 0
def append(self, state, other):
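        # `other` is packed as (scaled reward, mask, action); see DeepQnetwork.explore_env.
        # The buffer is circular: once full, the oldest entries are overwritten.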
self.index = self.index % self.max_size
self.total_len = max(self.index, self.total_len)
self.state_buffer[self.index] = torch.as_tensor(state, device=self.device)
self.other_buffer[self.index] = torch.as_tensor(other, device=self.device)
self.index += 1
def sample_batch(self, batch_size):
indices = np.random.randint(0, self.total_len - 1, batch_size)
return (
self.state_buffer[indices], # S_t
self.other_buffer[indices, 2:].long(), # a_t
self.other_buffer[indices, 0], # r_t
            self.other_buffer[indices, 1],  # mask: gamma if not done else 0
            self.state_buffer[indices + 1]  # S_{t+1}
)
class QNet(nn.Module):
def __init__(self, obs_dim: int, action_dim: int, mid_dim: int = 256) -> None:
'''
:param obs_dim: the dim of observation. type: int. for gym env: obs_dim = env.observation_space.shape[0]
:param action_dim: action space, i.e: The number of actions that can be taken at each step. type:int. for gym env: action_dim = env.action_space.n
:param mid_dim: hidden size of MLP.
'''
super(QNet, self).__init__()
self.obs_dim = obs_dim
self.action_dim = action_dim
self.encoder = nn.Sequential(
nn.Linear(obs_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim)
)
def forward(self, state: torch.FloatTensor) -> torch.FloatTensor:
# return Q(s, a). the estimated state-action value.
return self.encoder(state)
def load_and_save_weight(self, path, mode='load'):
if mode == 'load':
if os.path.exists(path):
self.load_state_dict(torch.load(path))
else:
torch.save(self.state_dict(), path)
class DeepQnetwork:
def __init__(self, obs_dim: int, action_dim: int):
self.obs_dim = obs_dim
self.action_dim = action_dim
        self.learning_rate = 1e-4
self.tau = 2 ** -8 # soft update.
self.gamma = 0.99 # discount factor.
self.batch_size = 128
self.memory_size = 100000
self.explore_rate = 0.2 # epsilon greedy rate.
'''
for exploring in the env, each time will collect self.target_step * self.batch_size number of samples into buffer,
for updating neural network, each time will update self.target_step * self.repeat_time times.
'''
self.target_step = 1024
self.repeat_time = 1
self.reward_scale = 1.
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.buffer = ReplayBuffer(obs_dim, self.memory_size, self.device)
self.QNet = QNet(obs_dim, action_dim).to(self.device)
self.QNet_target = QNet(obs_dim, action_dim).to(self.device) # Q target.
        self.optimizer = Adam(self.QNet.parameters(), self.learning_rate)
self.loss_func = nn.MSELoss(reduction='mean')
def select_action(self, state: np.ndarray) -> int:
# using epsilon greedy algorithm to select the action.
if np.random.random() < self.explore_rate: # epsilon greedy.
action = np.random.randint(self.action_dim)
else:
state = torch.as_tensor((state,), dtype=torch.float32, device=self.device).detach_()
dist = self.QNet(state)[0]
action = dist.argmax(dim=0).cpu().numpy()
return action
def explore_env(self, env, all_greedy=False) -> int:
# to collect samples into replay buffer.
state = env.reset()
for _ in range(self.target_step):
action = np.random.randint(self.action_dim) if all_greedy else self.select_action(state)
state_, reward, done, _ = env.step(int(action))
other = (reward * self.reward_scale, 0.0 if done else self.gamma, action)
self.buffer.append(state, other)
state = env.reset() if done else state_
return self.target_step
@staticmethod
def soft_update(eval_net, target_net, tau) -> None:
# soft update for network. the equation: W_1 * tau + W_2 * (1 - tau)
for target_param, local_param in zip(target_net.parameters(), eval_net.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def update(self) -> None:
# update the neural network.
for _ in range(self.target_step * self.repeat_time):
state, action, reward, mask, state_ = self.buffer.sample_batch(self.batch_size)
# Q(s_t, a_t) = r_t + \gamma * max Q(s_{t+1}, a)
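            # `mask` already folds in the discount: it is gamma for non-terminal steps and 0 at episode end.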
next_q = self.QNet_target(state_).detach().max(1)[0]
q_target = reward + mask * next_q
q_eval = self.QNet(state).gather(1, action)
loss = self.loss_func(q_eval, q_target.view(self.batch_size, 1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.soft_update(self.QNet, self.QNet_target, self.tau)
def evaluate(self, env, render=False):
epochs = 20
res = np.zeros((epochs,))
obs = env.reset()
index = 0
while index < epochs:
if render: env.render()
obs = torch.as_tensor((obs,), dtype=torch.float32, device=self.device).detach_()
dist = self.QNet(obs)[0]
action = dist.argmax(dim=0).cpu().numpy()
s_, reward, done, _ = env.step(int(action))
res[index] += reward
if done:
index += 1
obs = env.reset()
else:
obs = s_
return res.mean(), res.std()
def demo_test():
import time
import gym
import highway_env
from copy import deepcopy
from utils import plot_learning_curve
torch.manual_seed(0)
env_id = 'merge-v0' # 'CartPole-v0'
env = gym.make(env_id)
env = gym.wrappers.FlattenObservation(env)
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = DeepQnetwork(obs_dim, action_dim)
# using random explore to collect samples.
agent.explore_env(deepcopy(env), all_greedy=True)
total_step = 1000000
eval_env = deepcopy(env)
step = 0
target_return = 15
avg_return = 0
t = time.time()
step_record = []
episode_return_mean = []
episode_return_std = []
init_save = 10000
while step < total_step and avg_return < target_return - 1:
step += agent.explore_env(env)
agent.update()
avg_return, std_return = agent.evaluate(eval_env)
print(f'current step:{step}, episode return:{avg_return}')
episode_return_mean.append(avg_return)
episode_return_std.append(std_return)
step_record.append(step)
plot_learning_curve(step_record, episode_return_mean, episode_return_std, 'highway_plot_learning_curve.jpg')
if step > init_save:
                agent.QNet.load_and_save_weight('HighwayDQN.weight', mode='save')
init_save += init_save
agent.QNet.load_and_save_weight(f'HighwayDQN.weight', mode='save')
t = time.time() - t
print('total cost time:', t, 's')
plot_learning_curve(step_record, episode_return_mean, episode_return_std)
agent.evaluate(eval_env, render=True)
if __name__ == '__main__':
demo_test()
|
the-stack_106_16762
|
#!/usr/bin/env python
# Copyright (c) 2013, Carnegie Mellon University
# All rights reserved.
# Authors: Evan Shapiro <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from base import BasePlanner, PlanningError, ClonedPlanningMethod, UnsupportedPlanningError
import openravepy
class SBPLPlanner(BasePlanner):
def __init__(self):
super(SBPLPlanner, self).__init__()
try:
self.planner = openravepy.RaveCreatePlanner(self.env, 'SBPL')
except openravepy.openrave_exception:
raise UnsupportedPlanningError('Unable to create SBPL module')
def setupEnv(self, env):
self.env = env
try:
self.problem = openravepy.RaveCreateProblem(self.env, 'SBPL')
except openravepy.openrave_exception:
raise UnsupportedPlanningError('Unable to create SBPL module.')
def __str__(self):
return 'SBPL'
def SetPlannerParameters(self, params_yaml):
self.planner_params = params_yaml
@ClonedPlanningMethod
def PlanToBasePose(self, robot, goal_pose, timelimit=60.0, return_first=False, **kw_args):
"""
Plan to a base pose using SBPL
@param robot
@param goal_pose desired base pose
@param timelimit timeout in seconds
@param return_first return the first path found (if true, the planner will run until a path is found, ignoring the time limit)
"""
params = openravepy.Planner.PlannerParameters()
from openravepy import DOFAffine
robot.SetActiveDOFs([], DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis)
params.SetRobotActiveJoints(robot)
config_spec = openravepy.RaveGetAffineConfigurationSpecification(DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis, robot)
#params.SetConfigurationSpecification(self.env, config_spec) # This breaks
goal_config = openravepy.RaveGetAffineDOFValuesFromTransform(goal_pose,
DOFAffine.X | DOFAffine.Y | DOFAffine.RotationAxis)
params.SetGoalConfig(goal_config)
# Setup default extra parameters
extra_params = self.planner_params
        limits = robot.GetAffineTranslationLimits()
        extents = [limits[0][0], limits[1][0], limits[0][1], limits[1][1]]
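        # extents = [x_min, x_max, y_min, y_max] taken from the robot's affine translation limits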
extra_params["extents"] = extents
extra_params["timelimit"] = timelimit
if return_first:
extra_params["return_first"] = 1
else:
extra_params["return_first"] = 0
extra_params["initial_eps"] = 1.0
for key, value in kw_args.iteritems():
extra_params[key] = value
params.SetExtraParameters(str(extra_params))
traj = openravepy.RaveCreateTrajectory(self.env, '')
try:
self.planner.InitPlan(robot, params)
status = self.planner.PlanPath(traj, releasegil=True)
except Exception as e:
raise PlanningError('Planning failed with error: {0:s}'.format(e))
from openravepy import PlannerStatus
if status not in [ PlannerStatus.HasSolution, PlannerStatus.InterruptedWithSolution ]:
raise PlanningError('Planner returned with status {0:s}.'.format(str(status)))
return traj
|
the-stack_106_16765
|
from collections import defaultdict
adj = defaultdict(list)
with open('input-day12.txt') as file:
for line in file:
line = line.rstrip()
parts = line.split('-')
adj[parts[0]].append(parts[1])
adj[parts[1]].append(parts[0])
paths = []
def trace(path: list[str], curr: str) -> None:
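    # Depth-first search: big caves (uppercase) may be revisited, small caves at most once per path.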
if curr == 'end':
paths.append(path)
return
for nb in adj[curr]:
if nb != curr and (nb.isupper() or nb not in path):
trace([*path, curr], nb)
trace([], 'start')
print(len(paths))
|
the-stack_106_16766
|
import os
import sys
sys.path.append(os.getcwd())
from package import *
table_name = 'ods_house'
print('hello world, hello python, hello azkaban')
if __name__ == '__main__':
for day in DAYS:
for rpt_type in RPT_TYPES:
print(day)
print(rpt_type)
sql_day = f"select * from {table_name} where day = {day}"
print(sql_day)
host = CONFIG.get_value('mysql', 'host')
print(host)
print(type(host))
|
the-stack_106_16767
|
import io
import os
import os.path
from typing import IO, Callable, Iterable, Set, Iterator
from itertools import repeat
from zipfile import ZipFile
from collections import OrderedDict
from contextlib import contextmanager
from jawa.cf import ClassFile
from jawa.constants import ConstantPool, ConstantClass
def _walk(path, follow_links=False, maximum_depth=None):
"""A modified os.walk with support for maximum traversal depth."""
root_level = path.rstrip(os.path.sep).count(os.path.sep)
for root, dirs, files in os.walk(path, followlinks=follow_links):
yield root, dirs, files
if maximum_depth is None:
continue
if root_level + maximum_depth <= root.count(os.path.sep):
del dirs[:]
class ClassLoader(object):
"""Emulate the Java ClassPath.
Provides utilities for managing a java classpath as well as loading
classes from those paths.
:param sources: Optional sources to pass into update().
:param max_cache: The maximum number of ClassFile's to store in the cache.
If set to 0, the cache will be unlimited. [default: 50]
    :type max_cache: int
:param klass: The class to use when constructing ClassFiles.
:type klass: ClassFile or subclass.
:param bytecode_transforms: Default transforms to apply when disassembling
a method.
"""
def __init__(self, *sources, max_cache: int=50, klass=ClassFile,
bytecode_transforms: Iterable[Callable]=None):
self.path_map = {}
self.max_cache = max_cache
self.class_cache = OrderedDict()
self.bytecode_transforms = bytecode_transforms or []
self.klass = klass
if sources:
self.update(*sources)
def __getitem__(self, path: str) -> ClassFile:
return self.load(path)
def __contains__(self, path: str) -> bool:
if path in self.path_map:
return True
elif path + '.class' in self.path_map:
return True
return False
def update(self, *sources, follow_symlinks: bool=False,
maximum_depth: int=20):
"""Add one or more ClassFile sources to the class loader.
If a given source is a directory path, it is traversed up to the
maximum set depth and all files under it are added to the class loader
lookup table.
If a given source is a .jar or .zip file it will be opened and the
file index added to the class loader lookup table.
If a given source is a ClassFile or a subclass, it's immediately
added to the class loader lookup table and the class cache.
:param sources: One or more ClassFile sources to be added.
:param follow_symlinks: True if symlinks should be followed when
traversing filesystem directories.
[default: False]
:param maximum_depth: The maximum sub-directory depth when traversing
filesystem directories. If set to `None` no limit
will be enforced. [default: 20]
"""
for source in sources:
if isinstance(source, self.klass):
self.path_map[source.this.name.value] = source
self.class_cache[source.this.name.value] = source
continue
# Explicit cast to str to support Path objects.
source = str(source)
if source.lower().endswith(('.zip', '.jar')):
zf = ZipFile(source, 'r')
self.path_map.update(zip(zf.namelist(), repeat(zf)))
elif os.path.isdir(source):
walker = _walk(
source,
follow_links=follow_symlinks,
maximum_depth=maximum_depth
)
for root, dirs, files in walker:
for file_ in files:
path_full = os.path.join(root, file_)
path_suffix = os.path.relpath(path_full, source)
self.path_map[path_suffix] = path_full
@contextmanager
def open(self, path: str, mode: str='r') -> IO:
"""Open an IO-like object for `path`.
.. note::
Mode *must* be either 'r' or 'w', as the underlying objects
do not understand the full range of modes.
:param path: The path to open.
:param mode: The mode of the file being opened, either 'r' or 'w'.
"""
entry = self.path_map.get(path)
if entry is None:
raise FileNotFoundError()
if isinstance(entry, str):
with open(entry, 'rb' if mode == 'r' else mode) as source:
yield source
elif isinstance(entry, ZipFile):
yield io.BytesIO(entry.read(path))
else:
raise NotImplementedError()
def load(self, path: str) -> ClassFile:
"""Load the class at `path` and return it.
Load will attempt to load the file at `path` and `path` + .class
before failing.
:param path: Fully-qualified path to a ClassFile.
"""
# Try to refresh the class from the cache, loading it from disk
# if not found.
try:
r = self.class_cache.pop(path)
except KeyError:
with self.open(f'{path}.class') as source:
r = self.klass(source)
r.classloader = self
# Even if it was found re-set the key to update the OrderedDict
# ordering.
self.class_cache[path] = r
# If the cache is enabled remove every item over N started from
# the least-used.
if self.max_cache > 0:
to_pop = max(len(self.class_cache) - self.max_cache, 0)
for _ in repeat(None, to_pop):
self.class_cache.popitem(last=False)
return r
def clear(self):
"""Erase all stored paths and all cached classes."""
self.path_map.clear()
self.class_cache.clear()
def dependencies(self, path: str) -> Set[str]:
"""Returns a set of all classes referenced by the ClassFile at
`path` without reading the entire ClassFile.
This is an optimization method that does not load a complete ClassFile,
nor does it add the results to the ClassLoader cache.
:param path: Fully-qualified path to a ClassFile.
"""
return set(c.name.value for c in self.search_constant_pool(
path=path,
type_=ConstantClass
))
def search_constant_pool(self, *, path: str, **options):
"""Partially load the class at `path`, yield all matching constants
from the ConstantPool.
This is an optimization method that does not load a complete ClassFile,
nor does it add the results to the ClassLoader cache.
:param path: Fully-qualified path to a ClassFile.
:param options: A list of options to pass into `ConstantPool.find()`
"""
with self.open(f'{path}.class') as source:
# Skip over the magic, minor, and major version.
source.read(8)
pool = ConstantPool()
pool.unpack(source)
yield from pool.find(**options)
@property
def classes(self) -> Iterator[str]:
"""Yield the name of all classes discovered in the path map."""
yield from (
c[:-6]
for c in self.path_map.keys() if c.endswith('.class')
)
|
the-stack_106_16768
|
from __future__ import absolute_import, division, print_function
from collections import defaultdict, Iterator, Mapping
from datetime import date, datetime, timedelta
import itertools
import numbers
import warnings
import toolz
from toolz import first, unique, assoc
from toolz.utils import no_default
import pandas as pd
from odo import odo
from ..compatibility import basestring
from ..expr import Expr, Field, Symbol, symbol, Join, Cast
from ..dispatch import dispatch
from ..interactive import coerce_core, into
__all__ = ['compute', 'compute_up']
base = numbers.Number, basestring, date, datetime, timedelta, type(None)
@dispatch(Expr, object)
def pre_compute(leaf, data, scope=None, **kwargs):
""" Transform data prior to calling ``compute`` """
return data
@dispatch(Expr, object)
def post_compute(expr, result, scope=None):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(Cast, object)
def compute_up(c, b, **kwargs):
# cast only works on the expression system and does not affect the
# computation
return b
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope=None, **kwargs):
return type(seq)(compute(item, scope or {}, **kwargs) for item in seq)
@dispatch(object)
def compute_down(expr, **kwargs):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def issubtype(a, b):
""" A custom issubclass """
if issubclass(a, b):
return True
if issubclass(a, (tuple, list, set)) and issubclass(b, Iterator):
return True
if issubclass(b, (tuple, list, set)) and issubclass(a, Iterator):
return True
return False
def type_change(old, new):
""" Was there a significant type change between old and new data?
>>> type_change([1, 2], [3, 4])
False
>>> type_change([1, 2], [3, [1,2,3]])
True
Some special cases exist, like no type change from list to Iterator
>>> type_change([[1, 2]], [iter([1, 2])])
False
"""
if all(isinstance(x, base) for x in old + new):
return False
if len(old) != len(new):
return True
new_types = list(map(type, new))
old_types = list(map(type, old))
return not all(map(issubtype, new_types, old_types))
def top_then_bottom_then_top_again_etc(expr, scope, **kwargs):
""" Compute expression against scope
Does the following interpreter strategy:
1. Try compute_down on the entire expression
2. Otherwise compute_up from the leaves until we experience a type change
(e.g. data changes from dict -> pandas DataFrame)
3. Re-optimize expression and re-pre-compute data
4. Go to step 1
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i4')])
>>> e = s.amount.sum() + 1
>>> top_then_bottom_then_top_again_etc(e, {s: data})
601
See Also
--------
bottom_up_until_type_break -- uses this for bottom-up traversal
top_to_bottom -- older version
bottom_up -- older version still
"""
# 0. Base case: expression is in dict, return associated data
if expr in scope:
return scope[expr]
if not hasattr(expr, '_leaves'):
return expr
leaf_exprs = list(expr._leaves())
leaf_data = [scope.get(leaf) for leaf in leaf_exprs]
# 1. See if we have a direct computation path with compute_down
try:
return compute_down(expr, *leaf_data, **kwargs)
except NotImplementedError:
pass
# 2. Compute from the bottom until there is a data type change
expr2, scope2 = bottom_up_until_type_break(expr, scope, **kwargs)
# 3. Re-optimize data and expressions
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
if pre_compute_:
scope3 = dict((e, pre_compute_(e, datum,
**assoc(kwargs, 'scope', scope2)))
for e, datum in scope2.items())
else:
scope3 = scope2
if optimize_:
try:
expr3 = optimize_(expr2, *[scope3[leaf]
for leaf in expr2._leaves()])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
scope4 = dict((e._subs(_d), d) for e, d in scope3.items())
except NotImplementedError:
expr3 = expr2
scope4 = scope3
else:
expr3 = expr2
scope4 = scope3
# 4. Repeat
if expr.isidentical(expr3):
raise NotImplementedError("Don't know how to compute:\n"
"type(expr): %s\n"
"expr: %s\n"
"data: %s" % (type(expr3), expr3, scope4))
else:
return top_then_bottom_then_top_again_etc(expr3, scope4, **kwargs)
_names = ('leaf_%d' % i for i in itertools.count(1))
_leaf_cache = {}
_used_tokens = defaultdict(set)
def _reset_leaves():
_leaf_cache.clear()
_used_tokens.clear()
def makeleaf(expr):
""" Name of a new leaf replacement for this expression
>>> _reset_leaves()
>>> t = symbol('t', '{x: int, y: int, z: int}')
>>> makeleaf(t) == t
True
>>> makeleaf(t.x)
<`x` symbol; dshape='int32'>
>>> makeleaf(t.x + 1)
<`x` symbol; dshape='int64'>
>>> makeleaf(t.y + 1)
<`y` symbol; dshape='int64'>
>>> makeleaf(t.x).isidentical(makeleaf(t.x + 1))
False
>>> from blaze import sin, cos
>>> x = symbol('x', 'real')
>>> makeleaf(cos(x)**2).isidentical(sin(x) ** 2)
False
>>> makeleaf(t) is t # makeleaf passes on Symbols
True
"""
name = expr._name or '_'
if expr in _leaf_cache:
return _leaf_cache[expr]
if isinstance(expr, Symbol): # Idempotent on symbols
_used_tokens[name].add(expr._token)
_leaf_cache[expr] = expr
return expr
used_for_name = _used_tokens[name]
for token in itertools.count():
if token not in used_for_name:
break
result = symbol(name, expr.dshape, token)
used_for_name.add(token)
_leaf_cache[expr] = result
return result
def data_leaves(expr, scope):
return [scope[leaf] for leaf in expr._leaves()]
def bottom_up_until_type_break(expr, scope, **kwargs):
""" Traverse bottom up until data changes significantly
Parameters
----------
expr: Expression
Expression to compute
scope: dict
namespace matching leaves of expression to data
Returns
-------
expr: Expression
New expression with lower subtrees replaced with leaves
scope: dict
New scope with entries for those leaves
Examples
--------
>>> import numpy as np
>>> s = symbol('s', 'var * {name: string, amount: int}')
>>> data = np.array([('Alice', 100), ('Bob', 200), ('Charlie', 300)],
... dtype=[('name', 'S7'), ('amount', 'i8')])
This computation completes without changing type. We get back a leaf
symbol and a computational result
>>> e = (s.amount + 1).distinct()
>>> bottom_up_until_type_break(e, {s: data}) # doctest: +SKIP
(amount, {amount: array([101, 201, 301])})
This computation has a type change midstream (``list`` to ``int``), so we
stop and get the unfinished computation.
>>> e = s.amount.sum() + 1
>>> bottom_up_until_type_break(e, {s: data})
(amount_sum + 1, {<`amount_sum` symbol; dshape='int64'>: 600})
"""
# 0. Base case. Return if expression is in scope
if expr in scope:
leaf = makeleaf(expr)
return leaf, {leaf: scope[expr]}
inputs = list(unique(expr._inputs))
# 1. Recurse down the tree, calling this function on children
# (this is the bottom part of bottom up)
exprs, new_scopes = zip(*[bottom_up_until_type_break(i, scope, **kwargs)
for i in inputs])
# 2. Form new (much shallower) expression and new (more computed) scope
new_scope = toolz.merge(new_scopes)
new_expr = expr._subs({
i: e for i, e in zip(inputs, exprs) if not i.isidentical(e)
})
old_expr_leaves = expr._leaves()
old_data_leaves = [scope.get(leaf) for leaf in old_expr_leaves]
# 3. If the leaves have changed substantially then stop
key = lambda x: str(type(x))
if type_change(sorted(new_scope.values(), key=key),
sorted(old_data_leaves, key=key)):
return new_expr, new_scope
# 4. Otherwise try to do some actual work
try:
leaf = makeleaf(expr)
_data = [new_scope[i] for i in new_expr._inputs]
except KeyError:
return new_expr, new_scope
try:
return leaf, {leaf: compute_up(new_expr, *_data, scope=new_scope,
**kwargs)}
except NotImplementedError:
return new_expr, new_scope
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Examples
--------
>>> from blaze import data
>>> t = data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {<`t` symbol; dshape='3 * int32'>: [1, 2, 3]})
>>> expr, scope = _
>>> list(scope.keys())[0]._resources()
{}
"""
resources = expr._resources()
symbol_dict = dict((t, symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
other_scope = dict((k, v) for k, v in scope.items()
if k not in symbol_dict)
new_scope = toolz.merge(resources, other_scope)
expr = expr._subs(symbol_dict)
return expr, new_scope
@dispatch(Expr, Mapping)
def compute(expr, d, return_type=no_default, **kwargs):
"""Compute expression against data sources.
Parameters
----------
expr : Expr
The blaze expression to compute.
d : any
The data source to compute expression on.
return_type : {'native', 'core', type}, optional
Type to return data as. Defaults to 'native' but will be changed
to 'core' in version 0.11. 'core' forces the computation into a core
type. 'native' returns the result as is from the respective backend's
``post_compute``. If a type is passed, it will odo the result into the
type before returning.
Examples
--------
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
_reset_leaves()
optimize_ = kwargs.get('optimize', optimize)
pre_compute_ = kwargs.get('pre_compute', pre_compute)
post_compute_ = kwargs.get('post_compute', post_compute)
expr2, d2 = swap_resources_into_scope(expr, d)
if pre_compute_:
d3 = dict(
(e, pre_compute_(e, dat, **kwargs))
for e, dat in d2.items()
if e in expr2
)
else:
d3 = d2
if optimize_:
try:
expr3 = optimize_(expr2, *[v for e, v in d3.items() if e in expr2])
_d = dict(zip(expr2._leaves(), expr3._leaves()))
d4 = dict((e._subs(_d), d) for e, d in d3.items())
except NotImplementedError:
expr3 = expr2
d4 = d3
else:
expr3 = expr2
d4 = d3
result = top_then_bottom_then_top_again_etc(expr3, d4, **kwargs)
if post_compute_:
result = post_compute_(expr3, result, scope=d4)
# return the backend's native response
if return_type is no_default:
msg = ("The default behavior of compute will change in version >= 0.11"
" where the `return_type` parameter will default to 'core'.")
warnings.warn(msg, DeprecationWarning)
# return result as a core type
# (python type, pandas Series/DataFrame, numpy array)
elif return_type == 'core':
result = coerce_core(result, expr.dshape)
# user specified type
elif isinstance(return_type, type):
result = into(return_type, result)
elif return_type != 'native':
raise ValueError(
"Invalid return_type passed to compute: {}".format(return_type),
)
return result
@compute.register(Expr, object)
def compute_single_object(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(Field, Mapping)
def compute_up(expr, data, **kwargs):
return data[expr._name]
@compute_up.register(Join, object, object)
def join_dataframe_to_selectable(expr, lhs, rhs, scope=None, **kwargs):
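    # Fallback join: odo both sides into pandas DataFrames and re-run the join there.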
lexpr, rexpr = expr._leaves()
return compute(
expr,
{
lexpr: odo(lhs, pd.DataFrame, dshape=lexpr.dshape),
rexpr: odo(rhs, pd.DataFrame, dshape=rexpr.dshape)
},
**kwargs
)
|
the-stack_106_16770
|
import htcondor
def remove_job(self, job_id):
"""
Remove the specified job from the queue
"""
constraint = 'ProminenceType == "job" && ClusterId == %d' % int(job_id)
try:
schedd = htcondor.Schedd()
schedd.edit(constraint, 'ProminenceRemoveFromQueue', 'True')
    except Exception:
return False
return True
|
the-stack_106_16771
|
import mysql.connector
from mysql.connector import Error
class DAO:
def __init__(self):
try:
self.conexion = mysql.connector.connect(
host='localhost',
port=3306,
user='root',
password='123456',
db='universidad2'
)
except Error as ex:
print("Error al intentar la conexión: {0}".format(ex))
def listar_cursos(self):
if self.conexion.is_connected():
try:
cursor = self.conexion.cursor()
cursor.execute("SELECT * FROM curso ORDER BY nombre ASC")
resultados = cursor.fetchall()
return resultados
except Error as ex:
print("Error al intentar la conexión: {0}".format(ex))
def registrar_curso(self, curso):
if self.conexion.is_connected():
try:
cursor = self.conexion.cursor()
sql = "INSERT INTO curso (codigo, nombre, creditos) VALUES ('{0}', '{1}', {2})"
cursor.execute(sql.format(curso[0], curso[1], curso[2]))
self.conexion.commit()
print("¡Curso registrado!\n")
except Error as ex:
print("Error al intentar la conexión: {0}".format(ex))
def actualizar_curso(self, curso):
if self.conexion.is_connected():
try:
cursor = self.conexion.cursor()
sql = "UPDATE curso SET nombre = '{0}', creditos = {1} WHERE codigo = '{2}'"
cursor.execute(sql.format(curso[1], curso[2], curso[0]))
self.conexion.commit()
print("¡Curso actualizado!\n")
except Error as ex:
print("Error al intentar la conexión: {0}".format(ex))
def eliminar_curso(self, codigo_curso_eliminar):
if self.conexion.is_connected():
try:
cursor = self.conexion.cursor()
sql = "DELETE FROM curso WHERE codigo = '{0}'"
cursor.execute(sql.format(codigo_curso_eliminar))
self.conexion.commit()
print("¡Curso eliminado!\n")
except Error as ex:
print("Error al intentar la conexión: {0}".format(ex))
|
the-stack_106_16775
|
import pytest
from godot.bindings import (
Array,
Node,
Resource,
Area2D,
Vector2,
PoolColorArray,
PoolVector3Array,
PoolVector2Array,
PoolStringArray,
PoolRealArray,
PoolIntArray,
PoolByteArray,
)
class TestArray:
def test_base(self):
v = Array()
assert type(v) == Array
def test_equal(self):
arr = Array()
other = Array()
for item in [1, "foo", Node(), Vector2()]:
arr.append(item)
other.append(item)
assert arr == other
bad = Array([0, 0, 0])
assert not arr == bad # Force use of __eq__
@pytest.mark.parametrize(
"arg",
[
None,
0,
"foo",
Vector2(),
Node(),
[1],
Array([1, 2]),
PoolByteArray([1]),
PoolIntArray([1]),
],
)
def test_bad_equal(self, arg):
arr = Array([1])
assert arr != arg
def test_add(self):
arr = Array([None])
arr += Array([1, "two"]) # __iadd__
assert arr == Array([None, 1, "two"])
arr2 = arr + Array([3]) # __add__
assert arr2 == Array([None, 1, "two", 3])
def test_add_with_non_array(self):
arr = Array([0])
arr += [1, "two"] # __iadd__
assert arr == Array([0, 1, "two"])
arr2 = arr + [3] # __add__
assert arr2 == Array([0, 1, "two", 3])
# Also test list's __iadd__
arr3 = ["-1"]
arr3 += arr
assert arr3 == ["-1", 0, 1, "two"]
# list.__add__ only works with other lists
with pytest.raises(TypeError):
["-1"] + arr
arr4 = ["-1"] + list(arr)
assert arr4 == ["-1", 0, 1, "two"]
@pytest.mark.parametrize("arg", [None, 0, "foo", Vector2(), Node()])
def test_bad_add(self, arg):
with pytest.raises(TypeError):
assert Array() + arg
def test_repr(self):
v = Array()
assert repr(v) == "<Array([])>"
v = Array([1, "foo", Vector2()])
assert repr(v) == "<Array([1, 'foo', <Vector2(x=0.0, y=0.0)>])>"
@pytest.mark.parametrize("arg", [42, "dummy", Node(), Vector2(), [object()]])
def test_bad_instantiate(self, arg):
with pytest.raises(TypeError):
Array(arg)
@pytest.mark.parametrize(
"arg",
[
Array(),
PoolColorArray(),
PoolVector3Array(),
PoolVector2Array(),
PoolStringArray(),
PoolRealArray(),
PoolIntArray(),
PoolByteArray(),
[],
(),
[42, 43, 44],
("foo", "bar", "spam"),
(Node(), Resource(), Area2D()),
[Vector2(), Vector2(), Vector2()],
(Node(), Resource(), Area2D(), Vector2(), "foo", 0), # Enjoy the mix
],
)
def test_instantiate_from_copy(self, arg):
arr = Array(arg)
if hasattr(arg, "_gd_ptr"):
assert arr._gd_ptr != arg._gd_ptr
@pytest.mark.parametrize(
"args",
[
["append", type(None), ("bar",)],
["clear", type(None), ()],
["count", int, ("foo",)],
["empty", bool, ()],
["erase", type(None), ("foo",)],
["front", str, ()],
["back", str, ()],
["find", int, ("foo", 0)],
["find_last", int, ("foo",)],
["has", bool, ("foo",)],
["hash", int, ()],
["insert", type(None), (0, "bar")],
["invert", type(None), ()],
["pop_back", str, ()],
["pop_front", str, ()],
["push_back", type(None), ("bar",)],
["push_front", type(None), ("bar",)],
["resize", type(None), (2,)],
["rfind", int, ("foo", 0)],
["sort", type(None), ()],
# ['sort_custom', type(None), (obj, func)],
],
ids=lambda x: x[0],
)
def test_methods(self, args):
v = Array(["foo"])
# Don't test methods' validity but bindings one
field, ret_type, params = args
assert hasattr(v, field)
method = getattr(v, field)
assert callable(method)
ret = method(*params)
assert type(ret) == ret_type
def test_len(self):
v = Array()
assert len(v) == 0
v.append("foo")
assert len(v) == 1
def test_getitem(self):
v = Array(["foo", 0, Node(), 0.42])
assert v[0] == "foo"
assert v[1] == 0
assert v[-1] == 0.42
def test_getitem_slice(self):
v = Array(["foo", 0, Node()])
assert isinstance(v[:-1], Array)
assert v[1:] == Array([v[1], v[2]])
def test_outofrange_getitem(self):
v = Array(["foo", 0])
with pytest.raises(IndexError):
v[2]
def test_setitem(self):
v = Array(["foo", 0, Node()])
v[0] = "bar"
assert len(v) == 3
assert v[0] == "bar"
v[-1] = 4
assert len(v) == 3
assert v[2] == 4
def test_outofrange_setitem(self):
v = Array(["foo", 0])
with pytest.raises(IndexError):
v[2] = 42
def test_delitem(self):
v = Array(["foo", 0, Node()])
del v[0]
assert len(v) == 2
assert v[0] == 0
del v[-1]
assert len(v) == 1
        assert v[0] == 0
def test_outofrange_delitem(self):
v = Array(["foo", 0])
with pytest.raises(IndexError):
del v[2]
def test_iter(self):
items = ["foo", 0, Node()]
v = Array(items)
items_from_v = [x for x in v]
assert items_from_v == items
def test_append(self):
items = [1, "foo", Node()]
v = Array()
for item in items:
v.append(item)
assert len(v) == 3
assert v == Array(items)
|
the-stack_106_16776
|
import json
import os
from .client import Client
from .exceptions import NotChecked
from .xml_parser import XmlParser
class ArfToJson(Client):
def _set_attributes(self):
self.show_failed_rules = self.arg.show_failed_rules
self.show_not_selected_rules = self.arg.show_not_selected_rules
self.xml_parser = XmlParser(self.source_filename)
def _get_message(self):
MESSAGES = {
'description': 'Client for generating JSON of SCAP rule evaluation results',
'source_filename': 'ARF scan file',
}
return MESSAGES
def create_dict_of_rule(self, rule_id):
return self.xml_parser.get_oval_tree(rule_id).save_tree_to_dict()
def file_is_empty(self, path):
return os.stat(path).st_size == 0
def save_dict_as_json(self, dict_, src):
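        # Merge with any JSON already present in the target file so repeated runs accumulate results.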
if os.path.isfile(src) and not self.file_is_empty(src):
with open(src, "r") as f:
data = json.load(f)
for key in data:
dict_[key] = data[key]
with open(src, "w+") as f:
json.dump(dict_, f)
def prepare_data(self, rules):
out = []
rule = None
out_oval_tree_dict = dict()
for rule in rules['rules']:
try:
out_oval_tree_dict[self.START_OF_FILE_NAME + rule +
self.date] = self.create_dict_of_rule(rule)
except NotChecked as error:
out_oval_tree_dict[self.START_OF_FILE_NAME + rule +
self.date] = str(error)
if self.out is not None:
self.save_dict_as_json(out_oval_tree_dict, self.out)
out.append(self.out)
else:
print(
str(json.dumps(out_oval_tree_dict, sort_keys=False, indent=4)))
return out
def prepare_parser(self):
super().prepare_parser()
self.prepare_args_when_user_can_list_in_rules()
|
the-stack_106_16778
|
import datetime
import os
import gym
import numpy
import torch
from games.abstract_game import AbstractGame
class MuZeroConfig:
def __init__(self):
self.seed = 0 # Seed for numpy, torch and the game
### Game
self.observation_shape = (1, 1,
4) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = [i for i in range(2)] # Fixed list of all possible actions. You should only edit the length
self.players = [i for i in range(1)] # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
### Self-Play
self.num_actors = 1 # Number of simultaneous threads self-playing to feed the replay buffer
self.max_moves = 500 # Maximum number of moves if game is not finished before
self.num_simulations = 50 # Number of future moves self-simulated
self.discount = 0.997 # Chronological discount of the reward
self.temperature_threshold = 500 # Number of moves before dropping temperature to 0 (ie playing according to the max)
# Root prior exploration noise
self.root_dirichlet_alpha = 0.25
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "fullyconnected" # "resnet" / "fullyconnected"
self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size
# Residual Network
self.downsample = False # Downsample observations before representation network (See paper appendix Network Architecture)
self.blocks = 1 # Number of blocks in the ResNet
self.channels = 2 # Number of channels in the ResNet
self.reduced_channels = 2 # Number of channels before heads of dynamic and prediction networks
self.resnet_fc_reward_layers = [] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 8
self.fc_reward_layers = [64] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [64] # Define the hidden layers in the dynamics network
### Training
self.results_path = os.path.join(os.path.dirname(__file__), "../results", os.path.basename(__file__)[:-3],
datetime.datetime.now().strftime(
"%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.training_steps = 10000 # Total number of training steps (ie weights update according to a batch)
self.batch_size = 128 # Number of parts of games to train on at each training step
        self.checkpoint_interval = 20  # Number of training steps before using the model for self-playing
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.training_device = "cuda" if torch.cuda.is_available() else "cpu" # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-4 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.05 # Initial learning rate
self.lr_decay_rate = 0.9 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 10000
# Muzero Reanalyze
self.reanalyze_mode = "true" # or "fast"
self.num_reanalyze_cpus = 27
self.policy_update_rate = 0.8
### Replay Buffer
self.window_size = 500 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 5 # Number of game moves to keep for every batch element
self.td_steps = 50 # Number of steps in the future to take into account for calculating the target value
self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
# Prioritized Replay (See paper appendix Training)
self.PER = True # Select in priority the elements in the replay buffer which are unexpected for the network
self.use_max_priority = False # Use the n-step TD error as initial priority. Better for large replay buffer
self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
self.PER_beta = 1.0
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = None # Desired self played games per training step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
if trained_steps < 0.5 * self.training_steps:
return 1.0
elif trained_steps < 0.75 * self.training_steps:
return 0.5
else:
return 0.25
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.env = gym.make("CartPole-v1")
if seed is not None:
self.env.seed(seed)
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward, and a boolean indicating whether the game has ended.
"""
observation, reward, done, _ = self.env.step(action)
return numpy.array([[observation]]), reward, done
def to_play(self):
"""
Return the current player.
Returns:
The current player, it should be an element of the players list in the config.
"""
return 0
def legal_actions(self):
"""
Should return the legal actions at each turn. If this is not available, it can return
the whole action space; at each turn, the game has to be able to handle any of the returned actions.
For complex games where calculating legal moves takes too long, the idea is to define the legal actions
as equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return [i for i in range(2)]
def reset(self):
"""
Reset the environment for a new game.
Returns:
Initial observation of the game.
"""
return numpy.array([[self.env.reset()]])
def close(self):
"""
Properly close the game.
"""
self.env.close()
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
def human_to_action(self):
"""
For multiplayer games, ask the user for a legal action
and return the corresponding action number.
Returns:
An integer from the action space.
"""
pass
def action_to_string(self, action_number):
"""
Convert an action number to a string representing the action.
Args:
action_number: an integer from the action space.
Returns:
String representing the action.
"""
actions = {
0: "Push cart to the left",
1: "Push cart to the right",
}
return "{}. {}".format(action_number, actions[action_number])
|
the-stack_106_16780
|
# -*- coding: utf-8 -*-
import importlib
from a4kSubtitles.lib import utils
__all = utils.get_all_relative_entries(__file__)
__display_names = {
'addic7ed': 'Addic7ed',
'bsplayer': 'BSPlayer',
'opensubtitles': 'OpenSubtitles',
'podnadpisi': 'Podnadpisi',
'subscene': 'Subscene',
}
def __set_fn_if_missing(service, fn_name, fn):
if not getattr(service, fn_name, None):
setattr(service, fn_name, fn)
services = {}
for service_name in __all:
service = services[service_name] = importlib.import_module('a4kSubtitles.services.%s' % service_name)
service.context = utils.DictAsObject({})
service.display_name = __display_names[service_name]
__set_fn_if_missing(service, 'build_auth_request', lambda _, __: None)
assert service.build_search_requests
assert service.parse_search_response
assert service.build_download_request
|
the-stack_106_16781
|
import logging
import os
def is_true(value: str) -> bool:
return value.lower() in ['true', '1', 't', 'y', 'yes']
def is_not_blank(value) -> bool:
return bool(value and str.strip(value))
def initialize_logger(output_dir):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(relativeCreated)d] %(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create error file handler and set level to error
handler = logging.FileHandler(os.path.join(output_dir, "error.log"), "w", encoding=None, delay=True)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("%(asctime)s [%(relativeCreated)d] %(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# create debug file handler and set level to debug
handler = logging.FileHandler(os.path.join(output_dir, "all.log"), "w")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s [%(relativeCreated)d] %(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
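# Hypothetical usage sketch (not called anywhere): the output directory name is
# made up; it is created here because the file handlers above need it to exist.
def _example_logging_setup(output_dir="./logs"):
    os.makedirs(output_dir, exist_ok=True)
    initialize_logger(output_dir)
    logging.getLogger(__name__).info("written to the console and all.log")
    logging.getLogger(__name__).error("also written to error.log")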
|
the-stack_106_16782
|
s = input("Please enter the number of seconds:")
try:
s = int(s)
s0 = s
except ValueError:
print("Please enter a number.")
else:
if s >= 0:
s = int(s)
m,s = (divmod(s,60))
h,m = (divmod(m,60))
d,h = (divmod(h,24))
print(f"{s0} seconds correspond to {d} day, {h} hours, {m} minutes and {s} seconds")
else:
print("Please enter a positive integer number.")
|
the-stack_106_16784
|
# -*- coding: utf-8 -*-
"""Forms to edit action content.
EditActionOutForm: Form to process content action_out (Base class)
EnterActionIn: Form to enter values in a row (action-in elements)
"""
from typing import Any, Dict, List, Tuple
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_summernote.widgets import SummernoteInplaceWidget
from ontask import models
from ontask.action import evaluate
from ontask.core import ONTASK_UPLOAD_FIELD_PREFIX, column_to_field
class EditActionOutForm(forms.ModelForm):
"""Main class to edit an action out."""
text_content = forms.CharField(label='', required=False)
def __init__(self, *args, **kargs):
"""Adjust field parameters for content and target_URL."""
super().__init__(*args, **kargs)
# Personalized text, canvas email
if (
self.instance.action_type == models.Action.PERSONALIZED_TEXT
or self.instance.action_type == models.Action.RUBRIC_TEXT
or self.instance.action_type == models.Action.EMAIL_REPORT
):
self.fields['text_content'].widget = SummernoteInplaceWidget()
# Add the Target URL field
if (
self.instance.action_type == models.Action.PERSONALIZED_JSON
or self.instance.action_type == models.Action.JSON_REPORT
):
# Add the target_url field
self.fields['target_url'] = forms.CharField(
initial=self.instance.target_url,
label=_('Target URL'),
strip=True,
required=False,
widget=forms.Textarea(
attrs={
'rows': 1,
'cols': 80,
'placeholder': _('URL to send the JSON object'),
},
),
)
# Modify the content field so that it uses the TextArea
self.fields['text_content'].widget = forms.Textarea(
attrs={
'cols': 80,
'rows': 15,
'placeholder': _('Write a JSON object'),
},
)
if self.instance.action_type == models.Action.PERSONALIZED_CANVAS_EMAIL:
# Modify the content field so that it uses the TextArea
self.fields['text_content'].widget = forms.Textarea(
attrs={
'cols': 80,
'rows': 15,
'placeholder': _('Write a plain text message'),
},
)
def clean(self) -> Dict:
"""Verify that the template text renders correctly."""
form_data = super().clean()
try:
evaluate.render_action_template(
form_data['text_content'],
{},
self.instance)
except Exception as exc:
# Report the exception as a form error
self.add_error(None, str(exc))
return form_data
class Meta:
"""Select action and the content field only."""
model = models.Action
fields = ['text_content']
class EnterActionIn(forms.Form):
"""Form to enter values in a row."""
def __init__(self, *args, **kargs):
"""Store parameters and adjust questions, columns, etc."""
# Store the parameters
self.tuples = kargs.pop('tuples', None)
self.context = kargs.pop('context', None)
self.form_values = kargs.pop('values', None)
self.show_key = kargs.pop('show_key', None)
self.is_empty = True
super().__init__(*args, **kargs)
# If no initial values have been given, replicate a list of Nones
if not self.form_values:
self.form_values = [None] * len(self.tuples)
for idx, cc_item in enumerate(self.tuples):
# Skip the key columns if flag is true
if not self.show_key and cc_item.column.is_key:
continue
# Skip the element if there is a condition and it is false
if cc_item.condition and not self.context[cc_item.condition.name]:
continue
field_name = ONTASK_UPLOAD_FIELD_PREFIX + '{0}'.format(idx)
the_field = column_to_field(
cc_item.column,
self.form_values[idx],
label=cc_item.column.description_text)
self.fields[field_name] = the_field
if cc_item.column.is_key or not cc_item.changes_allowed:
the_field.widget.attrs['readonly'] = 'readonly'
the_field.disabled = True
else:
# We are adding at least one field to be filled
self.is_empty = False
def get_key_value_pairs(self) -> Tuple[List, List, str, Any]:
"""Extract key/value pairs and primary key/value.
:return: Tuple with List[keys], List[values], where_field, where_value
"""
keys = []
values = []
where_field = None
where_value = None
# Create the SET name = value part of the query
for idx, colcon in enumerate(self.tuples):
if colcon.column.is_key and not self.show_key:
# If it is a learner request and a key column, skip
continue
# Skip the element if there is a condition and it is false
if colcon.condition and not self.context[colcon.condition.name]:
continue
field_value = self.cleaned_data[
ONTASK_UPLOAD_FIELD_PREFIX + '{0}'.format(idx)]
if colcon.column.is_key:
# Remember one unique key for selecting the row
where_field = colcon.column.name
where_value = field_value
continue
keys.append(colcon.column.name)
values.append(field_value)
return keys, values, where_field, where_value
|
the-stack_106_16785
|
import datetime
import os
from re import sub
import signal
import subprocess
import time
import uuid
from pathlib import Path
import rq
from fuzzware_pipeline.logging_handler import logging_handler
from rq.worker import WorkerStatus
from .. import naming_conventions as nc
from ..run_target import gen_run_arglist, run_target
from ..util.config import load_extra_args, parse_extra_args
logger = logging_handler().get_logger("tracegen")
FORKSRV_FD = 198
# Make sure these names are synchronized with the argument names below
ARGNAME_BBL_SET_PATH, ARGNAME_MMIO_SET_PATH = "bbl_set_path", "mmio_set_path"
ARGNAME_EXTRA_ARGS = "extra_args"
FORKSERVER_UNSUPPORTED_TRACE_ARGS = ("mmio_trace_path", "bbl_trace_path", "ram_trace_path")
def gen_traces(config_path, input_path, bbl_trace_path=None, ram_trace_path=None, mmio_trace_path=None, bbl_set_path=None, mmio_set_path=None, extra_args=None, silent=False, bbl_hash_path=None):
extra_args = list(extra_args) if extra_args else []
if bbl_trace_path is not None:
extra_args += ["--bb-trace-out", bbl_trace_path]
if ram_trace_path is not None:
extra_args += ["--ram-trace-out", ram_trace_path]
if mmio_trace_path is not None:
extra_args += ["--mmio-trace-out", mmio_trace_path]
if bbl_set_path is not None:
extra_args += ["--bb-set-out", bbl_set_path]
if mmio_set_path is not None:
extra_args += ["--mmio-set-out", mmio_set_path]
if bbl_hash_path is not None:
extra_args += ["--bb-hash-out", bbl_hash_path]
run_target(config_path, input_path, extra_args, silent=silent, stdout=subprocess.DEVNULL if silent else None, stderr=subprocess.DEVNULL if silent else None)
return True
def batch_gen_native_traces(config_path, input_paths, extra_args=None, bbl_set_paths=None, mmio_set_paths=None, bbl_hash_paths=None, silent=False):
"""
Utility function to generate, in one batch, the trace types for which the
emulator supports native snapshotting.
"""
common_length = len(input_paths)
# Spawn process, while disabling generation types where we can
gentrace_proc = TraceGenProc(config_path, extra_args, silent=silent,
                             gen_bb_set=bool(bbl_set_paths) and not all(p is None for p in bbl_set_paths),
                             gen_mmio_set=bool(mmio_set_paths) and not all(p is None for p in mmio_set_paths),
                             gen_bb_hash=bool(bbl_hash_paths) and not all(p is None for p in bbl_hash_paths)
                             )
bbl_set_paths = bbl_set_paths or common_length * [None]
mmio_set_paths = mmio_set_paths or common_length * [None]
bbl_hash_paths = bbl_hash_paths or common_length * [None]
for input_path, bbl_set_path, mmio_set_path, bbl_hash_path in zip(input_paths, bbl_set_paths, mmio_set_paths, bbl_hash_paths):
if not gentrace_proc.gen_trace(input_path, bbl_set_path, mmio_set_path, bbl_hash_path):
logger.error(f"Hit abrupt end while trying to execute input {input_path}")
assert False
gentrace_proc.destroy()
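# Hypothetical usage sketch (not called anywhere): generate basic-block and MMIO
# set traces for a couple of inputs in one batch. All paths are made up.
def _example_batch_gen_native_traces():
    inputs = ["/tmp/proj/main001/fuzzers/fuzzer1/queue/id:000000",
              "/tmp/proj/main001/fuzzers/fuzzer1/queue/id:000001"]
    batch_gen_native_traces("/tmp/proj/config.yml", inputs,
                            bbl_set_paths=[p + ".bbset" for p in inputs],
                            mmio_set_paths=[p + ".mmioset" for p in inputs],
                            silent=True)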
def gen_missing_maindir_traces(maindir, required_trace_prefixes, fuzzer_nums=None, tracedir_postfix="", log_progress=False, verbose=False, crashing_inputs=False):
projdir = nc.project_base(maindir)
config_path = nc.config_file_for_main_path(maindir)
extra_args = parse_extra_args(load_extra_args(nc.extra_args_for_config_path(config_path)), projdir)
jobs_for_config = []
fuzzer_dirs = nc.fuzzer_dirs_for_main_dir(maindir)
if fuzzer_nums is not None:
assert all(0 < i <= len(fuzzer_dirs) for i in fuzzer_nums)
fuzzer_dirs = [fuzzer_dirs[i-1] for i in fuzzer_nums]
can_use_native_batch = all(prefix in nc.NATIVE_TRACE_FILENAME_PREFIXES for prefix in required_trace_prefixes)
num_gentrace_jobs = 0
for fuzzer_dir in fuzzer_dirs:
tracedir = fuzzer_dir.joinpath(nc.trace_dirname(tracedir_postfix, is_crash=crashing_inputs))
# In case we have a custom tracedir postfix, we need to create directories on demand
if not tracedir.exists():
tracedir.mkdir()
for input_path in nc.input_paths_for_fuzzer_dir(fuzzer_dir, crashes=crashing_inputs):
bbl_trace_path, ram_trace_path, mmio_trace_path = None, None, None
bbl_set_path, mmio_set_path, bbl_hash_path = None, None, None
for trace_path in nc.trace_paths_for_input(input_path):
trace_dir, trace_name = os.path.split(trace_path)
if tracedir_postfix:
trace_path = os.path.join(trace_dir+f"_{tracedir_postfix}", trace_name)
for prefix in required_trace_prefixes:
if trace_name.startswith(prefix) and not os.path.exists(trace_path):
if prefix == nc.PREFIX_BASIC_BLOCK_TRACE:
bbl_trace_path = trace_path
elif prefix == nc.PREFIX_MMIO_TRACE:
mmio_trace_path = trace_path
elif prefix == nc.PREFIX_RAM_TRACE:
ram_trace_path = trace_path
elif prefix == nc.PREFIX_BASIC_BLOCK_SET:
bbl_set_path = trace_path
elif prefix == nc.PREFIX_MMIO_SET:
mmio_set_path = trace_path
elif prefix == nc.PREFIX_BASIC_BLOCK_HASH:
bbl_hash_path = trace_path
else:
assert False
break
if any(p is not None for p in (bbl_trace_path, ram_trace_path, mmio_trace_path, bbl_set_path, mmio_set_path, bbl_hash_path)):
num_gentrace_jobs += 1
if can_use_native_batch:
# This is ugly, but this way we don't need to pivot the lists later
if not jobs_for_config:
jobs_for_config = [[], [], [], []]
jobs_for_config[0].append(input_path)
jobs_for_config[1].append(bbl_set_path)
jobs_for_config[2].append(mmio_set_path)
jobs_for_config[3].append(bbl_hash_path)
else:
jobs_for_config.append((str(input_path), bbl_trace_path, ram_trace_path, mmio_trace_path, bbl_set_path, mmio_set_path, bbl_hash_path))
# If we found jobs for the given config path, add them
if not jobs_for_config:
if log_progress:
logger.info("No traces to generate for main path")
return
num_processed = 0
start_time = time.time()
if can_use_native_batch:
input_paths, bbl_set_paths, mmio_set_paths, bbl_hash_paths = jobs_for_config
batch_gen_native_traces(config_path, input_paths, extra_args, bbl_set_paths, mmio_set_paths, bbl_hash_paths, not verbose)
if log_progress:
logger.info(f"Generating traces took {time.time() - start_time:.02f} seconds for {len(input_paths)} input(s)")
else:
num_processed = 0
for input_path, bbl_trace_path, ram_trace_path, mmio_trace_path, bbl_set_path, mmio_set_path, bbl_hash_path in jobs_for_config:
gen_traces(str(config_path), str(input_path),
bbl_trace_path=bbl_trace_path, ram_trace_path=ram_trace_path, mmio_trace_path=mmio_trace_path,
bbl_set_path=bbl_set_path, mmio_set_path=mmio_set_path, bbl_hash_path=bbl_hash_path,
extra_args=extra_args, silent=not verbose
)
num_processed += 1
if log_progress:
if num_processed > 0 and num_processed % 50 == 0:
time_passed = round(time.time() - start_time)
relative_done = (num_processed+1) / num_gentrace_jobs
time_estimated = round((relative_done ** (-1)) * time_passed)
logger.info(f"[*] Processed {num_processed}/{num_gentrace_jobs} in {time_passed} seconds. Estimated seconds remaining: {time_estimated-time_passed}")
def gen_all_missing_traces(projdir, trace_name_prefixes=None, log_progress=False, verbose=False, crashing_inputs=False):
if trace_name_prefixes is None:
trace_name_prefixes = nc.TRACE_FILENAME_PREFIXES
for maindir in nc.main_dirs_for_proj(projdir):
gen_missing_maindir_traces(maindir, trace_name_prefixes, log_progress=log_progress, verbose=verbose, crashing_inputs=crashing_inputs)
def spawn_forkserver_emu_child(config_path, input_path, extra_args, silent=False):
arg_list = gen_run_arglist(config_path, extra_args) + [input_path]
# Set up pipes for AFL fork server communication
control_fd_rd, control_fd_wr = os.pipe()
status_fd_rd, status_fd_wr = os.pipe()
os.dup2(control_fd_rd, FORKSRV_FD)
os.dup2(status_fd_wr, FORKSRV_FD + 1)
os.set_inheritable(FORKSRV_FD, True)
os.set_inheritable(FORKSRV_FD + 1, True)
# Close duplicated fds
os.close(control_fd_rd)
os.close(status_fd_wr)
subprocess_env = os.environ
subprocess_env.setdefault("__AFL_SHM_ID", "0")
# Silence stdout/stderr if requested
stdout, stderr = None, None
if silent:
stdout, stderr = subprocess.DEVNULL, subprocess.DEVNULL
proc = subprocess.Popen(arg_list, stdout=stdout, stderr=stderr, pass_fds=[FORKSRV_FD, FORKSRV_FD + 1], env=subprocess_env)
# Close opposing end of pipe
os.close(FORKSRV_FD)
os.close(FORKSRV_FD + 1)
# Wait for emulator process to respond
assert len(os.read(status_fd_rd, 4)) == 4
return proc, control_fd_wr, status_fd_rd
class TraceGenProc:
"""
Class which spawns an underlying emulator child to then generate
traces quickly, given a stable configuration.
This fakes the fuzzer side of the AFL fork server setup to the emulator
so that the emulator can use snapshotting to quickly run multiple times.
"""
uuid: str
# Stable paths to pass arguments to emulator where we create symlinks later
stable_input_path: Path = None
stable_bbset_path: Path = None
stable_bbhash_path: Path = None
stable_mmioset_path: Path = None
child_proc = None
status_read_fd = None
ctrl_write_fd = None
config_path = None
def __init__(self, config_path, extra_args=None, gen_bb_set=False, gen_mmio_set=False, gen_bb_hash=False, base_path="/tmp", silent=False):
self.uuid = str(uuid.uuid4())
self.stable_input_path = Path(os.path.join(base_path, ".trace_input_"+self.uuid))
if gen_bb_set:
self.stable_bbset_path = Path(os.path.join(base_path, ".trace_bbset_"+self.uuid))
if gen_bb_hash:
self.stable_bbhash_path = Path(os.path.join(base_path, ".trace_bbhash_"+self.uuid))
if gen_mmio_set:
self.stable_mmioset_path = Path(os.path.join(base_path, ".trace_mmioset_"+self.uuid))
self.spawn_emulator_child(config_path, extra_args, gen_bb_set=gen_bb_set, gen_mmio_set=gen_mmio_set, gen_bb_hash=gen_bb_hash, silent=silent)
def destroy(self):
self.rm_old_links()
self.kill_emulator_child()
def __del__(self):
self.destroy()
try:
super().__del__()
except AttributeError:
pass
def spawn_emulator_child(self, config_path, extra_args=None, gen_bb_set=False, gen_mmio_set=False, gen_bb_hash=False, silent=False):
extra_args = extra_args or []
if gen_bb_set:
extra_args += ["--bb-set-out", str(self.stable_bbset_path)]
if gen_mmio_set:
extra_args += ["--mmio-set-out", str(self.stable_mmioset_path)]
if gen_bb_hash:
extra_args += ["--bb-hash-out", str(self.stable_bbhash_path)]
logger.debug(f"spawn_emulator_child setting up arguments {extra_args}")
self.child_proc, self.ctrl_write_fd, self.status_read_fd = spawn_forkserver_emu_child(config_path, self.stable_input_path, extra_args, silent=silent)
def kill_emulator_child(self):
logger.debug("[Trace Gen] kill_emulator_child")
if self.status_read_fd is not None:
os.close(self.status_read_fd)
os.close(self.ctrl_write_fd)
try:
self.child_proc.kill()
except OSError:
pass
self.status_read_fd = None
self.ctrl_write_fd = None
self.child_proc = None
def rm_old_links(self):
for p in (self.stable_input_path, self.stable_bbset_path, self.stable_mmioset_path, self.stable_bbhash_path):
if p is not None:
try:
p.unlink()
except FileNotFoundError:
pass
def setup_links(self, input_path, bb_set_path=None, mmio_set_path=None, bb_hash_path=None):
# Create Symlinks to input and output paths
self.rm_old_links()
# We always need an input
self.stable_input_path.symlink_to(input_path)
# For output paths, we may not need to create all
if bb_set_path:
self.stable_bbset_path.symlink_to(bb_set_path)
if mmio_set_path:
self.stable_mmioset_path.symlink_to(mmio_set_path)
if bb_hash_path:
self.stable_bbhash_path.symlink_to(bb_hash_path)
def gen_trace(self, input_path, bb_set_path=None, mmio_set_path=None, bb_hash_path=None):
# First set up symlinks to the input file and the trace destinations
self.setup_links(input_path, bb_set_path, mmio_set_path, bb_hash_path)
# And now, kick off child by sending go via control fd
assert os.write(self.ctrl_write_fd, b"\0\0\0\0") == 4
# Read two times from FD (one time for start, one time for emu finish)
for _ in range(2):
sock_read_len = len(os.read(self.status_read_fd, 4))
if sock_read_len != 4:
break
# We have been successful in case the expected amount of bytes are read
return sock_read_len == 4
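# Hypothetical usage sketch (not called anywhere): drive a single snapshotting
# emulator child for several inputs that share one configuration. All paths are
# made up.
def _example_tracegen_proc():
    proc = TraceGenProc("/tmp/proj/config.yml", gen_bb_set=True, gen_mmio_set=True, silent=True)
    try:
        for name in ("id:000000", "id:000001"):
            input_path = "/tmp/proj/main001/fuzzers/fuzzer1/queue/" + name
            if not proc.gen_trace(input_path,
                                  bb_set_path=input_path + ".bbset",
                                  mmio_set_path=input_path + ".mmioset"):
                logger.error("Trace generation failed for %s", input_path)
                break
    finally:
        proc.destroy()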
class TraceGenWorker(rq.Worker): #pylint: disable=too-many-instance-attributes
last_config_path = None
trace_proc: TraceGenProc = None
def __del__(self):
if self.trace_proc:
self.trace_proc.destroy()
try:
super().__del__()
except AttributeError:
pass
def discard_trace_proc(self):
if self.trace_proc:
self.trace_proc.destroy()
self.trace_proc = None
def execute_job(self, job, queue): #pylint: disable=inconsistent-return-statements
# self.set_state(WorkerStatus.BUSY)
kwargs = job.kwargs
bbl_set_path, mmio_set_path = kwargs.get(ARGNAME_BBL_SET_PATH, False), kwargs.get(ARGNAME_MMIO_SET_PATH)
# If we don't have exactly bbl and MMIO set generation, forward to original implementation
if (not bbl_set_path) or (not mmio_set_path) or \
any(kwargs.get(argname) for argname in FORKSERVER_UNSUPPORTED_TRACE_ARGS):
return super().execute_job(job, queue)
self.prepare_job_execution(job)
job.started_at = datetime.datetime.utcnow()
config_path, input_path = job.args
extra_args = kwargs.get(ARGNAME_EXTRA_ARGS, [])
# If we need to switch to another config, kill current emulator child process
if config_path != self.last_config_path:
logger.info(f"Discarding current trace process due to changed config path. Config changed from {self.last_config_path} to {config_path}")
self.discard_trace_proc()
self.last_config_path = config_path
# If we do not have a child process already, create one now
if self.trace_proc is None:
logger.info(f"Creating new trace process for config path {config_path}")
# Start child process
self.trace_proc = TraceGenProc(config_path, extra_args, gen_bb_set=True, gen_mmio_set=True)
success = self.trace_proc.gen_trace(input_path, bbl_set_path, mmio_set_path)
job.ended_at = datetime.datetime.utcnow()
logger.info(f"Generated traces for {os.path.basename(input_path)} in {(job.ended_at-job.started_at).microseconds} us")
if success:
# Job success
job.set_status(rq.job.JobStatus.FINISHED)
self.handle_job_success(job=job, queue=queue,
started_job_registry=queue.started_job_registry)
else:
# Job fail
self.handle_job_failure(job=job, queue=queue,
started_job_registry=queue.started_job_registry)
# The emulator is likely in a bad state now, kill child
logger.warning(f"[Trace Gen Job] got a failed tracing job (which ran from {job.started_at} to {job.ended_at}). closing file pipe FDs for kill + respawn.")
self.trace_proc.destroy()
self.trace_proc = None
self.set_state(WorkerStatus.IDLE)
|
the-stack_106_16787
|
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
"""
A framework for image processing.
Images are stored in instances of the Image class. The data is stored as
ARGB colors in a numpy.array() with dtype uint32 or, if numpy isn't
installed, in an array.array("I") or array.array("L"), whichever is
needed for 32-bit values.
Supporting modules provide the ability to load/save files in particular
image formats (see Xbm.py and Xpm.py).
Every module *must* provide can_save(filename) and can_load(filename)
functions: these should return a value between 0 (can't) and 100 (can to
perfection); and, of course, load(image, filename), and save(image,
filename) functions. If you want to override an existing module (e.g.,
Xbm.py), just create a new one, say, Xbm2.py, and make sure its
can_load() and can_save() functions return higher values than the Xbm.py
module's. (All standard modules return 100 or less for what they can and 0
for what they can't.)
Rather than creating Images directly, use one of the construction
functions, create(), from_file(), or from_data().
For sophisticated image processing install numpy _and_ scipy and use
the scipy image processing functions.
"""
import collections
import importlib
import os
import re
import sys
import warnings
try:
import numpy
except ImportError:
numpy = None
import array
CLEAR_ALPHA = 0x00FFFFFF # & to ARGB color int to get rid of alpha channel
MAX_ARGB = 0xFFFFFFFF
MAX_COMPONENT = 0xFF
SOLID = 0xFF000000 # + to RGB color int to get a solid ARGB color int
class Error(Exception): pass
_Modules = []
for name in os.listdir(os.path.dirname(__file__)):
if not name.startswith("_") and name.endswith(".py"):
name = "." + os.path.splitext(name)[0]
try:
module = importlib.import_module(name, "Image")
_Modules.append(module)
except ImportError as err:
warnings.warn("failed to load Image module: {}".format(err))
del name, module
class Image:
def __init__(self, width=None, height=None, filename=None,
background=None, pixels=None):
"""Create Images using one of the convenience construction
functions: from_file(), create(), and from_data()
Although .width and .height are public they should not be
changed except in load() methods."""
assert (width is not None and (height is not None or
pixels is not None) or (filename is not None))
if filename is not None: # From file
self.load(filename)
elif pixels is not None: # From data
self.width = width
self.height = len(pixels) // width
self.filename = filename
self.meta = {}
self.pixels = pixels
else: # Empty
self.width = width
self.height = height
self.filename = filename
self.meta = {}
self.pixels = create_array(width, height, background)
@classmethod
def from_file(Class, filename):
return Class(filename=filename)
@classmethod
def create(Class, width, height, background=None):
return Class(width=width, height=height, background=background)
@classmethod
def from_data(Class, width, pixels):
return Class(width=width, pixels=pixels)
def load(self, filename):
"""loads the image from the file called filename; the format is
determined by the file suffix"""
module = Image._choose_module("can_load", filename)
if module is not None:
self.width = self.height = None
self.meta = {}
module.load(self, filename)
self.filename = filename
else:
raise Error("no Image module can load files of type {}".format(
os.path.splitext(filename)[1]))
def save(self, filename=None):
"""saves the image to a file called filename; the format is
determined by the file suffix"""
filename = filename if filename is not None else self.filename
if not filename:
raise Error("can't save without a filename")
module = Image._choose_module("can_save", filename)
if module is not None:
module.save(self, filename)
self.filename = filename
else:
raise Error("no Image module can save files of type {}".format(
os.path.splitext(filename)[1]))
@staticmethod
def _choose_module(actionName, filename):
bestRating = 0
bestModule = None
for module in _Modules:
action = getattr(module, actionName, None)
if action is not None:
rating = action(filename)
if rating > bestRating:
bestRating = rating
bestModule = module
return bestModule
def pixel(self, x, y):
"""returns the color at the given pixel as an ARGB int; x and y
must be in range"""
return self.pixels[(y * self.width) + x]
def set_pixel(self, x, y, color):
"""sets the given pixel to the given color; x and y must be in
range; color must be an ARGB int"""
self.pixels[(y * self.width) + x] = color
# Bresenham's mid-point line scanning algorithm from
# http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
def line(self, x0, y0, x1, y1, color):
"""draws the line in the given color; the coordinates must be in
range; the color must be an ARGB int"""
Δx = abs(x1 - x0)
Δy = abs(y1 - y0)
xInc = 1 if x0 < x1 else -1
yInc = 1 if y0 < y1 else -1
δ = Δx - Δy
while True:
self.set_pixel(x0, y0, color)
if x0 == x1 and y0 == y1:
break
δ2 = 2 * δ
if δ2 > -Δy:
δ -= Δy
x0 += xInc
if δ2 < Δx:
δ += Δx
y0 += yInc
def rectangle(self, x0, y0, x1, y1, outline=None, fill=None):
"""draws a rectangle outline if outline is not None and fill is
None, or a filled rectangle if outline is None and fill is not
None, or an outlined and filled rectangle if both are not None;
the coordinates must be in range; the outline and fill colors
must be ARGB ints"""
assert outline is not None or fill is not None
if fill is not None:
    if y0 > y1:
        y0, y1 = y1, y0
    # Shrink only the filled area so the outline (drawn below at the
    # original coordinates) is not painted over
    fx0, fy0, fx1, fy1 = x0, y0, x1, y1
    if outline is not None:
        fx0 += 1
        fx1 -= 1
        fy0 += 1
        fy1 -= 1
    for y in range(fy0, fy1 + 1):
        self.line(fx0, y, fx1, y, fill)
if outline is not None:
self.line(x0, y0, x1, y0, outline)
self.line(x1, y0, x1, y1, outline)
self.line(x1, y1, x0, y1, outline)
self.line(x0, y1, x0, y0, outline)
def ellipse(self, x0, y0, x1, y1, outline=None, fill=None):
"""draws an ellipse outline if outline is not None and fill is
None, or a filledn ellipse if outline is None and fill is not
None, or an outlined and filledn ellipse if both are not None;
the coordinates must be in range; the outline and fill colors
must be ARGB ints"""
assert outline is not None or fill is not None
if x0 > x1:
x0, x1 = x1, x0
if y0 > y1:
y0, y1 = y1, y0
width = x1 - x0
height = y1 - y0
if fill is not None:
# Algorithm based on
# http://stackoverflow.com/questions/10322341/
# simple-algorithm-for-drawing-filled-ellipse-in-c-c
halfWidth = width // 2
halfHeight = height // 2
midX = x0 + halfWidth
midY = y0 + halfHeight
for y in range(-halfHeight, halfHeight + 1):
for x in range(-halfWidth, halfWidth + 1):
Δx = x / halfWidth
Δy = y / halfHeight
if ((Δx * Δx) + (Δy * Δy)) <= 1:
self.set_pixel(int(round(midX + x)),
int(round(midY + y)), fill)
if outline is not None:
# Midpoint ellipse algorithm from "Computer Graphics
# Principles and Practice".
if x1 > x0:
midX = ((x1 - x0) // 2) + x0
else:
midX = ((x0 - x1) // 2) + x1
if y1 > y0:
midY = ((y1 - y0) // 2) + y0
else:
midY = ((y0 - y1) // 2) + y1
def ellipse_point(Δx, Δy):
# Δx is always an int; Δy is always a float
self.set_pixel(midX + Δx, int(round(midY + Δy)), outline)
self.set_pixel(midX - Δx, int(round(midY - Δy)), outline)
self.set_pixel(midX + Δx, int(round(midY - Δy)), outline)
self.set_pixel(midX - Δx, int(round(midY + Δy)), outline)
a = abs(x1 - x0) / 2
b = abs(y1 - y0) / 2
a2 = a ** 2
b2 = b ** 2
Δx = 0
Δy = b
p = b2 - (a2 * b) + (a2 / 4)
ellipse_point(Δx, Δy)
while (a2 * (Δy - 0.5)) > (b2 * (Δx + 1)):
if p < 0:
p += b2 * ((2 * Δx) + 3)
Δx += 1
else:
p += (b2 * ((2 * Δx) + 3)) + (a2 * ((-2 * Δy) + 2))
Δx += 1
Δy -= 1
ellipse_point(Δx, Δy)
p = ((b2 * ((Δx + 0.5) ** 2)) + (a2 * ((Δy - 1) ** 2)) -
(a2 * b2))
while Δy > 0:
if p < 0:
p += (b2 * ((2 * Δx) + 2)) + (a2 * ((-2 * Δy) + 3))
Δx += 1
Δy -= 1
else:
p += a2 * ((-2 * Δy) + 3)
Δy -= 1
ellipse_point(Δx, Δy)
def subsample(self, stride):
"""returns a subsampled copy of this image.
stride should be at least 2 but not too big; a stride of 2
produces an image ½ the width and height (¼ the original size),
a stride of 3 produces an image ⅓ the width and height, and so on.
Subsampling is fairly fast and produces good results for
photographs, but poor results for text, for which scale() is best.
"""
assert (2 <= stride <= min(self.width // 2, self.height // 2) and
isinstance(stride, int))
pixels = create_array(self.width // stride, self.height // stride)
index = 0
height = self.height - (self.height % stride)
width = self.width - (self.width % stride)
for y in range(0, height, stride):
offset = y * self.width
for x in range(0, width, stride):
if index == len(pixels):
break
pixels[index] = self.pixels[offset + x]
index += 1
return self.from_data(self.width // stride, pixels)
def scale(self, ratio):
"""returns a smoothly scaled copy of this image
ratio is how much to scale by, e.g., 0.75 means reduce width and
height to ¾ their original size, 0.5 to half (making the image ¼
of the original size), and so on.
Scaling is slow but produces good results even for text;
subsample() is faster.
"""
assert 0 < ratio < 1
rows = round(self.height * ratio)
columns = round(self.width * ratio)
pixels = create_array(columns, rows)
yStep = self.height / rows
xStep = self.width / columns
index = 0
for row in range(rows):
y0 = round(row * yStep)
y1 = round(y0 + yStep)
for column in range(columns):
x0 = round(column * xStep)
x1 = round(x0 + xStep)
pixels[index] = self._mean(x0, y0, x1, y1)
index += 1
return self.from_data(columns, pixels)
def _mean(self, x0, y0, x1, y1):
αTotal, redTotal, greenTotal, blueTotal, count = 0, 0, 0, 0, 0
for y in range(y0, y1):
if y >= self.height:
break
offset = y * self.width
for x in range(x0, x1):
if x >= self.width:
break
α, r, g, b = self.argb_for_color(self.pixels[offset + x])
αTotal += α
redTotal += r
greenTotal += g
blueTotal += b
count += 1
α = round(αTotal / count)
r = round(redTotal / count)
g = round(greenTotal / count)
b = round(blueTotal / count)
return self.color_for_argb(α, r, g, b)
def __str__(self):
width = self.width or 0
height = self.height or 0
s = "{}x{}".format(width, height)
if self.filename:
s += " " + self.filename
return s
@property
def size(self):
"""Convenience method to return the image's size"""
return self.width, self.height
@staticmethod
def argb_for_color(color):
"""returns an ARGB quadruple for a color specified as an int or
a color name or an #HHH, #HHHH, #HHHHHH or #HHHHHHHH RGB
str---in the latter case the alpha channel is set to 0xFF
(solid) if not specified"""
if numpy is not None:
if isinstance(color, numpy.uint32):
color = int(color)
if isinstance(color, str):
color = color_for_name(color)
elif not isinstance(color, int) or not (0 <= color <= MAX_ARGB):
raise Error("invalid color {}".format(color))
α = (color >> 24) & MAX_COMPONENT
r = (color >> 16) & MAX_COMPONENT
g = (color >> 8) & MAX_COMPONENT
b = (color & MAX_COMPONENT)
return α, r, g, b
@staticmethod
def rgb_for_color(color):
"""returns an RGB triple for a color specified as an int or
a color name or an #HHH, #HHHH, #HHHHHH or #HHHHHHHH RGB
str---in the latter case the alpha channel is set to 0xFF
(solid) if not specified"""
return argb_for_color(color)[1:]
@staticmethod
def color_for_argb(α, r, g, b):
"""returns an int representing the given ARGB value"""
if (0 <= α <= MAX_COMPONENT and 0 <= r <= MAX_COMPONENT and
0 <= g <= MAX_COMPONENT and 0 <= b <= MAX_COMPONENT):
color = 0
color |= (((α & MAX_COMPONENT) << 24) |
((r & MAX_COMPONENT) << 16) |
((g & MAX_COMPONENT) << 8) | (b & MAX_COMPONENT))
return color
raise Error("invalid αrgb {}, {}, {}, {}".format(α, r, g, b))
@staticmethod
def color_for_rgb(r, g, b):
"""returns an int representing the given RGB value; the alpha
channel is set to 0xFF (solid)"""
return color_for_argb(MAX_COMPONENT, r, g, b)
@staticmethod
def color_for_name(name):
"""returns an ARGB int for a color specified as an int or
a color name or an #HHH, #HHHH, #HHHHHH or #HHHHHHHH RGB
str---in the latter case the alpha channel is set to 0xFF
(solid) if not specified"""
if name is None:
return ColorForName["transparent"]
if name.startswith("#"):
name = name[1:]
if len(name) == 3: # add solid alpha
name = "F" + name # now has 4 hex digits
if len(name) == 6: # add solid alpha
name = "FF" + name # now has the full 8 hex digits
if len(name) == 4: # originally #FFF or #FFFF
components = []
for h in name:
components.extend([h, h])
name = "".join(components) # now has the full 8 hex digits
return int(name, 16)
return ColorForName[name.lower()]
# ColorForName is a default dict so will always return a color,
# e.g., black
def _dump(self, file=sys.stdout, alpha=True):
assert (self.width * self.height) == len(self.pixels)
for y in range(self.height):
for x in range(self.width):
color = self.pixel(x, y)
if alpha:
file.write("{:08X} ".format(color))
else:
file.write("{:06X} ".format(color & CLEAR_ALPHA))
file.write("\n")
file.write("\n")
# Convenience functions
argb_for_color = Image.argb_for_color
rgb_for_color = Image.rgb_for_color
color_for_argb = Image.color_for_argb
color_for_rgb = Image.color_for_rgb
color_for_name = Image.color_for_name
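# Hypothetical usage sketch (not called anywhere): build a small image with the
# construction functions described in the module docstring and draw on it using
# ARGB ints. Calling save() would dispatch to whichever loaded plugin module
# returns the highest can_save() rating for the (made-up) filename suffix.
def _example_image_usage():
    image = Image.create(16, 16, background=color_for_name("lightblue"))
    image.rectangle(2, 2, 13, 13, outline=color_for_name("navy"),
                    fill=color_for_name("#FFF"))
    image.line(0, 0, 15, 15, color_for_rgb(0xFF, 0x00, 0x00))
    # image.save("example.xpm")  # needs an Xpm plugin module to be available
    return image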
def sanitized_name(name):
"""returns a name suitable for XBM and XPM images"""
name = re.sub(r"\W+", "", os.path.basename(os.path.splitext(name)[0]))
if not name or name[0].isdigit():
name = "z" + name
return name
def create_array(width, height, background=None):
"""returns an array.array or numpy.array of the correct size and
with the given background color"""
if numpy is not None:
if background is None:
return numpy.zeros(width * height, dtype=numpy.uint32)
else:
iterable = (background for _ in range(width * height))
return numpy.fromiter(iterable, numpy.uint32)
else:
# Use the smallest typecode that can store a 32-bit unsigned integer
typecode = "I" if array.array("I").itemsize >= 4 else "L"
background = (background if background is not None else
ColorForName["transparent"])
return array.array(typecode, [background] * width * height)
# Taken from rgb.txt and converted to ARGB (with the addition of
# transparent). Default is solid black.
ColorForName = collections.defaultdict(lambda: 0xFF000000, {
"transparent": 0x00000000, "aliceblue": 0xFFF0F8FF,
"antiquewhite": 0xFFFAEBD7, "antiquewhite1": 0xFFFFEFDB,
"antiquewhite2": 0xFFEEDFCC, "antiquewhite3": 0xFFCDC0B0,
"antiquewhite4": 0xFF8B8378, "aquamarine": 0xFF7FFFD4,
"aquamarine1": 0xFF7FFFD4, "aquamarine2": 0xFF76EEC6,
"aquamarine3": 0xFF66CDAA, "aquamarine4": 0xFF458B74,
"azure": 0xFFF0FFFF, "azure1": 0xFFF0FFFF, "azure2": 0xFFE0EEEE,
"azure3": 0xFFC1CDCD, "azure4": 0xFF838B8B, "beige": 0xFFF5F5DC,
"bisque": 0xFFFFE4C4, "bisque1": 0xFFFFE4C4, "bisque2": 0xFFEED5B7,
"bisque3": 0xFFCDB79E, "bisque4": 0xFF8B7D6B, "black": 0xFF000000,
"blanchedalmond": 0xFFFFEBCD, "blue": 0xFF0000FF, "blue1": 0xFF0000FF,
"blue2": 0xFF0000EE, "blue3": 0xFF0000CD, "blue4": 0xFF00008B,
"blueviolet": 0xFF8A2BE2, "brown": 0xFFA52A2A, "brown1": 0xFFFF4040,
"brown2": 0xFFEE3B3B, "brown3": 0xFFCD3333, "brown4": 0xFF8B2323,
"burlywood": 0xFFDEB887, "burlywood1": 0xFFFFD39B,
"burlywood2": 0xFFEEC591, "burlywood3": 0xFFCDAA7D,
"burlywood4": 0xFF8B7355, "cadetblue": 0xFF5F9EA0,
"cadetblue1": 0xFF98F5FF, "cadetblue2": 0xFF8EE5EE,
"cadetblue3": 0xFF7AC5CD, "cadetblue4": 0xFF53868B,
"chartreuse": 0xFF7FFF00, "chartreuse1": 0xFF7FFF00,
"chartreuse2": 0xFF76EE00, "chartreuse3": 0xFF66CD00,
"chartreuse4": 0xFF458B00, "chocolate": 0xFFD2691E,
"chocolate1": 0xFFFF7F24, "chocolate2": 0xFFEE7621,
"chocolate3": 0xFFCD661D, "chocolate4": 0xFF8B4513, "coral": 0xFFFF7F50,
"coral1": 0xFFFF7256, "coral2": 0xFFEE6A50, "coral3": 0xFFCD5B45,
"coral4": 0xFF8B3E2F, "cornflowerblue": 0xFF6495ED,
"cornsilk": 0xFFFFF8DC, "cornsilk1": 0xFFFFF8DC,
"cornsilk2": 0xFFEEE8CD, "cornsilk3": 0xFFCDC8B1,
"cornsilk4": 0xFF8B8878, "cyan": 0xFF00FFFF, "cyan1": 0xFF00FFFF,
"cyan2": 0xFF00EEEE, "cyan3": 0xFF00CDCD, "cyan4": 0xFF008B8B,
"darkblue": 0xFF00008B, "darkcyan": 0xFF008B8B,
"darkgoldenrod": 0xFFB8860B, "darkgoldenrod1": 0xFFFFB90F,
"darkgoldenrod2": 0xFFEEAD0E, "darkgoldenrod3": 0xFFCD950C,
"darkgoldenrod4": 0xFF8B6508, "darkgray": 0xFFA9A9A9,
"darkgreen": 0xFF006400, "darkgrey": 0xFFA9A9A9,
"darkkhaki": 0xFFBDB76B, "darkmagenta": 0xFF8B008B,
"darkolivegreen": 0xFF556B2F, "darkolivegreen1": 0xFFCAFF70,
"darkolivegreen2": 0xFFBCEE68, "darkolivegreen3": 0xFFA2CD5A,
"darkolivegreen4": 0xFF6E8B3D, "darkorange": 0xFFFF8C00,
"darkorange1": 0xFFFF7F00, "darkorange2": 0xFFEE7600,
"darkorange3": 0xFFCD6600, "darkorange4": 0xFF8B4500,
"darkorchid": 0xFF9932CC, "darkorchid1": 0xFFBF3EFF,
"darkorchid2": 0xFFB23AEE, "darkorchid3": 0xFF9A32CD,
"darkorchid4": 0xFF68228B, "darkred": 0xFF8B0000,
"darksalmon": 0xFFE9967A, "darkseagreen": 0xFF8FBC8F,
"darkseagreen1": 0xFFC1FFC1, "darkseagreen2": 0xFFB4EEB4,
"darkseagreen3": 0xFF9BCD9B, "darkseagreen4": 0xFF698B69,
"darkslateblue": 0xFF483D8B, "darkslategray": 0xFF2F4F4F,
"darkslategray1": 0xFF97FFFF, "darkslategray2": 0xFF8DEEEE,
"darkslategray3": 0xFF79CDCD, "darkslategray4": 0xFF528B8B,
"darkslategrey": 0xFF2F4F4F, "darkturquoise": 0xFF00CED1,
"darkviolet": 0xFF9400D3, "debianred": 0xFFD70751,
"deeppink": 0xFFFF1493, "deeppink1": 0xFFFF1493,
"deeppink2": 0xFFEE1289, "deeppink3": 0xFFCD1076,
"deeppink4": 0xFF8B0A50, "deepskyblue": 0xFF00BFFF,
"deepskyblue1": 0xFF00BFFF, "deepskyblue2": 0xFF00B2EE,
"deepskyblue3": 0xFF009ACD, "deepskyblue4": 0xFF00688B,
"dimgray": 0xFF696969, "dimgrey": 0xFF696969, "dodgerblue": 0xFF1E90FF,
"dodgerblue1": 0xFF1E90FF, "dodgerblue2": 0xFF1C86EE,
"dodgerblue3": 0xFF1874CD, "dodgerblue4": 0xFF104E8B,
"firebrick": 0xFFB22222, "firebrick1": 0xFFFF3030,
"firebrick2": 0xFFEE2C2C, "firebrick3": 0xFFCD2626,
"firebrick4": 0xFF8B1A1A, "floralwhite": 0xFFFFFAF0,
"forestgreen": 0xFF228B22, "gainsboro": 0xFFDCDCDC,
"ghostwhite": 0xFFF8F8FF, "gold": 0xFFFFD700, "gold1": 0xFFFFD700,
"gold2": 0xFFEEC900, "gold3": 0xFFCDAD00, "gold4": 0xFF8B7500,
"goldenrod": 0xFFDAA520, "goldenrod1": 0xFFFFC125,
"goldenrod2": 0xFFEEB422, "goldenrod3": 0xFFCD9B1D,
"goldenrod4": 0xFF8B6914, "gray0": 0xFF000000, "gray": 0xFFBEBEBE,
"gray100": 0xFFFFFFFF, "gray10": 0xFF1A1A1A, "gray1": 0xFF030303,
"gray11": 0xFF1C1C1C, "gray12": 0xFF1F1F1F, "gray13": 0xFF212121,
"gray14": 0xFF242424, "gray15": 0xFF262626, "gray16": 0xFF292929,
"gray17": 0xFF2B2B2B, "gray18": 0xFF2E2E2E, "gray19": 0xFF303030,
"gray20": 0xFF333333, "gray2": 0xFF050505, "gray21": 0xFF363636,
"gray22": 0xFF383838, "gray23": 0xFF3B3B3B, "gray24": 0xFF3D3D3D,
"gray25": 0xFF404040, "gray26": 0xFF424242, "gray27": 0xFF454545,
"gray28": 0xFF474747, "gray29": 0xFF4A4A4A, "gray30": 0xFF4D4D4D,
"gray3": 0xFF080808, "gray31": 0xFF4F4F4F, "gray32": 0xFF525252,
"gray33": 0xFF545454, "gray34": 0xFF575757, "gray35": 0xFF595959,
"gray36": 0xFF5C5C5C, "gray37": 0xFF5E5E5E, "gray38": 0xFF616161,
"gray39": 0xFF636363, "gray40": 0xFF666666, "gray4": 0xFF0A0A0A,
"gray41": 0xFF696969, "gray42": 0xFF6B6B6B, "gray43": 0xFF6E6E6E,
"gray44": 0xFF707070, "gray45": 0xFF737373, "gray46": 0xFF757575,
"gray47": 0xFF787878, "gray48": 0xFF7A7A7A, "gray49": 0xFF7D7D7D,
"gray50": 0xFF7F7F7F, "gray5": 0xFF0D0D0D, "gray51": 0xFF828282,
"gray52": 0xFF858585, "gray53": 0xFF878787, "gray54": 0xFF8A8A8A,
"gray55": 0xFF8C8C8C, "gray56": 0xFF8F8F8F, "gray57": 0xFF919191,
"gray58": 0xFF949494, "gray59": 0xFF969696, "gray60": 0xFF999999,
"gray6": 0xFF0F0F0F, "gray61": 0xFF9C9C9C, "gray62": 0xFF9E9E9E,
"gray63": 0xFFA1A1A1, "gray64": 0xFFA3A3A3, "gray65": 0xFFA6A6A6,
"gray66": 0xFFA8A8A8, "gray67": 0xFFABABAB, "gray68": 0xFFADADAD,
"gray69": 0xFFB0B0B0, "gray70": 0xFFB3B3B3, "gray7": 0xFF121212,
"gray71": 0xFFB5B5B5, "gray72": 0xFFB8B8B8, "gray73": 0xFFBABABA,
"gray74": 0xFFBDBDBD, "gray75": 0xFFBFBFBF, "gray76": 0xFFC2C2C2,
"gray77": 0xFFC4C4C4, "gray78": 0xFFC7C7C7, "gray79": 0xFFC9C9C9,
"gray80": 0xFFCCCCCC, "gray8": 0xFF141414, "gray81": 0xFFCFCFCF,
"gray82": 0xFFD1D1D1, "gray83": 0xFFD4D4D4, "gray84": 0xFFD6D6D6,
"gray85": 0xFFD9D9D9, "gray86": 0xFFDBDBDB, "gray87": 0xFFDEDEDE,
"gray88": 0xFFE0E0E0, "gray89": 0xFFE3E3E3, "gray90": 0xFFE5E5E5,
"gray9": 0xFF171717, "gray91": 0xFFE8E8E8, "gray92": 0xFFEBEBEB,
"gray93": 0xFFEDEDED, "gray94": 0xFFF0F0F0, "gray95": 0xFFF2F2F2,
"gray96": 0xFFF5F5F5, "gray97": 0xFFF7F7F7, "gray98": 0xFFFAFAFA,
"gray99": 0xFFFCFCFC, "green": 0xFF00FF00, "green1": 0xFF00FF00,
"green2": 0xFF00EE00, "green3": 0xFF00CD00, "green4": 0xFF008B00,
"greenyellow": 0xFFADFF2F, "grey0": 0xFF000000, "grey": 0xFFBEBEBE,
"grey100": 0xFFFFFFFF, "grey10": 0xFF1A1A1A, "grey1": 0xFF030303,
"grey11": 0xFF1C1C1C, "grey12": 0xFF1F1F1F, "grey13": 0xFF212121,
"grey14": 0xFF242424, "grey15": 0xFF262626, "grey16": 0xFF292929,
"grey17": 0xFF2B2B2B, "grey18": 0xFF2E2E2E, "grey19": 0xFF303030,
"grey20": 0xFF333333, "grey2": 0xFF050505, "grey21": 0xFF363636,
"grey22": 0xFF383838, "grey23": 0xFF3B3B3B, "grey24": 0xFF3D3D3D,
"grey25": 0xFF404040, "grey26": 0xFF424242, "grey27": 0xFF454545,
"grey28": 0xFF474747, "grey29": 0xFF4A4A4A, "grey30": 0xFF4D4D4D,
"grey3": 0xFF080808, "grey31": 0xFF4F4F4F, "grey32": 0xFF525252,
"grey33": 0xFF545454, "grey34": 0xFF575757, "grey35": 0xFF595959,
"grey36": 0xFF5C5C5C, "grey37": 0xFF5E5E5E, "grey38": 0xFF616161,
"grey39": 0xFF636363, "grey40": 0xFF666666, "grey4": 0xFF0A0A0A,
"grey41": 0xFF696969, "grey42": 0xFF6B6B6B, "grey43": 0xFF6E6E6E,
"grey44": 0xFF707070, "grey45": 0xFF737373, "grey46": 0xFF757575,
"grey47": 0xFF787878, "grey48": 0xFF7A7A7A, "grey49": 0xFF7D7D7D,
"grey50": 0xFF7F7F7F, "grey5": 0xFF0D0D0D, "grey51": 0xFF828282,
"grey52": 0xFF858585, "grey53": 0xFF878787, "grey54": 0xFF8A8A8A,
"grey55": 0xFF8C8C8C, "grey56": 0xFF8F8F8F, "grey57": 0xFF919191,
"grey58": 0xFF949494, "grey59": 0xFF969696, "grey60": 0xFF999999,
"grey6": 0xFF0F0F0F, "grey61": 0xFF9C9C9C, "grey62": 0xFF9E9E9E,
"grey63": 0xFFA1A1A1, "grey64": 0xFFA3A3A3, "grey65": 0xFFA6A6A6,
"grey66": 0xFFA8A8A8, "grey67": 0xFFABABAB, "grey68": 0xFFADADAD,
"grey69": 0xFFB0B0B0, "grey70": 0xFFB3B3B3, "grey7": 0xFF121212,
"grey71": 0xFFB5B5B5, "grey72": 0xFFB8B8B8, "grey73": 0xFFBABABA,
"grey74": 0xFFBDBDBD, "grey75": 0xFFBFBFBF, "grey76": 0xFFC2C2C2,
"grey77": 0xFFC4C4C4, "grey78": 0xFFC7C7C7, "grey79": 0xFFC9C9C9,
"grey80": 0xFFCCCCCC, "grey8": 0xFF141414, "grey81": 0xFFCFCFCF,
"grey82": 0xFFD1D1D1, "grey83": 0xFFD4D4D4, "grey84": 0xFFD6D6D6,
"grey85": 0xFFD9D9D9, "grey86": 0xFFDBDBDB, "grey87": 0xFFDEDEDE,
"grey88": 0xFFE0E0E0, "grey89": 0xFFE3E3E3, "grey90": 0xFFE5E5E5,
"grey9": 0xFF171717, "grey91": 0xFFE8E8E8, "grey92": 0xFFEBEBEB,
"grey93": 0xFFEDEDED, "grey94": 0xFFF0F0F0, "grey95": 0xFFF2F2F2,
"grey96": 0xFFF5F5F5, "grey97": 0xFFF7F7F7, "grey98": 0xFFFAFAFA,
"grey99": 0xFFFCFCFC, "honeydew": 0xFFF0FFF0, "honeydew1": 0xFFF0FFF0,
"honeydew2": 0xFFE0EEE0, "honeydew3": 0xFFC1CDC1,
"honeydew4": 0xFF838B83, "hotpink": 0xFFFF69B4, "hotpink1": 0xFFFF6EB4,
"hotpink2": 0xFFEE6AA7, "hotpink3": 0xFFCD6090, "hotpink4": 0xFF8B3A62,
"indianred": 0xFFCD5C5C, "indianred1": 0xFFFF6A6A,
"indianred2": 0xFFEE6363, "indianred3": 0xFFCD5555,
"indianred4": 0xFF8B3A3A, "ivory": 0xFFFFFFF0, "ivory1": 0xFFFFFFF0,
"ivory2": 0xFFEEEEE0, "ivory3": 0xFFCDCDC1, "ivory4": 0xFF8B8B83,
"khaki": 0xFFF0E68C, "khaki1": 0xFFFFF68F, "khaki2": 0xFFEEE685,
"khaki3": 0xFFCDC673, "khaki4": 0xFF8B864E, "lavender": 0xFFE6E6FA,
"lavenderblush": 0xFFFFF0F5, "lavenderblush1": 0xFFFFF0F5,
"lavenderblush2": 0xFFEEE0E5, "lavenderblush3": 0xFFCDC1C5,
"lavenderblush4": 0xFF8B8386, "lawngreen": 0xFF7CFC00,
"lemonchiffon": 0xFFFFFACD, "lemonchiffon1": 0xFFFFFACD,
"lemonchiffon2": 0xFFEEE9BF, "lemonchiffon3": 0xFFCDC9A5,
"lemonchiffon4": 0xFF8B8970, "lightblue": 0xFFADD8E6,
"lightblue1": 0xFFBFEFFF, "lightblue2": 0xFFB2DFEE,
"lightblue3": 0xFF9AC0CD, "lightblue4": 0xFF68838B,
"lightcoral": 0xFFF08080, "lightcyan": 0xFFE0FFFF,
"lightcyan1": 0xFFE0FFFF, "lightcyan2": 0xFFD1EEEE,
"lightcyan3": 0xFFB4CDCD, "lightcyan4": 0xFF7A8B8B,
"lightgoldenrod": 0xFFEEDD82, "lightgoldenrod1": 0xFFFFEC8B,
"lightgoldenrod2": 0xFFEEDC82, "lightgoldenrod3": 0xFFCDBE70,
"lightgoldenrod4": 0xFF8B814C, "lightgoldenrodyellow": 0xFFFAFAD2,
"lightgray": 0xFFD3D3D3, "lightgreen": 0xFF90EE90,
"lightgrey": 0xFFD3D3D3, "lightpink": 0xFFFFB6C1,
"lightpink1": 0xFFFFAEB9, "lightpink2": 0xFFEEA2AD,
"lightpink3": 0xFFCD8C95, "lightpink4": 0xFF8B5F65,
"lightsalmon": 0xFFFFA07A, "lightsalmon1": 0xFFFFA07A,
"lightsalmon2": 0xFFEE9572, "lightsalmon3": 0xFFCD8162,
"lightsalmon4": 0xFF8B5742, "lightseagreen": 0xFF20B2AA,
"lightskyblue": 0xFF87CEFA, "lightskyblue1": 0xFFB0E2FF,
"lightskyblue2": 0xFFA4D3EE, "lightskyblue3": 0xFF8DB6CD,
"lightskyblue4": 0xFF607B8B, "lightslateblue": 0xFF8470FF,
"lightslategray": 0xFF778899, "lightslategrey": 0xFF778899,
"lightsteelblue": 0xFFB0C4DE, "lightsteelblue1": 0xFFCAE1FF,
"lightsteelblue2": 0xFFBCD2EE, "lightsteelblue3": 0xFFA2B5CD,
"lightsteelblue4": 0xFF6E7B8B, "lightyellow": 0xFFFFFFE0,
"lightyellow1": 0xFFFFFFE0, "lightyellow2": 0xFFEEEED1,
"lightyellow3": 0xFFCDCDB4, "lightyellow4": 0xFF8B8B7A,
"limegreen": 0xFF32CD32, "linen": 0xFFFAF0E6, "magenta": 0xFFFF00FF,
"magenta1": 0xFFFF00FF, "magenta2": 0xFFEE00EE, "magenta3": 0xFFCD00CD,
"magenta4": 0xFF8B008B, "maroon": 0xFFB03060, "maroon1": 0xFFFF34B3,
"maroon2": 0xFFEE30A7, "maroon3": 0xFFCD2990, "maroon4": 0xFF8B1C62,
"mediumaquamarine": 0xFF66CDAA, "mediumblue": 0xFF0000CD,
"mediumorchid": 0xFFBA55D3, "mediumorchid1": 0xFFE066FF,
"mediumorchid2": 0xFFD15FEE, "mediumorchid3": 0xFFB452CD,
"mediumorchid4": 0xFF7A378B, "mediumpurple": 0xFF9370DB,
"mediumpurple1": 0xFFAB82FF, "mediumpurple2": 0xFF9F79EE,
"mediumpurple3": 0xFF8968CD, "mediumpurple4": 0xFF5D478B,
"mediumseagreen": 0xFF3CB371, "mediumslateblue": 0xFF7B68EE,
"mediumspringgreen": 0xFF00FA9A, "mediumturquoise": 0xFF48D1CC,
"mediumvioletred": 0xFFC71585, "midnightblue": 0xFF191970,
"mintcream": 0xFFF5FFFA, "mistyrose": 0xFFFFE4E1,
"mistyrose1": 0xFFFFE4E1, "mistyrose2": 0xFFEED5D2,
"mistyrose3": 0xFFCDB7B5, "mistyrose4": 0xFF8B7D7B,
"moccasin": 0xFFFFE4B5, "navajowhite": 0xFFFFDEAD,
"navajowhite1": 0xFFFFDEAD, "navajowhite2": 0xFFEECFA1,
"navajowhite3": 0xFFCDB38B, "navajowhite4": 0xFF8B795E,
"navy": 0xFF000080, "navyblue": 0xFF000080, "oldlace": 0xFFFDF5E6,
"olivedrab": 0xFF6B8E23, "olivedrab1": 0xFFC0FF3E,
"olivedrab2": 0xFFB3EE3A, "olivedrab3": 0xFF9ACD32,
"olivedrab4": 0xFF698B22, "orange": 0xFFFFA500, "orange1": 0xFFFFA500,
"orange2": 0xFFEE9A00, "orange3": 0xFFCD8500, "orange4": 0xFF8B5A00,
"orangered": 0xFFFF4500, "orangered1": 0xFFFF4500,
"orangered2": 0xFFEE4000, "orangered3": 0xFFCD3700,
"orangered4": 0xFF8B2500, "orchid": 0xFFDA70D6, "orchid1": 0xFFFF83FA,
"orchid2": 0xFFEE7AE9, "orchid3": 0xFFCD69C9, "orchid4": 0xFF8B4789,
"palegoldenrod": 0xFFEEE8AA, "palegreen": 0xFF98FB98,
"palegreen1": 0xFF9AFF9A, "palegreen2": 0xFF90EE90,
"palegreen3": 0xFF7CCD7C, "palegreen4": 0xFF548B54,
"paleturquoise": 0xFFAFEEEE, "paleturquoise1": 0xFFBBFFFF,
"paleturquoise2": 0xFFAEEEEE, "paleturquoise3": 0xFF96CDCD,
"paleturquoise4": 0xFF668B8B, "palevioletred": 0xFFDB7093,
"palevioletred1": 0xFFFF82AB, "palevioletred2": 0xFFEE799F,
"palevioletred3": 0xFFCD6889, "palevioletred4": 0xFF8B475D,
"papayawhip": 0xFFFFEFD5, "peachpuff": 0xFFFFDAB9,
"peachpuff1": 0xFFFFDAB9, "peachpuff2": 0xFFEECBAD,
"peachpuff3": 0xFFCDAF95, "peachpuff4": 0xFF8B7765,
"peru": 0xFFCD853F, "pink": 0xFFFFC0CB, "pink1": 0xFFFFB5C5,
"pink2": 0xFFEEA9B8, "pink3": 0xFFCD919E, "pink4": 0xFF8B636C,
"plum": 0xFFDDA0DD, "plum1": 0xFFFFBBFF, "plum2": 0xFFEEAEEE,
"plum3": 0xFFCD96CD, "plum4": 0xFF8B668B, "powderblue": 0xFFB0E0E6,
"purple": 0xFFA020F0, "purple1": 0xFF9B30FF, "purple2": 0xFF912CEE,
"purple3": 0xFF7D26CD, "purple4": 0xFF551A8B, "red": 0xFFFF0000,
"red1": 0xFFFF0000, "red2": 0xFFEE0000, "red3": 0xFFCD0000,
"red4": 0xFF8B0000, "rosybrown": 0xFFBC8F8F, "rosybrown1": 0xFFFFC1C1,
"rosybrown2": 0xFFEEB4B4, "rosybrown3": 0xFFCD9B9B,
"rosybrown4": 0xFF8B6969, "royalblue": 0xFF4169E1,
"royalblue1": 0xFF4876FF, "royalblue2": 0xFF436EEE,
"royalblue3": 0xFF3A5FCD, "royalblue4": 0xFF27408B,
"saddlebrown": 0xFF8B4513, "salmon": 0xFFFA8072, "salmon1": 0xFFFF8C69,
"salmon2": 0xFFEE8262, "salmon3": 0xFFCD7054, "salmon4": 0xFF8B4C39,
"sandybrown": 0xFFF4A460, "seagreen": 0xFF2E8B57,
"seagreen1": 0xFF54FF9F, "seagreen2": 0xFF4EEE94,
"seagreen3": 0xFF43CD80, "seagreen4": 0xFF2E8B57,
"seashell": 0xFFFFF5EE, "seashell1": 0xFFFFF5EE,
"seashell2": 0xFFEEE5DE, "seashell3": 0xFFCDC5BF,
"seashell4": 0xFF8B8682, "sienna": 0xFFA0522D, "sienna1": 0xFFFF8247,
"sienna2": 0xFFEE7942, "sienna3": 0xFFCD6839, "sienna4": 0xFF8B4726,
"skyblue": 0xFF87CEEB, "skyblue1": 0xFF87CEFF, "skyblue2": 0xFF7EC0EE,
"skyblue3": 0xFF6CA6CD, "skyblue4": 0xFF4A708B, "slateblue": 0xFF6A5ACD,
"slateblue1": 0xFF836FFF, "slateblue2": 0xFF7A67EE,
"slateblue3": 0xFF6959CD, "slateblue4": 0xFF473C8B,
"slategray": 0xFF708090, "slategray1": 0xFFC6E2FF,
"slategray2": 0xFFB9D3EE, "slategray3": 0xFF9FB6CD,
"slategray4": 0xFF6C7B8B, "slategrey": 0xFF708090, "snow": 0xFFFFFAFA,
"snow1": 0xFFFFFAFA, "snow2": 0xFFEEE9E9, "snow3": 0xFFCDC9C9,
"snow4": 0xFF8B8989, "springgreen": 0xFF00FF7F,
"springgreen1": 0xFF00FF7F, "springgreen2": 0xFF00EE76,
"springgreen3": 0xFF00CD66, "springgreen4": 0xFF008B45,
"steelblue": 0xFF4682B4, "steelblue1": 0xFF63B8FF,
"steelblue2": 0xFF5CACEE, "steelblue3": 0xFF4F94CD,
"steelblue4": 0xFF36648B, "tan": 0xFFD2B48C, "tan1": 0xFFFFA54F,
"tan2": 0xFFEE9A49, "tan3": 0xFFCD853F, "tan4": 0xFF8B5A2B,
"thistle": 0xFFD8BFD8, "thistle1": 0xFFFFE1FF, "thistle2": 0xFFEED2EE,
"thistle3": 0xFFCDB5CD, "thistle4": 0xFF8B7B8B, "tomato": 0xFFFF6347,
"tomato1": 0xFFFF6347, "tomato2": 0xFFEE5C42, "tomato3": 0xFFCD4F39,
"tomato4": 0xFF8B3626, "turquoise": 0xFF40E0D0,
"turquoise1": 0xFF00F5FF, "turquoise2": 0xFF00E5EE,
"turquoise3": 0xFF00C5CD, "turquoise4": 0xFF00868B,
"violet": 0xFFEE82EE, "violetred": 0xFFD02090, "violetred1": 0xFFFF3E96,
"violetred2": 0xFFEE3A8C, "violetred3": 0xFFCD3278,
"violetred4": 0xFF8B2252, "wheat": 0xFFF5DEB3, "wheat1": 0xFFFFE7BA,
"wheat2": 0xFFEED8AE, "wheat3": 0xFFCDBA96, "wheat4": 0xFF8B7E66,
"white": 0xFFFFFFFF, "whitesmoke": 0xFFF5F5F5, "yellow": 0xFFFFFF00,
"yellow1": 0xFFFFFF00, "yellow2": 0xFFEEEE00, "yellow3": 0xFFCDCD00,
"yellow4": 0xFF8B8B00, "yellowgreen": 0xFF9ACD32})
|
the-stack_106_16789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def check_zero_crossings(st, min_crossings=1.0, config=None):
"""
Check for a large enough zero-crossing density.
This is intended to screen out instrumental failures or resetting.
Value determined empirically from observations on the GeoNet network
by R Lee.
Args:
st (StationStream):
StationStream object.
min_crossings (float):
Minimum average number of zero crossings per second for the full
trace.
config (dict):
Configuration dictionary (or None). See get_config().
"""
zero_count_tr = []
delta_t = st[0].stats.delta
dur = (st[0].stats.npts - 1) * delta_t
for tr in st:
# Slice the signal window out of the trace data before counting crossings;
# the trace itself is not modified, we only count the crossings within
# that window
if tr.hasParameter("signal_end") and (not tr.hasParameter("failure")):
etime = tr.getParameter("signal_end")["end_time"]
split_time = tr.getParameter("signal_split")["split_time"]
sig_start = int((split_time - tr.stats.starttime) / tr.stats.delta)
sig_end = int((etime - tr.stats.starttime) / tr.stats.delta)
tr_data = tr.data[sig_start:sig_end]
zarray = np.multiply(tr_data[0:-1], tr_data[1:])
zindices = [i for (i, z) in enumerate(zarray) if z < 0]
zero_count_tr = len(zindices)
z_rate = zero_count_tr / dur
# Store the zero-crossing rate on the trace itself
tr.setParameter("ZeroCrossingRate", {"crossing_rate": z_rate})
# Fail if zero crossing rate is too low
if z_rate <= min_crossings:
tr.fail("Zero crossing rate too low.")
return st
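# Hypothetical sketch (not called anywhere) of the crossing count above on a
# plain array: adjacent samples with a negative product mark a sign change.
def _example_crossing_rate():
    data = np.array([1.0, -1.0, 2.0, -2.0, 1.0])  # alternating signs: 4 crossings
    crossings = len([i for (i, z) in enumerate(data[:-1] * data[1:]) if z < 0])
    duration = (len(data) - 1) * 0.01  # 5 samples at 100 Hz
    return crossings / duration  # 4 / 0.04 = 100 crossings per second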
|
the-stack_106_16792
|
import os
import cv2
import numpy as np
import logging
from colorlog import ColoredFormatter
import pinyin
import timeit
def get_logger(name=__name__):
logger_base = logging.getLogger(name)
logger_base.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
color_formatter = ColoredFormatter('%(log_color)s[%(module)-12s][%(funcName)-12s][%(levelname)-8s] %(message)s')
# formatter = logging.Formatter('[%(module)-15s][%(funcName)-7s][%(levelname)-8s] %(message)s')
stream_handler.setFormatter(color_formatter)
logger_base.addHandler(stream_handler)
return logger_base
logger = get_logger()
class HelperConfig:
wait_time = 500
# Wait at most one minute before closing automatically
wait_time_forever = 60000
class Show:
class LineItem:
def __init__(self, row: int = 0, height: int = 0, x: int = 0):
self.row, self.height, self.x = row, height, x
def __str__(self):
return "Line[row=%s, height=%s, x=%s]" % (self.row, self.height, self.x)
class RectItem:
def __init__(self, x1: int = 0, y1: int = 0, x2: int = 0, y2: int = 0):
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
def to_tuple(self) -> tuple:
return self.x1, self.y1, self.x2, self.y2
offset = (300, 150)
resize = 4
starts = []
windows = []
fps_last_calls = []
# Compute fps over a window of n seconds
fps_calc_time = 1
fps_last_log = 0
@staticmethod
def fps_get():
time_now = timeit.default_timer()
Show.fps_last_calls.append(time_now)
# print(Show.fps_last_calls)
if Show.fps_calc_time != 0:
if len(Show.fps_last_calls) > 0 and Show.fps_last_calls[-1] - Show.fps_last_calls[0] < Show.fps_calc_time:
return 0
if len(Show.fps_last_calls) >= 3:
Show.fps_last_calls = Show.fps_last_calls[1:]
# At this point len == calc_len
d = [1 / (Show.fps_last_calls[i] - Show.fps_last_calls[i-1])
if (Show.fps_last_calls[i] - Show.fps_last_calls[i-1]) != 0 else 0
for i in range(1, len(Show.fps_last_calls))]
# print(Show.fps_last_calls, d)
return sum(d) / len(d)
@staticmethod
def fps_log():
time_now = timeit.default_timer()
fps = Show.fps_get()
if time_now - Show.fps_last_log < Show.fps_calc_time:
return
Show.fps_last_log = time_now
logger.info('fps: %s' % str(fps))
@staticmethod
def window_insert(row: int, width: int, height: int):
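        """Reserve space for a new window of the given size on the given row.

        Tracks a running x offset and maximum height per row so that windows
        shown via ``imshow`` tile next to each other, and returns the
        ``RectItem`` describing where the new window should be placed.
        """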
found = False
y = 0
for i in range(len(Show.starts)):
if Show.starts[i].row == row:
found = True
Show.starts[i].height = max(Show.starts[i].height, height)
Show.starts[i].x += width
y = i
break
if not found:
Show.starts.append(Show.LineItem(row=row, height=height, x=width))
Show.starts.sort(key=lambda x: x.row)
y = len(Show.starts) - 1
y_sum = sum([Show.starts[i].height for i in range(y)])
return Show.RectItem(Show.starts[y].x - width, y_sum,
Show.starts[y].x, y_sum + height)
@staticmethod
def window_clear():
cv2.destroyAllWindows()
Show.starts = []
Show.windows = []
@staticmethod
def imshow(window_name: str, im: np.ndarray, row: int = 0, use_pinyin=True):
if use_pinyin:
window_name = pinyin.get(window_name, format='strip', delimiter='')
else:
window_name = window_name.encode('gbk').decode('utf8', errors='ignore')
# logger.debug('im.shape: %s' % str(im.shape))
im = cv2.resize(im, (im.shape[1] // Show.resize, im.shape[0] // Show.resize))
im_size = im.shape
rect = Show.window_insert(row, im_size[1], im_size[0])
cv2.imshow(window_name, im)
if window_name not in Show.windows:
cv2.moveWindow(window_name, rect.x1 + Show.offset[0], rect.y1 + Show.offset[1])
Show.windows.append(window_name)
# logger.debug('starts now: %s' % [str(i) for i in Show.starts])
class Controls:
@staticmethod
def wait_exit(wait_time: int = HelperConfig.wait_time, key: int = 27):
wait_time = HelperConfig.wait_time_forever if wait_time == 0 else wait_time
if cv2.waitKey(wait_time) == key:
Controls.exit_kill()
    # Kill the most recently started python process
@staticmethod
def exit_kill():
result = os.popen('wmic process where name="python.exe" list brief')
data = result.read().split('\n')
pid = [d.split() for d in data if len(d) != 0 and data.index(d) != 0][-1][3]
logger.warning('Try to kill PID:%s...' % pid)
result = os.popen("taskkill.exe /f /pid %s" % pid)
logger.critical('result: %s' % result.read())
    # Interactive parameter tuning with a single trackbar
@staticmethod
def adjust(bar_name: str, window_name: str, im: np.ndarray, val: int, count, my_onchange, use_pinyin=True):
if use_pinyin:
window_name = pinyin.get(window_name, format='strip', delimiter='')
bar_name = pinyin.get(bar_name, format='strip', delimiter='')
else:
window_name = window_name.encode('gbk').decode('utf8', errors='ignore')
bar_name = bar_name.encode('gbk').decode('utf8', errors='ignore')
def onchange(obj):
my_onchange(window_name, im, cv2.getTrackbarPos(bar_name, window_name))
cv2.namedWindow(window_name)
cv2.createTrackbar(bar_name, window_name, val, count, onchange)
        # Trigger the callback once initially
onchange(None)
class ArgBase:
def __init__(self, bar_name: str, val: int = 0, val_max: int = 255):
self.bar_name, self.val, self.val_max = bar_name, val, val_max
self.window_name, self.image = None, None
def append_arg_window_name(self, window_name: str):
self.window_name = window_name
class Arg:
def __init__(self, bar_name: str, onchange, val: int = 0, val_max: int = 255):
self.bar_name, self.onchange, self.val, self.val_max = bar_name, onchange, val, val_max
self.window_name, self.image = None, None
def append_args(self, window_name: str, image: np.ndarray):
self.window_name, self.image = window_name, image
def call(self, obj=None):
if self.window_name is None or self.image is None:
logger.warning('Calling an item before appending args!')
return
self.onchange(self.window_name, self.image, cv2.getTrackbarPos(self.bar_name, self.window_name))
    # Interactive parameter tuning with multiple trackbars
@staticmethod
def adjust_multi(window_name: str, im: np.ndarray, args: list = None, use_pinyin=True):
try:
if args is None or len(args) == 0:
return
except TypeError:
return
if use_pinyin:
window_name = pinyin.get(window_name, format='strip', delimiter='')
else:
window_name = window_name.encode('gbk').decode('utf8', errors='ignore')
cv2.namedWindow(window_name)
for i in range(len(args)):
if use_pinyin:
args[i].bar_name = pinyin.get(args[i].bar_name, format='strip', delimiter='')
else:
                args[i].bar_name = args[i].bar_name.encode('gbk').decode('utf8', errors='ignore')
args[i].append_args(window_name, im)
logger.debug('creating track: %s, %s' % (args[i].window_name, args[i].bar_name))
cv2.createTrackbar(args[i].bar_name, args[i].window_name, args[i].val, args[i].val_max, args[i].call)
            # Trigger each callback once initially
args[i].call()
    # Interactive tuning of multiple parameters through one shared callback
@staticmethod
def adjust_x(window_name: str, im: np.ndarray, onchange, args: list = None, use_pinyin=True):
try:
if args is None or len(args) == 0:
return
except TypeError:
return
def _onchange(obj):
onchange(window_name, im, args)
if use_pinyin:
window_name = pinyin.get(window_name, format='strip', delimiter='')
else:
window_name = window_name.encode('gbk').decode('utf8', errors='ignore')
cv2.namedWindow(window_name)
for i in range(len(args)):
if use_pinyin:
args[i].bar_name = pinyin.get(args[i].bar_name, format='strip', delimiter='')
else:
                args[i].bar_name = args[i].bar_name.encode('gbk').decode('utf8', errors='ignore')
args[i].append_arg_window_name(window_name)
logger.debug('creating track: %s, %s' % (args[i].window_name, args[i].bar_name))
cv2.createTrackbar(args[i].bar_name, args[i].window_name, args[i].val, args[i].val_max, _onchange)
        # Trigger the shared callback once initially
onchange(window_name, im, args)
class Utils:
@staticmethod
def extend_line(x1, y1, x2, y2, x, y, flag=1, k_=None):
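        """Extend the segment (x1, y1)-(x2, y2) across the image.

        With ``flag == 1`` the line is extended horizontally from ``x = 0`` to
        ``x``; otherwise it is extended vertically from ``y = 0`` to ``y``.
        ``k_`` optionally overrides the computed slope. Returns the two
        endpoints of the extended line.
        """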
if flag == 1:
if y1 == y2:
return 0, y1, x, y2
else:
k = ((y2 - y1) / (x2 - x1) if x2 - x1 != 0 else 1000) if k_ is None else k_
b = (x1 * y2 - x2 * y1) / (x1 - x2) if x2 - x1 != 0 else 1000
x3 = 0
y3 = b
x4 = x
y4 = int(k * x4 + b)
return x3, y3, x4, y4
else:
if x1 == x2:
return x1, 0, x2, y
else:
k = ((y2 - y1) / (x2 - x1) if x2 - x1 != 0 else 1000) if k_ is None else k_
b = (x1 * y2 - x2 * y1) / (x1 - x2) if x2 - x1 != 0 else 1000
y3 = 0
x3 = int(-1 * b / k)
y4 = y
x4 = int((y4 - b) / k)
return x3, y3, x4, y4
|
the-stack_106_16793
|
from bs4 import BeautifulSoup
from molo.core.models import Main
from molo.polls.tests.base import BasePollsTestCase
from molo.polls.models import (
PollsIndexPage,
Question,
)
class TestMultiSitePolls(BasePollsTestCase):
def test_multi_site_different_polls(self):
# create poll on site 1
first_site_poll_text = 'site 1 poll'
second_site_poll_text = 'site 2 poll'
question_site1 = Question(title=first_site_poll_text)
self.polls_index.add_child(instance=question_site1)
question_site1.save_revision().publish()
# create poll on site 2
question_site2 = Question(title=second_site_poll_text)
self.polls_index_main2.add_child(instance=question_site2)
question_site2.save_revision().publish()
# request site 1
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
# check title is there
self.assertContains(response, first_site_poll_text)
self.assertNotContains(response, second_site_poll_text)
# request site 2
response = self.client2.get('/')
self.assertEquals(response.status_code, 200)
# check that site 2 poll is there
self.assertNotContains(response, first_site_poll_text)
self.assertContains(response, second_site_poll_text)
class TestDeleteButtonRemoved(BasePollsTestCase):
def test_delete_btn_removed_for_polls_index_page_in_main(self):
self.client.login(
username=self.superuser_name,
password=self.superuser_password
)
main_page = Main.objects.first()
response = self.client.get('/admin/pages/{0}/'
.format(str(main_page.pk)))
self.assertEquals(response.status_code, 200)
polls_index_page_title = (
PollsIndexPage.objects.first().title)
soup = BeautifulSoup(response.content, 'html.parser')
index_page_rows = soup.find_all('tbody')[0].find_all('tr')
for row in index_page_rows:
if row.h2.a.string == polls_index_page_title:
self.assertTrue(row.find('a', string='Edit'))
self.assertFalse(row.find('a', string='Delete'))
def test_delete_button_removed_from_dropdown_menu(self):
self.client.login(
username=self.superuser_name,
password=self.superuser_password
)
polls_index_page = PollsIndexPage.objects.first()
response = self.client.get('/admin/pages/{0}/'
.format(str(polls_index_page.pk)))
self.assertEquals(response.status_code, 200)
delete_link = ('<a href="/admin/pages/{0}/delete/" '
'title="Delete this page" class="u-link '
'is-live ">Delete</a>'
.format(str(polls_index_page.pk)))
self.assertNotContains(response, delete_link, html=True)
def test_delete_button_removed_in_edit_menu(self):
self.client.login(
username=self.superuser_name,
password=self.superuser_password
)
polls_index_page = PollsIndexPage.objects.first()
response = self.client.get('/admin/pages/{0}/edit/'
.format(str(polls_index_page.pk)))
self.assertEquals(response.status_code, 200)
delete_button = ('<li><a href="/admin/pages/{0}/delete/" '
'class="shortcut">Delete</a></li>'
.format(str(polls_index_page.pk)))
self.assertNotContains(response, delete_button, html=True)
|
the-stack_106_16795
|
from __future__ import absolute_import, division, print_function
import math
import numbers
import re
import textwrap
from distutils.version import LooseVersion
import sys
import traceback
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.api.types import (
is_categorical_dtype,
is_scalar,
is_sparse,
is_period_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
)
from .extensions import make_array_nonempty, make_scalar
from ..base import is_dask_collection
from ..compatibility import PY2, Iterator, Mapping
from ..core import get_deps
from ..local import get_sync
from ..utils import asciitable, is_arraylike, Dispatch, typename
from ..utils import is_dataframe_like as dask_is_dataframe_like
from ..utils import is_series_like as dask_is_series_like
from ..utils import is_index_like as dask_is_index_like
PANDAS_VERSION = LooseVersion(pd.__version__)
PANDAS_GT_0230 = PANDAS_VERSION >= LooseVersion("0.23.0")
PANDAS_GT_0240 = PANDAS_VERSION >= LooseVersion("0.24.0rc1")
HAS_INT_NA = PANDAS_GT_0240
def is_integer_na_dtype(t):
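    """Return True if ``t`` (or its ``dtype`` attribute) is one of pandas'
    nullable integer extension dtypes, available from pandas 0.24 onwards."""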
dtype = getattr(t, "dtype", t)
if HAS_INT_NA:
types = (
pd.Int8Dtype,
pd.Int16Dtype,
pd.Int32Dtype,
pd.Int64Dtype,
pd.UInt8Dtype,
pd.UInt16Dtype,
pd.UInt32Dtype,
pd.UInt64Dtype,
)
else:
types = ()
return isinstance(dtype, types)
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[: indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i] : indices[i + 1]]
yield df.iloc[indices[-1] :]
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided (note that the order of
the names should match the order of the columns). Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
indent = " " * kwargs.get("pad", 8)
body = textwrap.wrap(
_META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78
)
descr = "{0}\n{1}".format(_META_TYPES, "\n".join(body))
if f.__doc__:
if "$META" in f.__doc__:
f.__doc__ = f.__doc__.replace("$META", descr)
else:
# Put it at the end of the parameters section
parameter_header = "Parameters\n%s----------" % indent[4:]
first, last = re.split("Parameters\\n[ ]*----------", f.__doc__)
parameters, rest = last.split("\n\n", 1)
f.__doc__ = "{0}{1}{2}\n{3}{4}\n\n{5}".format(
first, parameter_header, parameters, indent[4:], descr, rest
)
return f
@contextmanager
def raise_on_meta_error(funcname=None, udf=False):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = "".join(traceback.format_tb(exc_traceback))
msg = "Metadata inference failed{0}.\n\n"
if udf:
msg += (
"You have supplied a custom function and Dask is unable to \n"
"determine the type of output that that function returns. \n\n"
"To resolve this please provide a meta= keyword.\n"
"The docstring of the Dask function you ran should have more information.\n\n"
)
msg += (
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
)
msg = msg.format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb)
raise ValueError(msg)
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def has_known_categories(x):
"""Returns whether the categories in `x` are known.
Parameters
----------
x : Series or CategoricalIndex
"""
x = getattr(x, "_meta", x)
if isinstance(x, pd.Series):
return UNKNOWN_CATEGORIES not in x.cat.categories
elif isinstance(x, pd.CategoricalIndex):
return UNKNOWN_CATEGORIES not in x.categories
raise TypeError("Expected Series or CategoricalIndex")
def strip_unknown_categories(x):
"""Replace any unknown categoricals with empty categoricals.
Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
cat_mask = x.dtypes == "category"
if cat_mask.any():
cats = cat_mask[cat_mask].index
for c in cats:
if not has_known_categories(x[c]):
x[c].cat.set_categories([], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype) and not has_known_categories(x):
x.cat.set_categories([], inplace=True)
if isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(
x.index
):
x.index = x.index.set_categories([])
elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
x = x.set_categories([])
return x
def clear_known_categories(x, cols=None, index=True):
"""Set categories to be unknown.
Parameters
----------
x : DataFrame, Series, Index
cols : iterable, optional
If x is a DataFrame, set only categoricals in these columns to unknown.
By default, all categorical columns are set to unknown categoricals
    index : bool, optional
        If True and x is a Series or DataFrame, also clear the known
        categories of the index.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = x.dtypes == "category"
if cols is None:
cols = mask[mask].index
elif not mask.loc[cols].all():
raise ValueError("Not all columns are categoricals")
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if index and isinstance(x.index, pd.CategoricalIndex):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
def _empty_series(name, dtype, index=None):
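    # Build an empty series of the requested dtype; categorical dtypes get a
    # single placeholder category so downstream code sees "unknown" categories
    # rather than an empty category list.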
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
make_meta = Dispatch("make_meta")
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
@make_meta.register(object)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, "_meta"):
return x._meta
elif is_arraylike(x):
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), " "got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
meta_nonempty = Dispatch("meta_nonempty")
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
data = {i: _nonempty_series(x.iloc[:, i], idx=idx) for i, c in enumerate(x.columns)}
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
return res
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of " "type {0}".format(typename(type(idx)))
)
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
def _scalar_from_dtype(dtype):
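    """Return a representative non-null scalar of the given numpy dtype."""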
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
def _nonempty_scalar(x):
if type(x) in make_scalar._lookup:
return make_scalar(x)
if np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return make_scalar(dtype)
raise TypeError("Can't handle meta of type " "'{0}'".format(typename(type(x))))
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = None
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
    elif is_sparse(dtype):
        # The same representative entry works for pandas >= 0.24 and for
        # older versions (TODO: revisit for pandas < 0.24 sparse dtypes)
        entry = _scalar_from_dtype(dtype.subtype)
        data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_0240:
data = pd.array([entry, entry], dtype=dtype)
else:
data = np.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def is_dataframe_like(df):
return dask_is_dataframe_like(df)
def is_series_like(s):
return dask_is_series_like(s)
def is_index_like(s):
return dask_is_index_like(s)
def check_meta(x, meta, funcname=None, numeric_equal=True):
"""Check that the dask metadata matches the result.
If metadata matches, ``x`` is passed through unchanged. A nice error is
raised if metadata doesn't match.
Parameters
----------
x : DataFrame, Series, or Index
meta : DataFrame, Series, or Index
The expected metadata that ``x`` should match
funcname : str, optional
The name of the function in which the metadata was specified. If
provided, the function name will be included in the error message to be
more helpful to users.
    numeric_equal : bool, optional
        If True, integer and floating dtypes compare equal. This is useful due
        to pandas' implicit conversion of integers to floats upon encountering
        missingness, which is hard to infer statically.
"""
eq_types = {"i", "f", "u"} if numeric_equal else set()
def equal_dtypes(a, b):
if is_categorical_dtype(a) != is_categorical_dtype(b):
return False
if isinstance(a, str) and a == "-" or isinstance(b, str) and b == "-":
return False
if is_categorical_dtype(a) and is_categorical_dtype(b):
if UNKNOWN_CATEGORIES in a.categories or UNKNOWN_CATEGORIES in b.categories:
return True
return a == b
return (a.kind in eq_types and b.kind in eq_types) or (a == b)
if not (
is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)
) or is_dask_collection(meta):
raise TypeError(
"Expected partition to be DataFrame, Series, or "
"Index, got `%s`" % typename(type(meta))
)
if type(x) != type(meta):
errmsg = "Expected partition of type `%s` but got " "`%s`" % (
typename(type(meta)),
typename(type(x)),
)
elif is_dataframe_like(meta):
kwargs = dict()
if PANDAS_VERSION >= "0.23.0":
kwargs["sort"] = True
dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1, **kwargs)
bad_dtypes = [
(col, a, b)
for col, a, b in dtypes.fillna("-").itertuples()
if not equal_dtypes(a, b)
]
if bad_dtypes:
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["Column", "Found", "Expected"], bad_dtypes),
)
elif not np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(x.columns)):
errmsg = (
"The columns in the computed data do not match"
" the columns in the provided metadata.\n"
" %s\n :%s" % (meta.columns, x.columns)
)
else:
return x
else:
if equal_dtypes(x.dtype, meta.dtype):
return x
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["", "dtype"], [("Found", x.dtype), ("Expected", meta.dtype)]),
)
raise ValueError(
"Metadata mismatch found%s.\n\n"
"%s" % ((" in `%s`" % funcname if funcname else ""), errmsg)
)
def index_summary(idx, name=None):
"""Summarized representation of an Index.
"""
n = len(idx)
if name is None:
name = idx.__class__.__name__
if n:
head = idx[0]
tail = idx[-1]
summary = ", {} to {}".format(head, tail)
else:
summary = ""
return "{}: {} entries{}".format(name, n, summary)
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True, result=None):
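    """Compute a dask collection (if needed), check that its metadata agrees
    with the computed result, and return that result."""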
import dask.dataframe as dd
if hasattr(dsk, "dask"):
if result is None:
result = dsk.compute(scheduler="sync")
if isinstance(dsk, dd.Index):
assert "Index" in type(result).__name__, type(result)
# assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name
assert dsk._meta.name == result.name
if isinstance(result, pd.MultiIndex):
assert result.names == dsk._meta.names
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert "Series" in type(result).__name__, type(result)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.DataFrame):
assert "DataFrame" in type(result).__name__, type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.core.Scalar):
assert np.isscalar(result) or isinstance(
result, (pd.Timestamp, pd.Timedelta)
)
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = "Unsupported dask instance {0} found".format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if is_dataframe_like(a):
if set(a.index.names) & set(a.columns):
a.index.names = [
"-overlapped-index-name-%d" % i for i in range(len(a.index.names))
]
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(
a,
b,
check_names=True,
check_dtypes=True,
check_divisions=True,
check_index=True,
**kwargs
):
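    """Assert that two dask/pandas objects are equivalent.

    Dask collections are first checked for internal consistency and computed;
    frames and series are then sorted before being compared element-wise.
    """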
if check_divisions:
assert_divisions(a)
assert_divisions(b)
if hasattr(a, "divisions") and hasattr(b, "divisions"):
at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python
bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion
assert at == bt, (at, bt)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if hasattr(a, "to_pandas"):
a = a.to_pandas()
if hasattr(b, "to_pandas"):
b = b.to_pandas()
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, "dask"):
dask = dask.dask
assert isinstance(dask, Mapping)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
raise AssertionError(
"given dask graph doesn't contain label: {label}".format(label=label)
)
def assert_divisions(ddf):
if not hasattr(ddf, "divisions"):
return
if not getattr(ddf, "known_divisions", False):
return
def index(x):
if is_index_like(x):
return x
try:
return x.index.get_level_values(0)
except AttributeError:
return x.index
results = get_sync(ddf.dask, ddf.__dask_keys__())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, "dask"):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert " " not in k
if not PY2:
assert k.split("-")[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_type_sets = [{"O", "S", "U", "a"}] # treat object and strings alike
if numeric_equal:
eq_type_sets.append({"i", "f", "u"})
def eq_dtypes(a, b):
return any(
a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets
) or (a == b)
if not is_dask_collection(res) and is_dataframe_like(res):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():
assert eq_dtypes(a, b)
elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):
a = ddf._meta.dtype
b = res.dtype
assert eq_dtypes(a, b)
else:
if hasattr(ddf._meta, "dtype"):
a = ddf._meta.dtype
if not hasattr(res, "dtype"):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert eq_dtypes(a, b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
def valid_divisions(divisions):
""" Are the provided divisions valid?
Examples
--------
>>> valid_divisions([1, 2, 3])
True
>>> valid_divisions([3, 2, 1])
False
>>> valid_divisions([1, 1, 1])
False
>>> valid_divisions([0, 1, 1])
True
>>> valid_divisions(123)
False
>>> valid_divisions([0, float('nan'), 1])
False
"""
if not isinstance(divisions, (tuple, list)):
return False
for i, x in enumerate(divisions[:-2]):
if x >= divisions[i + 1]:
return False
if isinstance(x, numbers.Number) and math.isnan(x):
return False
for x in divisions[-2:]:
if isinstance(x, numbers.Number) and math.isnan(x):
return False
if divisions[-2] > divisions[-1]:
return False
return True
|
the-stack_106_16796
|
from __future__ import print_function
import os
import pathlib as p
import copy
import subprocess
import shutil
import sys
from math import floor
# GRABS .mp4 OR .mkv FILES (preferably called E??.mp4/.mkv) AND TRANSFORMS THEM INTO DASH READY FILES, ORGANISED INTO vid?? DIRECTORIES (for the dash) AND E?? DIRECTORIES (for the .mp4)
# REQUIRES ffmpeg AND bento4 UTILITIES TO BE ACCESSIBLE EVERYWHERE ON THE DEVICE !!!!!!!!!!!!!
# REQUIRES PYTHON 3.x (the text prompts rely on input() returning strings) !!!!!!!!!!!!
while True:
subsYN = input('Subtitles ? (y/n): ')
if subsYN in ['y','Y','n','N']:
break
print('Incorrect answer...')
if subsYN in ['y','Y']:
subsYN = True
else:
subsYN = False
subs=''
if subsYN:
print('')
while True:
subsType = input('Subtitles encapsuled in media file (1)? Or external subtitles (2)? : ')
if subsType in ['1','2']:
break
        print('Incorrect answer...')
if subsType == '1':
subs=' --subtitles'
else:
subsL = input('Subtitle language ? (default eng): ')
if subsL == '':
subsL = 'en'
subs = ' [+format=webvtt,+language='+subsL+']'
print('')
cur=p.Path.cwd()
pp=[x for x in cur.iterdir()]
ppp=[x for x in pp if not(x.is_dir())]
PP=[str(x) for x in ppp]
for j in range(len(PP)):
k=len(PP[j])-1
while PP[j][k]!='\\':
k-=1
PP[j]=PP[j][k+1:]
PPP=[x.split('.')[0] for x in PP]
PPp=[x.split('.')[-1] for x in PP]
PPPP=[x+'-frag.mp4' for x in PPP]
# Fragment each media file to file-frag.mp4 AND move original media files to /E?
i=0
print("REMUX EVENTUEL ET FRAGMENTATION","=================================",sep='\n')
while i<len(PP):
testInt= True
try:
shit = int(PPP[i][1:])
except ValueError:
testInt = False
if (PPp[i] in ['mp4','mkv']) and testInt and PP[i][0]=='E':
i+=1
else:
del PP[i]
del PPP[i]
del PPp[i]
del PPPP[i]
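# Sort the remaining entries by episode number (an insertion sort on the numeric
# part of the E?? name), keeping the PP, PPP, PPPP and PPp lists aligned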
for i in range(len(PP)):
k=0
while k<i and int(PPP[k][1:])<int(PPP[i][1:]):
k+=1
PP=PP[:k]+[PP[i]]+PP[k:i]+PP[i+1:]
PPP=PPP[:k]+[PPP[i]]+PPP[k:i]+PPP[i+1:]
PPPP=PPPP[:k]+[PPPP[i]]+PPPP[k:i]+PPPP[i+1:]
PPp=PPp[:k]+[PPp[i]]+PPp[k:i]+PPp[i+1:]
length=len(PP)
for i in range(len(PP)):
print(PPP[i],' |','='*int(floor(i/length*20)),' '*(20-int(floor(i/length*20))),'| ',int(floor(i/length*100)),'%', sep='', end='\r')
sys.stdout.flush()
if PPp[i]=='mkv':
if subsYN and subsType=='1':
subprocess.call(['ffmpeg','-loglevel','quiet','-i',PP[i],'-c','copy','-c:s','mov_text',PPP[i]+'.mp4'])
else:
subprocess.call(['ffmpeg','-loglevel','quiet','-i',PP[i],'-c','copy',PPP[i]+'.mp4'])
os.system('del '+PP[i])
PP[i]=PPP[i]+'.mp4'
os.system('mkdir '+PPP[i])
subprocess.call(['mp4fragment',PP[i],PPPP[i]], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
shutil.move(PP[i],PPP[i])
print(PPP[length-1],' |','='*20,'| 100%',sep='')
# Partition each file-frag.mp4 using mp4dash
FNULL = open(os.devnull,'w')
print("",'SECTIONNING FILES','====================', sep='\n')
for i in range(len(PP)):
print(PPP[i],' |','='*int(floor(i/length*20)),' '*(20-int(floor(i/length*20))),'| ',int(floor(i/length*100)),'%', sep='', end='\r')
sys.stdout.flush()
if subsYN and subs != ' --subtitles':
subsOut = subs+PPP[i]+'.srt'
else:
subsOut=subs
dumbProcess = subprocess.call('mp4dash -o vid'+PPP[i]+subsOut+' --mpd-name=manifest.mpd --use-segment-timeline --force '+PPPP[i], shell=True, stdout=FNULL)
if subsYN and subs != ' --subtitles':
os.system('del '+PPP[i]+'.srt')
os.system('del '+PPPP[i])
shutil.move('vid'+PPP[i],PPP[i])
os.chdir(PPP[i])
os.system('ren vid'+PPP[i]+' vid')
os.system('ren '+PP[i]+' vid.mp4')
os.chdir('..')
print(PPP[length-1],' |','='*20,'| 100%',sep='')
print('')
shit = input('Press <Enter> to continue...')
|
the-stack_106_16797
|
#!/usr/bin/env python3
# GUI for the Python scripts of DARx automation. Made on https://github.com/chriskiehl/Gooey
from gooey import Gooey, GooeyParser
import sys
import module_run
import module_home
import module_rvol
import module_calibrate
import config
import streamtologger
import json
def get_positions(): #getting positions of syringe pumps from config_[name of the host].ini
return config.readvalue('antibody'), config.readvalue('reagent'), config.readvalue('needle')
antibody,reagent,needle= get_positions()
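# The progress_regex/progress_expr pair below lets Gooey drive its progress bar
# from output lines of the form "Cycle: i/N" emitted while a run is executing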
@Gooey(program_name="DarX Run", progress_regex=r".*?Cycle: (\d+)/(\d+)$", show_success_modal=False,optional_cols=3, progress_expr="x[0] / x[1] * 100", show_sidebar=True, program_description="Script configurator",image_dir="/home/pi/darx/icons/")
def parse_args():
parser = GooeyParser()
subparsers = parser.add_subparsers(help='options', dest='subparser_name')
home_parser = subparsers.add_parser('Home')
rvol_parser = subparsers.add_parser('Rvol')
prerun_parser = subparsers.add_parser('Pre-run')
run_parser = subparsers.add_parser('Run')
manual_parser = subparsers.add_parser('Manual')
release_parser = subparsers.add_parser('Release')
home_fields = home_parser.add_argument_group('Homing all syringes', 'Current positions of syringes:\nAntibody: {} steps\nReagent: {} steps\nNeedle position: {}\n'.format(antibody,reagent,needle))
    rvol_fields = rvol_parser.add_argument_group('Measuring reactor volume', 'This routine measures the reactor volume\nPress Start to execute',gooey_options={'columns': 3})
prerun_fields = prerun_parser.add_argument_group('Pre-run homogenisation routines', 'Executes two in-out cycles to homogenise\nPlease provide the reactor volume in uL')
run_fields = run_parser.add_argument_group('DarX parameters')
    manual_fields = manual_parser.add_argument_group('Control individual components', 'Press Start to execute', gooey_options={'columns': 3})
release_fields = release_parser.add_argument_group('Release routines', 'Press Start to execute', gooey_options={'columns': 3})
home_fields.add_argument("-home", metavar= "Homing utility", default='Press Start to execute')
rvol_fields.add_argument("-rvol_speed", metavar= "Speed in, ml/min", default=0.5)
prerun_fields.add_argument("-volout", metavar="Pre-run parameters", help='Execute pre-run routines. Indicate the reactor volume',default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000','message': 'Must be between 1 and 10000 uL'
}}, type=int)
prerun_fields.add_argument("-volout_uv", metavar="UV", default=False, action="store_true")
run_fields_optional = run_parser.add_argument_group('Advanced parameters', gooey_options={'columns': 3})
run_fields.add_argument("name", metavar="Experiment name", type=str)
run_fields.add_argument("-onlygenerate", metavar="Generate only", default=False, action="store_true", help="Only generate .py file")
run_fields.add_argument("-rvol", metavar="Reactor volume, uL", default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
'message': 'Must be between 1 and 10000 uL'
}}, type=int)
run_fields.add_argument("-cycles", metavar="Number of cycles", default=20, gooey_options={'validator':{'test': '0 < int(user_input) <= 200',
'message': 'Must be between 1 and 200 cycles'
}}, type=int)
run_fields.add_argument("-add", metavar="Reagent volume, uL", default=50, gooey_options={'validator':{'test': '0 < int(user_input) <= 100',
'message': 'Must be between 0 and 100 uL'
}}, type=float)
run_fields.add_argument("-coeff", metavar="Reagent decrease coefficient", default=0.95, gooey_options={'validator':{'test': '0 < float(user_input) <= 1',
'message': 'Must be between 0 and 1'
}}, type=float)
run_fields_optional.add_argument("-time", metavar="Incubation time, sec", default=900, gooey_options={'validator':{'test': '0 <= int(user_input)',
'message': 'Must be a positive number'
}}, type=int)
run_fields_optional.add_argument ("-speed_in", metavar="Speed in, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
'message': 'Must be between 0.05 and 1 ml/min'
}}, type=float)
run_fields_optional.add_argument ("-speed_out", metavar="Speed out, mL/min", default=1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
'message': 'Must be between 0.05 and 1 ml/min'
}}, type=float)
run_fields_optional.add_argument ("-speed_reagent", metavar="Speed reagent, mL/min", default=1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
                                                     'message': 'Must be between 0.05 and 5 ml/min'
}}, type=float)
run_fields_optional.add_argument ("-uv_time", metavar="Time of UV measurement, sec", default=2, gooey_options={'validator':{'test': '0 <= float(user_input) <= 10',
'message': 'Must be between 0 and 10 sec'
}}, type=float)
run_fields_optional.add_argument ("-mixing_time", metavar="Time of mixing between in/out, sec", default=30, gooey_options={'validator':{'test': '0 <= float(user_input) <= 300',
'message': 'Must be between 0 and 300 sec'
}}, type=float)
manual_fields.add_argument("-reac", metavar="Reactor syringe, uL", default=0, gooey_options={'validator':{'test': '0 <= int(user_input) <= 10000',
'message': 'Must be between 0 and 10000 uL'
}}, type=float)
manual_fields.add_argument ("-reac_speed", metavar="Speed reactor syringe, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
'message': 'Must be between 0.05 and 5 ml/min'
}}, type=float)
manual_fields.add_argument("-reac_dir", metavar="Direction (reactor)",choices=['in','out'],default='in')
manual_fields.add_argument("-reag", metavar="Reagent syringe, uL", default=0, gooey_options={'validator':{'test': '0 <= int(user_input) <= 1000',
'message': 'Must be between 0 and 1000 uL'
}}, type=float)
manual_fields.add_argument ("-reag_speed", metavar="Speed reagent syringe, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
'message': 'Must be between 0.05 and 5 ml/min'
}}, type=float)
manual_fields.add_argument("-reag_dir", metavar="Direction (reagent)",choices=['in','out'],default='in')
manual_fields.add_argument ("-manual_mixing", metavar="Mixing time, sec", default=0, gooey_options={'validator':{'test': '0 <= float(user_input) <= 6000',
'message': 'Must be between 0 and 6000 sec'
}}, type=float)
manual_fields.add_argument("-manual_needle", metavar="Move needle",choices=['up','down'])
manual_fields.add_argument("-manual_uv", metavar="UV", default=False, action="store_true")
cal_parser = subparsers.add_parser('UV')
cal_fields = cal_parser.add_argument_group('UV Detector Calibration', 'Indicate standard concentrations below (BSA, mg/ml)\nIf no concentration provided, standard will be used', gooey_options={'columns': 6})
for i in range (6):
cal_fields.add_argument ("-conc_{}".format(i+1), metavar="#{}".format(i+1), type=str)
release_fields.add_argument("name", metavar="Experiment name", type=str)
release_fields.add_argument("-rvol", metavar="Reactor volume, uL", default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
'message': 'Must be between 1 and 10000 uL'
}}, type=int)
release_fields.add_argument("-time", metavar="Time of the release, min", default=300, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
'message': 'Must be between 1 and 10000 minutes'
}}, type=int)
release_fields.add_argument ("-speed_in", metavar="Speed in, mL/min", default=0.1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
'message': 'Must be between 0.05 and 1 ml/min'
}}, type=float)
release_fields.add_argument ("-speed_out", metavar="Speed out, mL/min", default=0.1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
'message': 'Must be between 0.05 and 1 ml/min'
}}, type=float)
release_fields.add_argument("-uv", metavar="UV", default=False, action="store_true")
args = parser.parse_args()
return args
if __name__ == '__main__':
conf = parse_args()
if conf.subparser_name == 'Run':
module_run.run(conf)
elif conf.subparser_name == 'Home':
antibody,reagent,needle=get_positions()
module_home.positions(antibody,reagent,needle)
module_home.home(conf)
elif conf.subparser_name == 'Rvol':
module_rvol.volume(conf)
elif conf.subparser_name == 'Manual':
antibody,reagent,needle=get_positions()
module_run.manual(conf,antibody,reagent,needle)
elif conf.subparser_name == 'UV':
module_calibrate.cal(conf)
elif conf.subparser_name == 'Pre-run':
module_run.prerun(conf)
elif conf.subparser_name == 'Release':
antibody,reagent,needle=get_positions()
module_run.release(conf,antibody,reagent,needle)
|
the-stack_106_16798
|
'''
=========================
Automatic Text Offsetting
=========================
This example demonstrates mplot3d's offset text display.
As one rotates the 3D figure, the offsets should remain oriented the
same way as the axis label, and should also be located "away"
from the center of the plot.
This demo triggers the display of the offset text for the x and
y axis by adding 1e5 to X and Y. Anything less would not
automatically trigger it.
'''
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.mgrid[0:6*np.pi:0.25, 0:4*np.pi:0.25]
Z = np.sqrt(np.abs(np.cos(X) + np.cos(Y)))
ax.plot_surface(X + 1e5, Y + 1e5, Z, cmap='autumn', cstride=2, rstride=2)
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.set_zlabel("Z label")
ax.set_zlim(0, 2)
plt.show()
|
the-stack_106_16800
|
"""Objects that define the various meta-parameters of an experiment."""
import logging
import collections
from flow.utils.flow_warnings import deprecated_attribute
from flow.controllers.car_following_models import SimCarFollowingController
from flow.controllers.rlcontroller import RLController
from flow.controllers.lane_change_controllers import SimLaneChangeController
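# Named presets for SUMO's vehicle speed-mode and lane-change-mode bitmasks;
# the integers are passed through to SUMO to toggle its built-in safety checks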
SPEED_MODES = {
"aggressive": 0,
"obey_safe_speed": 1,
"no_collide": 7,
"right_of_way": 25,
"all_checks": 31
}
LC_MODES = {"aggressive": 0, "no_lat_collide": 512, "strategic": 1621}
# Traffic light defaults
PROGRAM_ID = 1
MAX_GAP = 3.0
DETECTOR_GAP = 0.6
SHOW_DETECTORS = True
class TrafficLightParams:
"""Base traffic light.
This class is used to place traffic lights in the network and describe
the state of these traffic lights. In addition, this class supports
modifying the states of certain lights via TraCI.
"""
def __init__(self, baseline=False):
"""Instantiate base traffic light.
Attributes
----------
        baseline: bool
            whether all traffic light parameters are set to default baseline
            values
"""
# traffic light xml properties
self.__tls_properties = dict()
# all traffic light parameters are set to default baseline values
self.baseline = baseline
def add(self,
node_id,
tls_type="static",
programID=10,
offset=None,
phases=None,
maxGap=None,
detectorGap=None,
showDetectors=None,
file=None,
freq=None):
"""Add a traffic light component to the network.
When generating networks using xml files, using this method to add a
traffic light will explicitly place the traffic light in the requested
node of the generated network.
If traffic lights are not added here but are already present in the
network (e.g. through a prebuilt net.xml file), then the traffic light
class will identify and add them separately.
Parameters
----------
node_id : str
name of the node with traffic lights
tls_type : str, optional
type of the traffic light (see Note)
programID : str, optional
id of the traffic light program (see Note)
offset : int, optional
initial time offset of the program
phases : list of dict, optional
list of phases to be followed by the traffic light, defaults
to default sumo traffic light behavior. Each element in the list
must consist of a dict with two keys:
* "duration": length of the current phase cycle (in sec)
* "state": string consist the sequence of states in the phase
* "minDur": optional
The minimum duration of the phase when using type actuated
* "maxDur": optional
The maximum duration of the phase when using type actuated
maxGap : int, optional
            describes the maximum time gap between successive vehicles that will
cause the current phase to be prolonged, **used for actuated
traffic lights**
        detectorGap : int, optional
            determines the time distance between the (automatically generated)
            detector and the stop line in seconds (at each lane's maximum
            speed), **used for actuated traffic lights**
showDetectors : bool, optional
toggles whether or not detectors are shown in sumo-gui, **used for
actuated traffic lights**
file : str, optional
which file the detector shall write results into
freq : int, optional
the period over which collected values shall be aggregated
Note
----
For information on defining traffic light properties, see:
http://sumo.dlr.de/wiki/Simulation/Traffic_Lights#Defining_New_TLS-Programs
"""
# prepare the data needed to generate xml files
self.__tls_properties[node_id] = {"id": node_id, "type": tls_type}
if programID:
self.__tls_properties[node_id]["programID"] = programID
if offset:
self.__tls_properties[node_id]["offset"] = offset
if phases:
self.__tls_properties[node_id]["phases"] = phases
if tls_type == "actuated":
# Required parameters
self.__tls_properties[node_id]["max-gap"] = \
maxGap if maxGap else MAX_GAP
self.__tls_properties[node_id]["detector-gap"] = \
detectorGap if detectorGap else DETECTOR_GAP
self.__tls_properties[node_id]["show-detectors"] = \
showDetectors if showDetectors else SHOW_DETECTORS
# Optional parameters
if file:
self.__tls_properties[node_id]["file"] = file
if freq:
self.__tls_properties[node_id]["freq"] = freq
def get_properties(self):
"""Return traffic light properties.
This is meant to be used by the generator to import traffic light data
to the .net.xml file
"""
return self.__tls_properties
def actuated_default(self):
"""Return the default values for an actuated network.
An actuated network is a network for a system where
all junctions are actuated traffic lights.
Returns
-------
tl_logic : dict
traffic light logic
"""
tl_type = "actuated"
program_id = 1
max_gap = 3.0
detector_gap = 0.8
show_detectors = True
phases = [{
"duration": "31",
"minDur": "8",
"maxDur": "45",
"state": "GrGr"
}, {
"duration": "6",
"minDur": "3",
"maxDur": "6",
"state": "yryr"
}, {
"duration": "31",
"minDur": "8",
"maxDur": "45",
"state": "rGrG"
}, {
"duration": "6",
"minDur": "3",
"maxDur": "6",
"state": "ryry"
}]
return {
"tl_type": str(tl_type),
"program_id": str(program_id),
"max_gap": str(max_gap),
"detector_gap": str(detector_gap),
"show_detectors": show_detectors,
"phases": phases
}
class VehicleParams:
"""Base vehicle class.
This is used to describe the state of all vehicles in the network.
State information on the vehicles for a given time step can be set or
retrieved from this class.
"""
def __init__(self):
"""Instantiate the base vehicle class."""
self.ids = [] # ids of all vehicles
# vehicles: Key = Vehicle ID, Value = Dictionary describing the vehicle
# Ordered dictionary used to keep neural net inputs in order
self.__vehicles = collections.OrderedDict()
#: total number of vehicles in the network
self.num_vehicles = 0
#: int : number of rl vehicles in the network
self.num_rl_vehicles = 0
#: int : number of unique types of vehicles in the network
self.num_types = 0
#: list of str : types of vehicles in the network
self.types = []
#: dict (str, str) : contains the parameters associated with each type
#: of vehicle
self.type_parameters = dict()
#: dict (str, int) : contains the minGap attribute of each type of
#: vehicle
self.minGap = dict()
#: list : initial state of the vehicles class, used for serialization
#: purposes
self.initial = []
def add(self,
veh_id,
acceleration_controller=(SimCarFollowingController, {}),
lane_change_controller=(SimLaneChangeController, {}),
routing_controller=None,
initial_speed=0,
num_vehicles=0,
car_following_params=None,
lane_change_params=None):
"""Add a sequence of vehicles to the list of vehicles in the network.
Parameters
----------
veh_id : str
base vehicle ID for the vehicles (will be appended by a number)
acceleration_controller : tup, optional
1st element: flow-specified acceleration controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
lane_change_controller : tup, optional
1st element: flow-specified lane-changer controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
routing_controller : tup, optional
1st element: flow-specified routing controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
initial_speed : float, optional
initial speed of the vehicles being added (in m/s)
num_vehicles : int, optional
number of vehicles of this type to be added to the network
car_following_params : flow.core.params.SumoCarFollowingParams
Params object specifying attributes for Sumo car following model.
lane_change_params : flow.core.params.SumoLaneChangeParams
Params object specifying attributes for Sumo lane changing model.
"""
if car_following_params is None:
# FIXME: depends on simulator
car_following_params = SumoCarFollowingParams()
if lane_change_params is None:
# FIXME: depends on simulator
lane_change_params = SumoLaneChangeParams()
type_params = {}
type_params.update(car_following_params.controller_params)
type_params.update(lane_change_params.controller_params)
# This dict will be used when trying to introduce new vehicles into
# the network via a Flow. It is passed to the vehicle kernel object
# during environment instantiation.
self.type_parameters[veh_id] = \
{"acceleration_controller": acceleration_controller,
"lane_change_controller": lane_change_controller,
"routing_controller": routing_controller,
"initial_speed": initial_speed,
"car_following_params": car_following_params,
"lane_change_params": lane_change_params}
# TODO: delete?
self.initial.append({
"veh_id":
veh_id,
"acceleration_controller":
acceleration_controller,
"lane_change_controller":
lane_change_controller,
"routing_controller":
routing_controller,
"initial_speed":
initial_speed,
"num_vehicles":
num_vehicles,
"car_following_params":
car_following_params,
"lane_change_params":
lane_change_params
})
# This is used to return the actual headways from the vehicles class.
# It is passed to the vehicle kernel class during environment
# instantiation.
self.minGap[veh_id] = type_params["minGap"]
for i in range(num_vehicles):
v_id = veh_id + '_%d' % i
# add the vehicle to the list of vehicle ids
self.ids.append(v_id)
self.__vehicles[v_id] = dict()
# specify the type
self.__vehicles[v_id]["type"] = veh_id
# update the number of vehicles
self.num_vehicles += 1
if acceleration_controller[0] == RLController:
self.num_rl_vehicles += 1
# increase the number of unique types of vehicles in the network, and
# add the type to the list of types
self.num_types += 1
self.types.append({"veh_id": veh_id, "type_params": type_params})
def get_type(self, veh_id):
"""Return the type of a specified vehicle.
Parameters
----------
veh_id : str
vehicle ID whose type the user is querying
"""
return self.__vehicles[veh_id]["type"]
class CustomVehicleParams:
"""Base vehicle class.
This is used to describe the state of all vehicles in the network.
State information on the vehicles for a given time step can be set or
retrieved from this class.
"""
def __init__(self):
"""Instantiate the base vehicle class."""
self.ids = [] # ids of all vehicles
# vehicles: Key = Vehicle ID, Value = Dictionary describing the vehicle
# Ordered dictionary used to keep neural net inputs in order
self.__vehicles = collections.OrderedDict()
#: total number of vehicles in the network
self.num_vehicles = 0
#: int : number of rl vehicles in the network
self.num_rl_vehicles = 0
#: int : number of unique types of vehicles in the network
self.num_types = 0
#: list of str : types of vehicles in the network
self.types = []
#: dict (str, str) : contains the parameters associated with each type
#: of vehicle
self.type_parameters = dict()
#: dict (str, int) : contains the minGap attribute of each type of
#: vehicle
self.minGap = dict()
#: list : initial state of the vehicles class, used for serialization
#: purposes
self.initial = []
def add(self,
veh_id,
acceleration_controller=(SimCarFollowingController, {}),
lane_change_controller=(SimLaneChangeController, {}),
routing_controller=None,
initial_speed=0,
num_vehicles=0,
car_following_params=None,
lane_change_params=None,
cooperative_weight=0):
"""Add a sequence of vehicles to the list of vehicles in the network.
Parameters
----------
veh_id : str
base vehicle ID for the vehicles (will be appended by a number)
acceleration_controller : tup, optional
1st element: flow-specified acceleration controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
lane_change_controller : tup, optional
1st element: flow-specified lane-changer controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
routing_controller : tup, optional
1st element: flow-specified routing controller
2nd element: controller parameters (may be set to None to maintain
default parameters)
initial_speed : float, optional
initial speed of the vehicles being added (in m/s)
num_vehicles : int, optional
number of vehicles of this type to be added to the network
car_following_params : flow.core.params.SumoCarFollowingParams
Params object specifying attributes for Sumo car following model.
lane_change_params : flow.core.params.SumoLaneChangeParams
Params object specifying attributes for Sumo lane changing model.
        cooperative_weight : int, optional
            the degree of cooperativeness the vehicle takes into account
            when driving (used for RL agents)
"""
if car_following_params is None:
# FIXME: depends on simulator
car_following_params = SumoCarFollowingParams()
if lane_change_params is None:
# FIXME: depends on simulator
lane_change_params = SumoLaneChangeParams()
type_params = {}
type_params.update(car_following_params.controller_params)
type_params.update(lane_change_params.controller_params)
# This dict will be used when trying to introduce new vehicles into
# the network via a Flow. It is passed to the vehicle kernel object
# during environment instantiation.
self.type_parameters[veh_id] = \
{"acceleration_controller": acceleration_controller,
"lane_change_controller": lane_change_controller,
"routing_controller": routing_controller,
"initial_speed": initial_speed,
"car_following_params": car_following_params,
"lane_change_params": lane_change_params,
"cooperative_weight":cooperative_weight}
# TODO: delete?
self.initial.append({
"veh_id":
veh_id,
"acceleration_controller":
acceleration_controller,
"lane_change_controller":
lane_change_controller,
"routing_controller":
routing_controller,
"initial_speed":
initial_speed,
"num_vehicles":
num_vehicles,
"car_following_params":
car_following_params,
"lane_change_params":
lane_change_params,
"cooperative_weight": cooperative_weight,
})
# This is used to return the actual headways from the vehicles class.
# It is passed to the vehicle kernel class during environment
# instantiation.
self.minGap[veh_id] = type_params["minGap"]
for i in range(num_vehicles):
v_id = veh_id + '_%d' % i
# add the vehicle to the list of vehicle ids
self.ids.append(v_id)
self.__vehicles[v_id] = dict()
# specify the type
self.__vehicles[v_id]["type"] = veh_id
# update the number of vehicles
self.num_vehicles += 1
if acceleration_controller[0] == RLController:
self.num_rl_vehicles += 1
# increase the number of unique types of vehicles in the network, and
# add the type to the list of types
self.num_types += 1
self.types.append({"veh_id": veh_id, "type_params": type_params})
def get_type(self, veh_id):
"""Return the type of a specified vehicle.
Parameters
----------
veh_id : str
vehicle ID whose type the user is querying
"""
return self.__vehicles[veh_id]["type"]
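# Usage sketch: a minimal way to populate a CustomVehicleParams object with a
# human-driven type and an RL type carrying a cooperative weight. The helper
# name `_example_custom_vehicles` and the vehicle counts are illustrative
# placeholders; the controllers referenced are the ones already used above.
def _example_custom_vehicles():
    vehicles = CustomVehicleParams()
    # 20 human-driven vehicles following sumo's built-in car-following model
    vehicles.add(
        veh_id="human",
        acceleration_controller=(SimCarFollowingController, {}),
        car_following_params=SumoCarFollowingParams(
            speed_mode="obey_safe_speed"),
        num_vehicles=20)
    # 2 RL-controlled vehicles with a non-zero cooperative weight
    vehicles.add(
        veh_id="rl",
        acceleration_controller=(RLController, {}),
        num_vehicles=2,
        cooperative_weight=1)
    return vehicles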
class SimParams(object):
"""Simulation-specific parameters.
All subsequent parameters of the same type must extend this.
Attributes
----------
    sim_step : float, optional
seconds per simulation step; 0.1 by default
render : str or bool, optional
specifies whether to visualize the rollout(s)
* False: no rendering
* True: delegate rendering to sumo-gui for back-compatibility
* "gray": static grayscale rendering, which is good for training
* "dgray": dynamic grayscale rendering
* "rgb": static RGB rendering
* "drgb": dynamic RGB rendering, which is good for visualization
restart_instance : bool, optional
specifies whether to restart a simulation upon reset. Restarting
        the instance helps avoid slowdowns caused by excessive inflows over
        large experiment runtimes, but also requires the gui to be started
after every reset if "render" is set to True.
emission_path : str, optional
Path to the folder in which to create the emissions output.
Emissions output is not generated if this value is not specified
save_render : bool, optional
specifies whether to save rendering data to disk
sight_radius : int, optional
sets the radius of observation for RL vehicles (meter)
show_radius : bool, optional
specifies whether to render the radius of RL observation
pxpm : int, optional
specifies rendering resolution (pixel / meter)
color_vehicles : bool, optional
whether or not to automatically color vehicles according to their types
"""
def __init__(self,
sim_step=0.1,
render=False,
restart_instance=False,
emission_path=None,
save_render=False,
sight_radius=25,
show_radius=False,
pxpm=2,
color_vehicles=True):
"""Instantiate SimParams."""
self.sim_step = sim_step
self.render = render
self.restart_instance = restart_instance
self.emission_path = emission_path
self.save_render = save_render
self.sight_radius = sight_radius
self.pxpm = pxpm
self.show_radius = show_radius
self.color_vehicles = color_vehicles
class AimsunParams(SimParams):
"""Aimsun-specific simulation parameters.
Extends SimParams.
Attributes
----------
    sim_step : float, optional
seconds per simulation step; 0.1 by default
render : str or bool, optional
specifies whether to visualize the rollout(s)
* False: no rendering
* True: delegate rendering to sumo-gui for back-compatibility
* "gray": static grayscale rendering, which is good for training
* "dgray": dynamic grayscale rendering
* "rgb": static RGB rendering
* "drgb": dynamic RGB rendering, which is good for visualization
restart_instance : bool, optional
specifies whether to restart a simulation upon reset. Restarting
        the instance helps avoid slowdowns caused by excessive inflows over
        large experiment runtimes, but also requires the gui to be started
after every reset if "render" is set to True.
emission_path : str, optional
Path to the folder in which to create the emissions output.
Emissions output is not generated if this value is not specified
save_render : bool, optional
specifies whether to save rendering data to disk
sight_radius : int, optional
sets the radius of observation for RL vehicles (meter)
show_radius : bool, optional
specifies whether to render the radius of RL observation
pxpm : int, optional
specifies rendering resolution (pixel / meter)
network_name : str, optional
name of the network generated in Aimsun.
experiment_name : str, optional
name of the experiment generated in Aimsun
replication_name : str, optional
name of the replication generated in Aimsun. When loading
an Aimsun template, this parameter must be set to the name
of the replication to be run by the simulation; in this case,
the network_name and experiment_name parameters are not
necessary as they will be obtained from the replication name.
centroid_config_name : str, optional
name of the centroid configuration to load in Aimsun. This
parameter is only used when loading an Aimsun template,
not when generating one.
subnetwork_name : str, optional
name of the subnetwork to load in Aimsun. This parameter is not
used when generating a network; it can be used when loading an
Aimsun template containing a subnetwork in order to only load
the objects contained in this subnetwork. If set to None or if the
specified subnetwork does not exist, the whole network will be loaded.
"""
def __init__(self,
sim_step=0.1,
render=False,
restart_instance=False,
emission_path=None,
save_render=False,
sight_radius=25,
show_radius=False,
pxpm=2,
# set to match Flow_Aimsun.ang's scenario name
network_name="Dynamic Scenario 866",
# set to match Flow_Aimsun.ang's experiment name
experiment_name="Micro SRC Experiment 867",
# set to match Flow_Aimsun.ang's replication name
replication_name="Replication 870",
centroid_config_name=None,
subnetwork_name=None):
"""Instantiate AimsunParams."""
super(AimsunParams, self).__init__(
sim_step, render, restart_instance, emission_path, save_render,
sight_radius, show_radius, pxpm)
self.network_name = network_name
self.experiment_name = experiment_name
self.replication_name = replication_name
self.centroid_config_name = centroid_config_name
self.subnetwork_name = subnetwork_name
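# Usage sketch: AimsunParams pointing at a template replication. The
# replication name shown is the module's own default (matching the bundled
# Flow_Aimsun.ang template); per the docstring, only the replication name is
# strictly needed when loading a template, since the network and experiment
# names are derived from it. The helper name is an illustrative placeholder.
def _example_aimsun_params():
    return AimsunParams(
        sim_step=0.1,
        render=False,
        replication_name="Replication 870",
        centroid_config_name=None)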
class SumoParams(SimParams):
"""Sumo-specific simulation parameters.
Extends SimParams.
These parameters are used to customize a sumo simulation instance upon
initialization. This includes passing the simulation step length,
specifying whether to use sumo's gui during a run, and other features
described in the Attributes below.
Attributes
----------
port : int, optional
Port for Traci to connect to; finds an empty port by default
    sim_step : float, optional
seconds per simulation step; 0.1 by default
emission_path : str, optional
Path to the folder in which to create the emissions output.
Emissions output is not generated if this value is not specified
lateral_resolution : float, optional
width of the divided sublanes within a lane, defaults to None (i.e.
        no sublanes). If this value is specified, the vehicles in the
network cannot use the "LC2013" lane change model.
no_step_log : bool, optional
        specifies whether to suppress sumo's step logs (which are otherwise
        written to the log file and printed to the terminal during runtime),
        defaults to True
render : str or bool, optional
specifies whether to visualize the rollout(s)
* False: no rendering
* True: delegate rendering to sumo-gui for back-compatibility
* "gray": static grayscale rendering, which is good for training
* "dgray": dynamic grayscale rendering
* "rgb": static RGB rendering
* "drgb": dynamic RGB rendering, which is good for visualization
save_render : bool, optional
specifies whether to save rendering data to disk
sight_radius : int, optional
sets the radius of observation for RL vehicles (meter)
show_radius : bool, optional
specifies whether to render the radius of RL observation
pxpm : int, optional
specifies rendering resolution (pixel / meter)
overtake_right : bool, optional
whether vehicles are allowed to overtake on the right as well as
the left
seed : int, optional
seed for sumo instance
restart_instance : bool, optional
specifies whether to restart a sumo instance upon reset. Restarting
        the instance helps avoid slowdowns caused by excessive inflows over
        large experiment runtimes, but also requires the gui to be started
after every reset if "render" is set to True.
print_warnings : bool, optional
If set to false, this will silence sumo warnings on the stdout
teleport_time : int, optional
If negative, vehicles don't teleport in gridlock. If positive,
they teleport after teleport_time seconds
num_clients : int, optional
Number of clients that will connect to Traci
"""
def __init__(self,
port=None,
sim_step=0.1,
emission_path=None,
lateral_resolution=None,
no_step_log=True,
render=False,
save_render=False,
sight_radius=25,
show_radius=False,
pxpm=2,
overtake_right=False,
seed=None,
restart_instance=False,
print_warnings=True,
teleport_time=-1,
num_clients=1):
"""Instantiate SumoParams."""
super(SumoParams, self).__init__(
sim_step, render, restart_instance, emission_path, save_render,
sight_radius, show_radius, pxpm)
self.port = port
self.lateral_resolution = lateral_resolution
self.no_step_log = no_step_log
self.seed = seed
self.overtake_right = overtake_right
self.print_warnings = print_warnings
self.teleport_time = teleport_time
self.num_clients = num_clients
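# Usage sketch: a SumoParams configuration geared towards long training runs:
# no gui, periodic instance restarts (to counter inflow-related slowdowns, see
# restart_instance above) and a fixed seed for reproducibility. The specific
# values and the helper name are illustrative, not recommended defaults.
def _example_sumo_params():
    return SumoParams(
        sim_step=0.1,
        render=False,
        restart_instance=True,
        seed=44)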
class EnvParams:
"""Environment and experiment-specific parameters.
This includes specifying the bounds of the action space and relevant
coefficients to the reward function, as well as specifying how the
positions of vehicles are modified in between rollouts.
Attributes
----------
additional_params : dict, optional
Specify additional environment params for a specific
environment configuration
horizon : int, optional
number of steps per rollouts
warmup_steps : int, optional
number of steps performed before the initialization of training
during a rollout. These warmup steps are not added as steps
into training, and the actions of rl agents during these steps
are dictated by sumo. Defaults to zero
sims_per_step : int, optional
number of sumo simulation steps performed in any given rollout
step. RL agents perform the same action for the duration of
these simulation steps.
    evaluate : bool, optional
        flag indicating that the evaluation reward should be used rather
        than the normal reward
clip_actions : bool, optional
specifies whether to clip actions from the policy by their range when
they are inputted to the reward function. Note that the actions are
still clipped before they are provided to `apply_rl_actions`.
"""
def __init__(self,
additional_params=None,
horizon=float('inf'),
warmup_steps=0,
sims_per_step=1,
evaluate=False,
clip_actions=True):
"""Instantiate EnvParams."""
self.additional_params = \
additional_params if additional_params is not None else {}
self.horizon = horizon
self.warmup_steps = warmup_steps
self.sims_per_step = sims_per_step
self.evaluate = evaluate
self.clip_actions = clip_actions
def get_additional_param(self, key):
"""Return a variable from additional_params."""
return self.additional_params[key]
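# Usage sketch: an EnvParams object for a 3000-step rollout where each RL
# action is held for two simulation steps and the first 50 steps are
# sumo-controlled warmup. The "max_accel" key is a hypothetical additional
# parameter; the keys actually required depend on the environment used.
def _example_env_params():
    env_params = EnvParams(
        additional_params={"max_accel": 1.0},
        horizon=3000,
        warmup_steps=50,
        sims_per_step=2)
    return env_params.get_additional_param("max_accel")  # -> 1.0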
class NetParams:
"""Network configuration parameters.
Unlike most other parameters, NetParams may vary drastically dependent
on the specific network configuration. For example, for the ring road
the network parameters will include a characteristic length, number of
lanes, and speed limit.
In order to determine which additional_params variable may be needed
for a specific network, refer to the ADDITIONAL_NET_PARAMS variable
located in the network file.
Attributes
----------
inflows : InFlows type, optional
specifies the inflows of specific edges and the types of vehicles
entering the network from these edges
osm_path : str, optional
path to the .osm file that should be used to generate the network
configuration files
template : str, optional
path to the network template file that can be used to instantiate a
        network in the simulator of choice
additional_params : dict, optional
network specific parameters; see each subclass for a description of
what is needed
"""
def __init__(self,
inflows=None,
osm_path=None,
template=None,
additional_params=None):
"""Instantiate NetParams."""
self.inflows = inflows or InFlows()
self.osm_path = osm_path
self.template = template
self.additional_params = additional_params or {}
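# Usage sketch: NetParams combining an InFlows object with network-specific
# parameters. The "length" and "lanes" keys are placeholders; the required
# keys are given by the ADDITIONAL_NET_PARAMS of the chosen network.
def _example_net_params(inflows):
    return NetParams(
        inflows=inflows,
        additional_params={"length": 230, "lanes": 1})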
class InitialConfig:
"""Initial configuration parameters.
    These are parameters that affect the positioning of vehicles in the
network at the start of a rollout. By default, vehicles are uniformly
distributed in the network.
Attributes
----------
shuffle : bool, optional # TODO: remove
specifies whether the ordering of vehicles in the Vehicles class
should be shuffled upon initialization.
spacing : str, optional
specifies the positioning of vehicles in the network relative to
one another. May be one of: "uniform", "random", or "custom".
Default is "uniform".
min_gap : float, optional # TODO: remove
minimum gap between two vehicles upon initialization, in meters.
Default is 0 m.
x0 : float, optional # TODO: remove
position of the first vehicle to be placed in the network
perturbation : float, optional
standard deviation used to perturb vehicles from their uniform
position, in meters. Default is 0 m.
bunching : float, optional
reduces the portion of the network that should be filled with
vehicles by this amount.
lanes_distribution : int, optional
number of lanes vehicles should be dispersed into. If the value is
greater than the total number of lanes on an edge, vehicles are
spread across all lanes.
edges_distribution : str or list of str or dict, optional
edges vehicles may be placed on during initialization, may be one
of:
* "all": vehicles are distributed over all edges
* list of edges: list of edges vehicles can be distributed over
* dict of edges: where the key is the name of the edge to be
utilized, and the elements are the number of cars to place on
each edge
additional_params : dict, optional
some other network-specific params
"""
def __init__(self,
shuffle=False,
spacing="uniform",
min_gap=0,
perturbation=0.0,
x0=0,
bunching=0,
lanes_distribution=float("inf"),
edges_distribution="all",
additional_params=None):
"""Instantiate InitialConfig.
        These are parameters that affect the positioning of vehicles in the
network at the start of a rollout. By default, vehicles are uniformly
distributed in the network.
"""
self.shuffle = shuffle
self.spacing = spacing
self.min_gap = min_gap
self.perturbation = perturbation
self.x0 = x0
self.bunching = bunching
self.lanes_distribution = lanes_distribution
self.edges_distribution = edges_distribution
self.additional_params = additional_params or dict()
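# Usage sketch: an InitialConfig that spaces vehicles uniformly, perturbs each
# position with a ~3 m standard deviation and restricts placement to two
# lanes. The values and the helper name are illustrative only.
def _example_initial_config():
    return InitialConfig(
        spacing="uniform",
        perturbation=3.0,
        lanes_distribution=2,
        edges_distribution="all")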
class SumoCarFollowingParams:
"""Parameters for sumo-controlled acceleration behavior.
Attributes
----------
speed_mode : str or int, optional
may be one of the following:
* "right_of_way" (default): respect safe speed, right of way and
brake hard at red lights if needed. DOES NOT respect
max accel and decel which enables emergency stopping.
Necessary to prevent custom models from crashing
* "obey_safe_speed": prevents vehicles from colliding
longitudinally, but can fail in cases where vehicles are allowed
to lane change
* "no_collide": Human and RL cars are preventing from reaching
speeds that may cause crashes (also serves as a failsafe). Note:
this may lead to collisions in complex networks
* "aggressive": Human and RL cars are not limited by sumo with
regard to their accelerations, and can crash longitudinally
* "all_checks": all sumo safety checks are activated
* int values may be used to define custom speed mode for the given
vehicles, specified at:
http://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#speed_mode_.280xb3.29
accel : float
see Note
decel : float
see Note
sigma : float
see Note
tau : float
see Note
min_gap : float
see minGap Note
max_speed : float
see maxSpeed Note
speed_factor : float
see speedFactor Note
speed_dev : float
see speedDev in Note
impatience : float
see Note
car_follow_model : str
see carFollowModel in Note
kwargs : dict
used to handle deprecations
Note
----
For a description of all params, see:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
def __init__(
self,
speed_mode='right_of_way',
accel=2.6,
decel=4.5,
sigma=0.5,
tau=1.0, # past 1 at sim_step=0.1 you no longer see waves
min_gap=2.5,
max_speed=30,
speed_factor=1.0,
speed_dev=0.1,
impatience=0.5,
car_follow_model="IDM",
**kwargs):
"""Instantiate SumoCarFollowingParams."""
# check for deprecations (minGap)
if "minGap" in kwargs:
deprecated_attribute(self, "minGap", "min_gap")
min_gap = kwargs["minGap"]
# check for deprecations (maxSpeed)
if "maxSpeed" in kwargs:
deprecated_attribute(self, "maxSpeed", "max_speed")
max_speed = kwargs["maxSpeed"]
# check for deprecations (speedFactor)
if "speedFactor" in kwargs:
deprecated_attribute(self, "speedFactor", "speed_factor")
speed_factor = kwargs["speedFactor"]
# check for deprecations (speedDev)
if "speedDev" in kwargs:
deprecated_attribute(self, "speedDev", "speed_dev")
speed_dev = kwargs["speedDev"]
# check for deprecations (carFollowModel)
if "carFollowModel" in kwargs:
deprecated_attribute(self, "carFollowModel", "car_follow_model")
car_follow_model = kwargs["carFollowModel"]
# create a controller_params dict with all the specified parameters
self.controller_params = {
"accel": accel,
"decel": decel,
"sigma": sigma,
"tau": tau,
"minGap": min_gap,
"maxSpeed": max_speed,
"speedFactor": speed_factor,
"speedDev": speed_dev,
"impatience": impatience,
"carFollowModel": car_follow_model,
}
# adjust the speed mode value
if isinstance(speed_mode, str) and speed_mode in SPEED_MODES:
speed_mode = SPEED_MODES[speed_mode]
elif not (isinstance(speed_mode, int)
or isinstance(speed_mode, float)):
logging.error("Setting speed mode of to default.")
speed_mode = SPEED_MODES["obey_safe_speed"]
self.speed_mode = speed_mode
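# Usage sketch: a car-following setup with milder acceleration bounds; passing
# the deprecated "minGap" keyword illustrates how the deprecation handling
# above maps it onto min_gap. Values and the helper name are illustrative,
# not tuned recommendations.
def _example_car_following_params():
    return SumoCarFollowingParams(
        speed_mode="obey_safe_speed",
        accel=1.5,
        decel=3.0,
        minGap=2.0)  # deprecated alias, forwarded to min_gap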
class SumoLaneChangeParams:
"""Parameters for sumo-controlled lane change behavior.
Attributes
----------
lane_change_mode : str or int, optional
may be one of the following:
* "no_lat_collide" (default): Human cars will not make lane
changes, RL cars can lane change into any space, no matter how
likely it is to crash
* "strategic": Human cars make lane changes in accordance with SUMO
to provide speed boosts
* "aggressive": RL cars are not limited by sumo with regard to
their lane-change actions, and can crash longitudinally
* int values may be used to define custom lane change modes for the
given vehicles, specified at:
http://sumo.dlr.de/wiki/TraCI/Change_Vehicle_State#lane_change_mode_.280xb6.29
model : str, optional
see laneChangeModel in Note
lc_strategic : float, optional
see lcStrategic in Note
lc_cooperative : float, optional
see lcCooperative in Note
lc_speed_gain : float, optional
see lcSpeedGain in Note
lc_keep_right : float, optional
see lcKeepRight in Note
lc_look_ahead_left : float, optional
see lcLookaheadLeft in Note
lc_speed_gain_right : float, optional
see lcSpeedGainRight in Note
lc_sublane : float, optional
see lcSublane in Note
lc_pushy : float, optional
see lcPushy in Note
lc_pushy_gap : float, optional
see lcPushyGap in Note
lc_assertive : float, optional
see lcAssertive in Note
lc_impatience : float, optional
see lcImpatience in Note
lc_time_to_impatience : float, optional
see lcTimeToImpatience in Note
lc_accel_lat : float, optional
        see lcAccelLat in Note
kwargs : dict
used to handle deprecations
Note
----
For a description of all params, see:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
def __init__(self,
lane_change_mode="no_lat_collide",
model="LC2013",
lc_strategic=1.0,
lc_cooperative=1.0,
lc_speed_gain=1.0,
lc_keep_right=1.0,
lc_look_ahead_left=2.0,
lc_speed_gain_right=1.0,
lc_sublane=1.0,
lc_pushy=0,
lc_pushy_gap=0.6,
lc_assertive=1,
lc_impatience=0,
lc_time_to_impatience=float("inf"),
lc_accel_lat=1.0,
**kwargs):
"""Instantiate SumoLaneChangeParams."""
# check for deprecations (lcStrategic)
if "lcStrategic" in kwargs:
deprecated_attribute(self, "lcStrategic", "lc_strategic")
lc_strategic = kwargs["lcStrategic"]
# check for deprecations (lcCooperative)
if "lcCooperative" in kwargs:
deprecated_attribute(self, "lcCooperative", "lc_cooperative")
lc_cooperative = kwargs["lcCooperative"]
# check for deprecations (lcSpeedGain)
if "lcSpeedGain" in kwargs:
deprecated_attribute(self, "lcSpeedGain", "lc_speed_gain")
lc_speed_gain = kwargs["lcSpeedGain"]
# check for deprecations (lcKeepRight)
if "lcKeepRight" in kwargs:
deprecated_attribute(self, "lcKeepRight", "lc_keep_right")
lc_keep_right = kwargs["lcKeepRight"]
# check for deprecations (lcLookaheadLeft)
if "lcLookaheadLeft" in kwargs:
deprecated_attribute(self, "lcLookaheadLeft", "lc_look_ahead_left")
lc_look_ahead_left = kwargs["lcLookaheadLeft"]
# check for deprecations (lcSpeedGainRight)
if "lcSpeedGainRight" in kwargs:
deprecated_attribute(self, "lcSpeedGainRight",
"lc_speed_gain_right")
lc_speed_gain_right = kwargs["lcSpeedGainRight"]
# check for deprecations (lcSublane)
if "lcSublane" in kwargs:
deprecated_attribute(self, "lcSublane", "lc_sublane")
lc_sublane = kwargs["lcSublane"]
# check for deprecations (lcPushy)
if "lcPushy" in kwargs:
deprecated_attribute(self, "lcPushy", "lc_pushy")
lc_pushy = kwargs["lcPushy"]
# check for deprecations (lcPushyGap)
if "lcPushyGap" in kwargs:
deprecated_attribute(self, "lcPushyGap", "lc_pushy_gap")
lc_pushy_gap = kwargs["lcPushyGap"]
# check for deprecations (lcAssertive)
if "lcAssertive" in kwargs:
deprecated_attribute(self, "lcAssertive", "lc_assertive")
lc_assertive = kwargs["lcAssertive"]
# check for deprecations (lcImpatience)
if "lcImpatience" in kwargs:
deprecated_attribute(self, "lcImpatience", "lc_impatience")
lc_impatience = kwargs["lcImpatience"]
# check for deprecations (lcTimeToImpatience)
if "lcTimeToImpatience" in kwargs:
deprecated_attribute(self, "lcTimeToImpatience",
"lc_time_to_impatience")
lc_time_to_impatience = kwargs["lcTimeToImpatience"]
# check for deprecations (lcAccelLat)
if "lcAccelLat" in kwargs:
deprecated_attribute(self, "lcAccelLat", "lc_accel_lat")
lc_accel_lat = kwargs["lcAccelLat"]
# check for valid model
if model not in ["LC2013", "SL2015"]:
logging.error("Invalid lane change model! Defaulting to LC2013")
model = "LC2013"
if model == "LC2013":
self.controller_params = {
"laneChangeModel": model,
"lcStrategic": str(lc_strategic),
"lcCooperative": str(lc_cooperative),
"lcSpeedGain": str(lc_speed_gain),
"lcKeepRight": str(lc_keep_right),
# "lcLookaheadLeft": str(lcLookaheadLeft),
# "lcSpeedGainRight": str(lcSpeedGainRight)
}
elif model == "SL2015":
self.controller_params = {
"laneChangeModel": model,
"lcStrategic": str(lc_strategic),
"lcCooperative": str(lc_cooperative),
"lcSpeedGain": str(lc_speed_gain),
"lcKeepRight": str(lc_keep_right),
"lcLookaheadLeft": str(lc_look_ahead_left),
"lcSpeedGainRight": str(lc_speed_gain_right),
"lcSublane": str(lc_sublane),
"lcPushy": str(lc_pushy),
"lcPushyGap": str(lc_pushy_gap),
"lcAssertive": str(lc_assertive),
"lcImpatience": str(lc_impatience),
"lcTimeToImpatience": str(lc_time_to_impatience),
"lcAccelLat": str(lc_accel_lat)
}
# adjust the lane change mode value
if isinstance(lane_change_mode, str) and lane_change_mode in LC_MODES:
lane_change_mode = LC_MODES[lane_change_mode]
elif not (isinstance(lane_change_mode, int)
or isinstance(lane_change_mode, float)):
logging.error("Setting lane change mode to default.")
lane_change_mode = LC_MODES["no_lat_collide"]
self.lane_change_mode = lane_change_mode
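# Usage sketch: lane-change parameters using the sublane-capable SL2015 model
# with more assertive gap acceptance. The values and the helper name are
# illustrative; note that the sublane model is only meaningful when
# SumoParams.lateral_resolution is set (see SumoParams above).
def _example_lane_change_params():
    return SumoLaneChangeParams(
        lane_change_mode="strategic",
        model="SL2015",
        lc_assertive=2,
        lc_pushy=0.5)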
class InFlows:
"""Used to add inflows to a network.
Inflows can be specified for any edge that has a specified route or routes.
"""
def __init__(self):
"""Instantiate Inflows."""
self.__flows = []
def add(self,
edge,
veh_type,
vehs_per_hour=None,
probability=None,
period=None,
depart_lane="first",
depart_speed=0,
name="flow",
begin=1,
end=86400,
number=None,
**kwargs):
r"""Specify a new inflow for a given type of vehicles and edge.
Parameters
----------
edge : str
starting edge for the vehicles in this inflow
veh_type : str
type of the vehicles entering the edge. Must match one of the types
set in the Vehicles class
vehs_per_hour : float, optional
number of vehicles per hour, equally spaced (in vehicles/hour).
Cannot be specified together with probability or period
probability : float, optional
probability for emitting a vehicle each second (between 0 and 1).
Cannot be specified together with vehs_per_hour or period
period : float, optional
insert equally spaced vehicles at that period (in seconds). Cannot
be specified together with vehs_per_hour or probability
depart_lane : int or str
the lane on which the vehicle shall be inserted. Can be either one
of:
* int >= 0: index of the lane (starting with rightmost = 0)
* "random": a random lane is chosen, but the vehicle insertion is
not retried if it could not be inserted
* "free": the most free (least occupied) lane is chosen
* "best": the "free" lane (see above) among those who allow the
vehicle the longest ride without the need to change lane
* "first": the rightmost lane the vehicle may use
Defaults to "first".
depart_speed : float or str
the speed with which the vehicle shall enter the network (in m/s)
can be either one of:
            - float >= 0: insertion of the vehicle is attempted at the given
speed; if that speed is unsafe, departure is delayed
- "random": vehicles enter the edge with a random speed between 0
and the speed limit on the edge; the entering speed may be
adapted to ensure a safe distance to the leading vehicle is kept
- "speedLimit": vehicles enter the edge with the maximum speed that
is allowed on this edge; if that speed is unsafe, departure is
delayed
Defaults to 0.
name : str, optional
prefix for the id of the vehicles entering via this inflow.
Defaults to "flow"
begin : float, optional
first vehicle departure time (in seconds, minimum 1 second).
Defaults to 1 second
end : float, optional
end of departure interval (in seconds). This parameter is not taken
into account if 'number' is specified. Defaults to 24 hours
number : int, optional
total number of vehicles the inflow should create (due to rounding
up, this parameter may not be exactly enforced and shouldn't be set
too small). Default: infinite (c.f. 'end' parameter)
kwargs : dict, optional
see Note
Note
----
For information on the parameters start, end, vehs_per_hour,
probability, period, number, as well as other vehicle type and routing
parameters that may be added via \*\*kwargs, refer to:
http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes
"""
# check for deprecations
def deprecate(old, new):
deprecated_attribute(self, old, new)
new_val = kwargs[old]
del kwargs[old]
return new_val
if "vehsPerHour" in kwargs:
vehs_per_hour = deprecate("vehsPerHour", "vehs_per_hour")
if "departLane" in kwargs:
depart_lane = deprecate("departLane", "depart_lane")
if "departSpeed" in kwargs:
depart_speed = deprecate("departSpeed", "depart_speed")
new_inflow = {
"name": "%s_%d" % (name, len(self.__flows)),
"vtype": veh_type,
"edge": edge,
"departLane": depart_lane,
"departSpeed": depart_speed,
"begin": begin,
"end": end
}
new_inflow.update(kwargs)
inflow_params = [vehs_per_hour, probability, period]
n_inflow_params = len(inflow_params) - inflow_params.count(None)
if n_inflow_params != 1:
raise ValueError(
"Exactly one among the three parameters 'vehs_per_hour', "
"'probability' and 'period' must be specified in InFlows.add. "
"{} were specified.".format(n_inflow_params))
if probability is not None and (probability < 0 or probability > 1):
raise ValueError(
"Inflow.add called with parameter 'probability' set to {}, but"
" probability should be between 0 and 1.".format(probability))
if begin is not None and begin < 1:
raise ValueError(
"Inflow.add called with parameter 'begin' set to {}, but begin"
" should be greater or equal than 1 second.".format(begin))
if number is not None:
del new_inflow["end"]
new_inflow["number"] = number
if vehs_per_hour is not None:
new_inflow["vehsPerHour"] = vehs_per_hour
if probability is not None:
new_inflow["probability"] = probability
if period is not None:
new_inflow["period"] = period
self.__flows.append(new_inflow)
def get(self):
"""Return the inflows of each edge."""
return self.__flows
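# Usage sketch: a 300 veh/hour inflow of the "human" vehicle type entering on
# a hypothetical edge named "highway_0", inserted on the freest lane at
# 10 m/s. Exactly one of vehs_per_hour / probability / period may be given.
def _example_inflows():
    inflow = InFlows()
    inflow.add(
        edge="highway_0",
        veh_type="human",
        vehs_per_hour=300,
        depart_lane="free",
        depart_speed=10)
    return inflow.get()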