from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import roslaunch
import rosparam
import subprocess
import imageio
from core.env import Env
from core.minisim.minisim_client import MinisimClient
from utils.helpers import Experience
from utils.options import EnvParams
# TODO: figure out logging
class MinisimEnv(Env):
initialized = False
minisim_path = None
roslaunch_map_server = None
roslaunch_node_starter = None
roscore = None
map_dir = None
def __init__(self, args, env_ind=0):
tmp = self._reset_experience
self._reset_experience = lambda: None
super(MinisimEnv, self).__init__(args, env_ind)
self._reset_experience = tmp
assert self.env_type == "minisim"
self.extras = None
self.num_robots = args.num_robots
self.curriculum = args.curriculum if hasattr(args, "curriculum") else False
self.randomize_maps = args.randomize_maps if hasattr(args, "randomize_maps") else False
self.randomize_targets = args.randomize_targets if hasattr(args, "randomize_targets") else False
self.penalize_staying = args.penalize_staying if hasattr(args, "penalize_staying") else False
self.penalize_angle_to_target = args.penalize_angle_to_target if hasattr(args,
"penalize_angle_to_target") else False
self.collision_is_terminal = args.collision_is_terminal if hasattr(args, "collision_is_terminal") else False
self.verbose_test = args.verbose_test if hasattr(args, "verbose_test") else False
self.mode = args.mode # 1(train) | 2(test model_file)
self.total_reward = 0
if self.mode == 2:
self.curriculum = False
# self.collision_is_terminal = False
self.sim_name = 'sim' + str(self.ind)
if not MinisimEnv.initialized:
self._init_roslaunch()
self.node = self._launch_node()
self.client = MinisimClient(
self.num_robots, self.seed, self.curriculum, self.mode,
self.randomize_targets, self.penalize_staying,
self.penalize_angle_to_target, self.collision_is_terminal,
'/' + self.sim_name, self.logger
)
self.client.setup() # TODO: move to client's init?
# action space setup # [linear velocity, angular velocity]
# seemed to be too small
# self.actions = [[0, 0], [1, 0], [-1, 0], [0, 1], [0, -1]] # ,[1, 1], [1, -1], [-1, 1], [-1, -1]]
# definitely too huge, only realised a few months in :(
# self.actions = [[0, 0], [10, 0], [-10, 0], [0, 16], [0, -16]] # ,[1, 1], [1, -1], [-1, 1], [-1, -1]]
# trying out
# self.actions = [[0, 0], [3, 0], [-3, 0], [0, 8], [0, -8]] # ,[1, 1], [1, -1], [-1, 1], [-1, -1]]
# trying out without the option to stand still and backwards movement
self.actions = [[3, 0], [0, 8], [0, -8]]
# try to promote more realistic behavior with slower backward movement?
# self.actions = [[0, 0], [3, 0], [-1, 0], [0, 8], [0, -8]] # ,[1, 1], [1, -1], [-1, 1], [-1, -1]]
self.logger.warning("Action Space: %s", self.actions)
# state space setup
self.logger.warning("State Space: %s", self.state_shape)
# continuous space
if args.agent_type == "a3c":
self.enable_continuous = args.enable_continuous
if args.enable_continuous:
self.logger.warning("Continuous actions not implemented for minisim yet")
else:
self.enable_continuous = False
# TODO: history is currently broken (however it was not useful according to the experiments anyway)
# it was harmful, even
if hasattr(args, "hist_len"):
self.hist_len = args.hist_len
self.state_buffer = np.zeros((self.hist_len, self.state_shape + 2 * self.num_robots))
else:
self.hist_len = 1
self._reset_experience()
def __del__(self):
if self.node is not None:
self.node.stop()
def _preprocessState(self, state):
return state
def _reset_experience(self):
super(MinisimEnv, self)._reset_experience()
self.extras = None
if self.hist_len > 1:
self.state_buffer[:] = 0
def _append_to_history(self, state):
for i in range(self.state_buffer.shape[0] - 1):
self.state_buffer[i, :] = self.state_buffer[i + 1, :]
self.state_buffer[-1, :] = state
@property
def state_shape(self):
return self.client.state_shape
@property
def action_dim(self):
return len(self.actions)
def render(self):
self.logger.warning("WARNING: asked to render minisim - user rviz instead")
def visual(self):
pass
def sample_random_action(self): # TODO: unused
return [self.actions[np.random.randint(0, len(self.actions))] for _ in xrange(self.num_robots)]
def _get_experience(self):
if self.hist_len == 1:
return Experience(state0=self.exp_state0, # NOTE: here state0 is always None
action=self.exp_action,
reward=self.exp_reward,
state1=self._preprocessState(self.exp_state1),
terminal1=self.exp_terminal1,
extras=self.extras)
else:
return Experience(state0=self.exp_state0, # NOTE: here state0 is always None
action=self.exp_action,
reward=self.exp_reward,
state1=self.state_buffer,
terminal1=self.exp_terminal1,
extras=self.extras)
def reset(self):
self._reset_experience()
self.exp_state1, self.extras = self.client.reset()
if self.hist_len > 1:
self._append_to_history(self._preprocessState(self.exp_state1))
self.total_reward = 0
return self._get_experience()
def step(self, action_index):
self.exp_action = action_index
if self.enable_continuous:
# TODO: not implemented
self.exp_state1, self.exp_reward, self.exp_terminal1, _ = self.client.step(self.exp_action)
else:
# enumerated action combinations
# print("actions taken:", [self.actions[i] for i in self._to_n_dim_idx(action_index, self.num_robots)])
# self.exp_state1, self.exp_reward, self.exp_terminal1, _ = self.client.step(
# [self.actions[i] for i in self._to_n_dim_idx(action_index, self.num_robots)]
# )
# unstructured reward
# self.exp_state1, self.exp_reward, self.exp_terminal1, _ = self.client.step(
# [self.actions[i] for i in action_index.reshape(-1)]
# )
# structured reward
self.exp_state1, self.exp_reward, self.exp_terminal1, self.extras, _ = self.client.step_structured(
[self.actions[i] for i in action_index.reshape(-1)]
)
if self.mode == 2:
# time.sleep(0.01)
self.total_reward += self.exp_reward
if self.verbose_test:
print('total reward: ', self.total_reward)
# print("actions: ", action_index)
if self.hist_len > 1:
self._append_to_history(self._preprocessState(self.exp_state1))
return self._get_experience()
def read_static_map_image(self):
# return imageio.imread(os.path.join(MinisimEnv.minisim_path, 'map', 'medium_rooms.pgm'))
# return imageio.imread(os.path.join(MinisimEnv.minisim_path,
# 'map', 'random', 'simple_gen_small_002.pgm'))
return imageio.imread(os.path.join(MinisimEnv.minisim_path,
'map', 'medium_rooms_simpler.pgm'))
# return imageio.imread(os.path.join(MinisimEnv.minisim_path,
# 'map', 'medium_rooms_new.pgm'))
# return imageio.imread(os.path.join(MinisimEnv.minisim_path,
# 'map', 'medium_rooms_new2.pgm'))
# return imageio.imread(os.path.join(MinisimEnv.minisim_path,
# 'map', 'medium_rooms_new3.pgm'))
# was supposed to be useful for a large network with a single action index output, which would
# be expanded into individual robot actions
def _to_n_dim_idx(self, idx, n_dims):
res = np.zeros(n_dims, dtype=np.int)
for i in range(n_dims):
sub = idx // len(self.actions) ** (n_dims - i - 1)  # integer division (true division is imported from __future__)
if i != n_dims - 1:
res[i] = sub
idx -= sub * len(self.actions) ** (n_dims - i - 1)
else:
res[i] = idx % len(self.actions)
return res
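# Illustrative example (assuming the 3-element action list defined above and
# num_robots == 2): a joint action index of 5 decomposes into its base-3 digits,
# i.e. _to_n_dim_idx(5, 2) -> [1, 2], giving robot 0 the action [0, 8] and
# robot 1 the action [0, -8].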
def _init_roslaunch(self):
rospack = roslaunch.rospkg.RosPack()
try:
minisim_path = rospack.get_path('minisim')
MinisimEnv.minisim_path = minisim_path
except roslaunch.rospkg.ResourceNotFound:
self.logger.warning("WARNING: minisim not found")
sys.exit(-1)
if not self.randomize_maps:
# TODO: find a way to provide the map file arg to the map_server launch file
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_small.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_small_simple.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_empty_small.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_simple_gen_small_002.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_medium_rooms.launch')
map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_medium_rooms_simpler.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_medium_rooms_new.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_medium_rooms_new2.launch')
# map_server_rlaunch_path = os.path.join(minisim_path, 'launch', 'map_server_medium_rooms_new3.launch')
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
MinisimEnv.roslaunch_map_server = roslaunch.parent.ROSLaunchParent(uuid, [map_server_rlaunch_path])
MinisimEnv.roslaunch_map_server.start()
else:
master = roslaunch.scriptapi.Master()
if not master.is_running():
MinisimEnv.roscore = subprocess.Popen('roscore')
rlaunch_path = os.path.join(minisim_path, 'launch', 'sim_srv_multimap.launch')
loader = roslaunch.xmlloader.XmlLoader(resolve_anon=False)
config = roslaunch.config.ROSLaunchConfig()
loader.load(rlaunch_path, config, verbose=False)
MinisimEnv.map_dir = config.params.values()[0].value
MinisimEnv.roslaunch_node_starter = roslaunch.scriptapi.ROSLaunch()
MinisimEnv.roslaunch_node_starter.start()
MinisimEnv.initialized = True
def _launch_node(self):
package = 'minisim'
executable = 'minisim_srv' if not self.randomize_maps else 'minisim_srv_standalone'
node = roslaunch.core.Node(package, executable, required=True, name=self.sim_name,
namespace=self.sim_name, output='screen')
if self.randomize_maps:
rosparam.set_param("/{0}/{0}/map_dir".format(self.sim_name), MinisimEnv.map_dir)
return MinisimEnv.roslaunch_node_starter.launch(node)
if __name__ == '__main__':
params = EnvParams()
env_0 = MinisimEnv(params, 0)
for i in range(50):
env_0.reset()
for j in xrange(np.random.randint(10, 100)):
env_0.step(np.random.randint(0, 3, size=(1, 1)))
# env_1 = MinisimEnv(params, 1)
# time.sleep(10000)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HashMe.
Calculate multiple checksum digests reading each input file once.
"""
import hashlib
import os
import queue
import sys
import time
from multiprocessing import cpu_count
from queue import Queue
from threading import Thread
from argparse import ArgumentParser, RawDescriptionHelpFormatter
# Information and error messages:
def outln(line):
""" Write 'line' to stdout, using the platform encoding and newline format. """
print(line, flush = True)
def errln(line):
""" Write 'line' to stderr, using the platform encoding and newline format. """
print('HashMe.py: error:', line, file = sys.stderr, flush = True)
# Use FADVISE when available:
try:
from os import posix_fadvise, POSIX_FADV_SEQUENTIAL
def fadvise_sequential(descriptor):
""" Try to advise the kernel to read from 'descriptor' sequentially. """
try:
posix_fadvise(descriptor.fileno(), 0, 0, POSIX_FADV_SEQUENTIAL)
except:
pass
except ImportError:
def fadvise_sequential(descriptor):
""" No fadvise support. """
pass
# IO utils:
def walk_binary_file(filepath, buffer_size):
""" Yield 'buffer_size' bytes from 'filepath' until EOF. """
with open(filepath, 'rb') as descriptor:
fadvise_sequential(descriptor)
while True:
chunk = descriptor.read(buffer_size)
if chunk:
yield chunk
else:
break
def walk_binary_stdin(buffer_size):
""" Yield 'buffer_size' bytes from stdin until EOF. """
# sys.stdin is a TextIOWrapper instance, use the internal buffer:
descriptor = sys.stdin.buffer
while True:
chunk = descriptor.read(buffer_size)
if chunk:
yield chunk
else:
break
def walk_binary_file_or_stdin(filepath, buffer_size = 32768):
"""
Yield 'buffer_size' bytes from filepath until EOF, or from
standard input when 'filepath' is '-'.
"""
if filepath == '-':
return walk_binary_stdin(buffer_size)
else:
return walk_binary_file(filepath, buffer_size)
def utf8_bytes(string):
""" Convert 'string' to bytes using UTF-8. """
return bytes(string, 'UTF-8')
# For portability, all checksum output is done in bytes
# to avoid Python default encoding and automatic newline conversion:
BYTES_NEWLINES = {
'dos' : b'\r\n',
'mac' : b'\r',
'unix' : b'\n',
'system' : utf8_bytes(os.linesep),
}
def binary_file_writelines(filepath, lines, newline):
"""
Open 'filepath' in binary mode and write 'lines' (as bytes) to it
using the specified 'newline' format (as bytes).
"""
with open(filepath, mode = 'wb') as descriptor:
for line in lines:
descriptor.write(line)
descriptor.write(newline)
def binary_stdout_writeline(line, newline):
"""
Write 'line' (as bytes) to stdout without buffering
using the specified 'newline' format (as bytes).
"""
sys.stdout.buffer.write(line)
sys.stdout.buffer.write(newline)
sys.stdout.flush()
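# Illustrative example of the byte-oriented helpers above (digest and filename
# are hypothetical):
#   line = utf8_bytes('{} *{}'.format('d41d8cd98f00b204e9800998ecf8427e', 'file.iso'))
#   binary_stdout_writeline(line, BYTES_NEWLINES['unix'])
# writes "d41d8cd98f00b204e9800998ecf8427e *file.iso\n" to stdout as raw bytes.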
# Threads, tasks and a thread pool:
class Worker(Thread):
"""
Thread that pops tasks from a '.todo' Queue, executes them, and puts
the completed tasks in a '.done' Queue.
A task is any object that has a run() method.
Tasks themselves are responsible for holding their own results.
"""
def __init__(self, todo, done):
super().__init__()
self.todo = todo
self.done = done
self.daemon = True
self.start()
def run(self):
while True:
task = self.todo.get()
task.run()
self.done.put(task)
self.todo.task_done()
class HashTask(object):
"""
A task that calculates multiple checksum algorithms for a given file
reading it once and storing the results in itself.
"""
def __init__(self, filepath, algorithms):
self.filepath = filepath
self.algorithms = algorithms
# will hold the computed digests after executing run():
self.digests = None
# since we run in a thread with its own context
# exception information is captured here:
self.exception = None
def run(self):
try:
instances = [hashlib.new(algorithm) for algorithm in self.algorithms]
for chunk in walk_binary_file_or_stdin(self.filepath):
for instance in instances:
instance.update(chunk)
self.digests = [instance.hexdigest() for instance in instances]
except:
self.exception = sys.exc_info()
class ThreadPool(object):
"""
Maintains a list of 'todo' and 'done' tasks and a number of threads
consuming the tasks. Child threads are expected to put the tasks
in the 'done' queue when those are completed.
"""
def __init__(self, threads):
self.threads = threads
self.tasks = []
self.results = set()
self.todo = Queue()
self.done = Queue()
def start(self, tasks):
""" Start computing tasks. """
self.tasks = tasks
for task in self.tasks:
self.todo.put(task)
for x in range(self.threads):
Worker(self.todo, self.done)
def wait_for_task(self):
""" Wait for one task to complete. """
while True:
try:
task = self.done.get(block = False)
self.results.add(task)
break
# give tasks processor time:
except queue.Empty:
time.sleep(0.1)
def poll_completed_tasks(self):
"""
Yield the computed tasks, in the order specified when 'start(tasks)'
was called, as soon as they are finished.
"""
for task in self.tasks:
while True:
if task in self.results:
yield task
break
else:
self.wait_for_task()
# at this point, all the tasks are completed:
self.todo.join()
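# Illustrative usage sketch of Worker/HashTask/ThreadPool (file names and thread
# count are hypothetical; run() below wraps this same pattern):
#   pool = ThreadPool(2)
#   tasks = [HashTask(path, ['md5', 'sha1']) for path in ('a.iso', 'b.iso')]
#   pool.start(tasks)
#   for task in pool.poll_completed_tasks():
#       print(task.digests if task.exception is None else task.exception)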
# Parser:
def make_parser():
parser = ArgumentParser(
description = __doc__,
formatter_class = RawDescriptionHelpFormatter,
epilog = 'example: HashMe.py md5 sha1 -i *.iso -o md5sums sha1sums',
usage = 'HashMe.py algorithm [algorithm ...] [option [options ...]]',
)
# positional:
parser.add_argument('algorithms',
help = 'algorithms to compute for each file',
choices = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
nargs = '+')
# optional:
parser.add_argument('-i',
help = 'files to checksum (default: stdin)',
default = ['-'],
dest = 'input', metavar = 'file',
nargs = '+')
parser.add_argument('-o',
help = 'files to write computed checksums to (default: stdout)',
dest = 'output', metavar = 'file',
nargs = '+')
parser.add_argument('--newline',
help = 'use a specific newline mode (default: system)',
choices = ['dos', 'mac', 'unix', 'system'],
default = 'system')
parser.add_argument('--threads',
help = 'number of threads ("auto" for as many as CPUs, default: 1)',
default = '1')
return parser
# Running modes:
def run(filepaths, algorithms, threads):
"""
Create a thread pool and compute all the 'algorithms' for 'filepaths'
yielding the completed tasks. On error, print exception messages.
"""
pool = ThreadPool(threads)
tasks = [HashTask(filepath, algorithms) for filepath in filepaths]
pool.start(tasks)
for task in pool.poll_completed_tasks():
if task.exception:
exc_type, exc_obj, exc_trace = task.exception
errln('{}: unable to read, skipped: {}.'.format(task.filepath, exc_obj))
yield task
def run_stdout(filepaths, algorithms, threads, newline):
""" Print all the digests for 'filepaths' to stdout. """
status = 0
for task in run(filepaths, algorithms, threads):
if task.exception:
status = 1
else:
for digest in task.digests:
line = utf8_bytes('{} *{}'.format(digest, task.filepath))
binary_stdout_writeline(line, newline)
sys.exit(status)
def run_files(filepaths, algorithms, threads, newline, targets):
""" Write each algorithm digests to target files. """
status = 0
# compute digests and collect the result lines by algorithm:
lines = { algorithm: [] for algorithm in algorithms }
for task in run(filepaths, algorithms, threads):
if task.exception:
status = 1
else:
for digest, algorithm in zip(task.digests, task.algorithms):
line = utf8_bytes('{} *{}'.format(digest, task.filepath))
lines[algorithm].append(line)
# write to the target files:
for algorithm, target in zip(algorithms, targets):
current_lines = lines[algorithm]
if len(current_lines) > 0:
try:
binary_file_writelines(target, current_lines, newline)
except OSError as err:
errln('{}: unable to write, skipped: {}.'.format(target, err))
status = 1
sys.exit(status)
# Entry point:
def main():
parser = make_parser()
options = parser.parse_args()
algorithms = options.algorithms
filepaths = options.input
targets = options.output
threads = options.threads
newline = BYTES_NEWLINES[options.newline]
# parse --threads option:
if threads == 'auto':
try:
threads = cpu_count()
except NotImplementedError:
errln('unable to determine the number of CPUs on this system.')
sys.exit(1)
else:
try:
threads = int(threads)
if threads < 1:
errln('the number of threads must be positive.')
sys.exit(1)
except ValueError:
errln('--threads must be a positive integer or "auto".')
sys.exit(1)
# run to files or stdout:
if targets:
if len(targets) != len(algorithms):
errln('incorrect number of target files.')
sys.exit(1)
run_files(filepaths, algorithms, threads, newline, targets)
else:
run_stdout(filepaths, algorithms, threads, newline)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
import os
import re
from docutils import nodes
from sphinx import addnodes
from sphinx.errors import SphinxError
from sphinx.util import logging
import aplus_nodes
import directives.meta
import lib.yaml_writer as yaml_writer
import lib.html_tools as html_tools
import lib.toc_languages as toc_languages
from lib.revealrule import parse_reveal_rule
logger = logging.getLogger(__name__)
def prepare(app):
''' Prepares environment for configuration values. '''
yaml_writer.create_directory(app)
def set_config_language_for_doc(app, docname, source):
'''Try to set config.language for the document (docname).
The config.language value affects string localization in lib/translations.py
and the Sphinx core.
The language is read from the filename suffix (chapter_en.rst) or
its parent directory (module01/en/chapter.rst). If the language can not
be read from those sources, then config.language is not modified.
'''
if not app.config.enable_rst_file_language_detection:
return
filepath = app.env.doc2path(docname)
folder = os.path.basename(os.path.dirname(filepath))
# If language is not found in the docname or the folder, nothing is done.
# Then app.env.config.language is defined in conf.py.
if re.search(r"_[a-z]{2}$", docname):
# docname has a postfix with the underscore, e.g., chapter_en.rst
# docname does not include the file type extension .rst
app.env.config.language = docname[-2:]
elif re.fullmatch(r"^[a-z]{2}$", folder):
# directory name is 2 characters long, e.g., "en"
app.env.config.language = folder
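# Illustrative examples of the detection above:
#   docname 'module01/chapter_en'               -> config.language = 'en' (filename suffix)
#   docname 'module01/en/chapter' (folder 'en') -> config.language = 'en' (parent directory)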
def _is_multilingual_course(app):
root = app.env.get_doctree(app.config.master_doc)
tocs = list(root.traverse(addnodes.toctree))
return tocs and tocs[0].get('rawcaption') == 'Select language'
def add_lang_suffix_to_links(app, docname, source):
'''Add the language suffix to doc and ref link targets as well as ref link
labels in multilingual courses.
It is more convenient to write doc links without manually added language
suffixes, e.g., :doc:`chapter1` instead of :doc:`chapter1_en`. This function
adds the language suffixes automatically since Sphinx cannot compile
the link if the target file does not exist.
Likewise, it is convenient to write identical ref link labels in the same
place in all language versions of the chapter. Sphinx requires that labels
are unique, thus language suffixes are automatically appended to the labels.
The ref links in the RST chapters also refer to the labels without the
language suffixes. The language suffixes are added automatically to
the ref links.
If the course uses a different format in links or for some other reason links
need to stay untouched, set enable_doc_link_multilang_suffix_correction to
False in order to disable doc link modifications and
enable_ref_link_multilang_suffix_correction to False in order to disable
ref link and label modifications. The variables are defined in conf.py.
'''
if (not app.config.enable_doc_link_multilang_suffix_correction and
not app.config.enable_ref_link_multilang_suffix_correction):
return
lang_suffix = docname[-3:]
# Check that the suffix is like _[a-z]{2}, for example, "_en".
if not re.fullmatch(r"^_[a-z]{2}$", lang_suffix):
return
# The source argument is a list whose only element is the content of the source file.
if app.config.enable_doc_link_multilang_suffix_correction:
# Links of the form :doc:`link text <path/file>` (no language suffix _en in the file path)
source[0] = re.sub(
r":doc:`([^`<>]+)<([^`<>]+)(?<!_[a-z]{2})>`",
r":doc:`\1<\2" + lang_suffix + r">`",
source[0])
# Links of the form :doc:`path/file` (no language suffix _en in the file path)
source[0] = re.sub(
r":doc:`([^`<>]+)(?<!_[a-z]{2})`",
r":doc:`\1" + lang_suffix + r"`",
source[0])
if not app.config.enable_ref_link_multilang_suffix_correction:
return
# Add language suffixes to label definitions (if they haven't been added manually).
# .. _mylabel:
# Labels are defined on their own lines, but there may be whitespace before them (indentation).
source[0] = re.sub(
r"^(\s*)..\s+_([\w-]+)(?<!_[a-z]{2}):(\s*)$",
r"\1.. _\2" + lang_suffix + r":\3",
source[0],
flags=re.MULTILINE)
# Links of the form :ref:`link text <label-name>` (no language suffix _en in the label)
source[0] = re.sub(
r":ref:`([^`<>]+)<([^`<>]+)(?<!_[a-z]{2})>`",
r":ref:`\1<\2" + lang_suffix + r">`",
source[0])
# Links of the form :ref:`label-name` (no language suffix _en in the label)
source[0] = re.sub(
r":ref:`([^`<>]+)(?<!_[a-z]{2})`",
r":ref:`\1" + lang_suffix + r"`",
source[0])
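# Illustrative example: while processing chapter2_en.rst (docname 'chapter2_en'),
# the substitutions above rewrite
#   :doc:`chapter1`         -> :doc:`chapter1_en`
#   :ref:`intro <my-label>` -> :ref:`intro <my-label_en>`
#   .. _my-label:           -> .. _my-label_en: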
def write(app, exception):
''' Writes the table of contents level configuration. '''
if app.builder.name != 'html':
# course configuration YAML is only built with the Sphinx HTML builder
# because some parts of the YAML generation have only been implemented
# in the visit methods of the HTML builder (aplus_nodes functions
# visit_html and depart_html)
return
if exception:
return
root = app.env.get_doctree(app.config.master_doc)
# Check for language tree.
keys = set()
if _is_multilingual_course(app):
logger.info('Detected language tree.')
indexes = []
for docname,_,doc in traverse_tocs(app, root):
i = docname.rfind('_')
if i < 0:
raise SphinxError('Language postfix is required (e.g. docname_en): ' + docname)
lang = docname[(i + 1):]
logger.info('Traverse document elements to write configuration index ({}).'.format(lang))
index = make_index(app, doc, language=lang)
yaml_writer.write(yaml_writer.file_path(app.env, 'index_' + lang), index)
indexes.append((lang, index))
logger.info('Joining language tree to one index.')
index = toc_languages.join(app, indexes)
append_manual_content(app, index)
yaml_writer.write(yaml_writer.file_path(app.env, 'index'), index)
keys |= set(m['key'] for m in index['modules'])
else:
logger.info('Traverse document elements to write configuration index.')
index = make_index(app, root)
append_manual_content(app, index)
yaml_writer.write(yaml_writer.file_path(app.env, 'index'), index)
keys |= set(m['key'] for m in index['modules'])
# Rewrite links for remote inclusion.
keys |= {'toc', 'user', 'account'}
html_tools.rewrite_outdir(app.outdir, keys, app.config.static_host)
def make_index(app, root, language=''):
# metadata is defined in the field list of the RST document before any section
# and other content. The master_doc is the main index.rst file of the course.
# The syntax for field lists in RST is like this:
# :course-start: 2019-09-16 12:00
course_meta = app.env.metadata[app.config.master_doc]
course_title = app.config.course_title
course_open = course_meta.get('course-start', app.config.course_open_date)
course_close = course_meta.get('course-end', app.config.course_close_date)
# default late deadline for modules: if defined, all modules allow late submissions
course_late = course_meta.get('course-default-late', app.config.default_late_date)
course_penalty = course_meta.get('course-default-late-penalty', app.config.default_late_penalty)
override = app.config.override
course_reveal_submission_feedback = parse_reveal_rule(
app.config.reveal_submission_feedback,
'conf.py',
None,
'reveal_submission_feedback',
)
course_reveal_model_solutions = parse_reveal_rule(
app.config.reveal_model_solutions,
'conf.py',
None,
'reveal_model_solutions',
)
modules = []
category_keys = []
def get_static_dir(app):
i = 0
while i < len(app.outdir) and i < len(app.confdir) and app.outdir[i] == app.confdir[i]:
i += 1
outdir = app.outdir.replace("\\", "/")
if outdir[i] == '/':
i += 1
return outdir[i:]
def first_title(doc):
titles = list(doc.traverse(nodes.title))
return titles[0].astext() if titles else 'Unnamed'
def first_meta(doc):
metas = list(doc.traverse(directives.meta.aplusmeta))
return metas[0].options if metas else {}
# Tries to parse date from natural text.
def parse_date(src, allow_empty=False):
if allow_empty and not src:
return None
parts = src.split(' ', 1)
d = parts[0]
t = parts[1] if len(parts) > 1 else ''
if re.match(r'^\d\d\.\d\d\.\d\d\d\d$', d):
ds = d.split('.')
d = ds[2] + '-' + ds[1] + '-' + ds[0]
elif not re.match(r'^\d\d\d\d-\d\d-\d\d$', d):
raise SphinxError('Invalid date ' + d)
if not re.match(r'^\d\d(:\d\d(:\d\d)?)?$', t):
t = '12:00'
return d + ' ' + t
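# Illustrative examples:
#   parse_date('16.09.2019')       -> '2019-09-16 12:00'
#   parse_date('2019-09-16 23:59') -> '2019-09-16 23:59'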
def parse_float(src, default):
return float(src) if src else default
# Recursive chapter parsing.
def parse_chapter(docname, doc, parent, module_meta):
for config_file in [e.yaml_write for e in doc.traverse(aplus_nodes.html) if e.has_yaml('exercise')]:
config = yaml_writer.read(config_file)
if config.get('_external', False):
exercise = config.copy()
del exercise['_external']
else:
exercise = {
'key': config['key'],
'config': config['key'] + '.yaml',
'max_submissions': config.get('max_submissions', 0),
'max_points': config.get('max_points', 0),
'difficulty': config.get('difficulty', ''),
'points_to_pass': config.get('points_to_pass', 0),
'category': config['category'],
'min_group_size': config.get('min_group_size', 1),
'max_group_size': config.get('max_group_size', 1),
'confirm_the_level': config.get('confirm_the_level', False),
}
allow_assistant_viewing = config.get('allow_assistant_viewing', app.config.allow_assistant_viewing)
allow_assistant_grading = config.get('allow_assistant_grading', app.config.allow_assistant_grading)
exercise.update({
'status': config.get('status', 'unlisted'),
'allow_assistant_viewing': allow_assistant_viewing,
'allow_assistant_grading': allow_assistant_grading,
})
if 'scale_points' in config:
exercise['max_points'] = config.pop('scale_points')
# Reveal rules: try exercise config, then module meta, then course config.
reveal_submission_feedback = config.get(
'reveal_submission_feedback',
module_meta.get(
'reveal-submission-feedback',
course_reveal_submission_feedback,
)
)
if reveal_submission_feedback:
exercise['reveal_submission_feedback'] = reveal_submission_feedback.copy()
reveal_model_solutions = config.get(
'reveal_model_solutions',
module_meta.get(
'reveal-model-solutions',
course_reveal_model_solutions,
)
)
if reveal_model_solutions:
exercise['reveal_model_solutions'] = reveal_model_solutions.copy()
if 'grading_mode' in config:
exercise['grading_mode'] = config.pop('grading_mode')
parent.append(exercise)
if not config['category'] in category_keys:
category_keys.append(config['category'])
for config_file in [e.yaml_write for e in doc.traverse(aplus_nodes.html) if e.has_yaml('exercisecollection')]:
config = yaml_writer.read(config_file)
exercise = {
'key': config['key'],
'max_points': config.get('max_points', 0),
'points_to_pass': config.get('points_to_pass', 0),
'target_url': config['target_url'],
'target_category': config['target_category'],
'category': config['category'],
'status': config.get('status', 'unlisted'),
'title': config['title'],
}
parent.append(exercise)
if not config['category'] in category_keys:
category_keys.append(config['category'])
category = 'chapter'
for name,hidden,child in traverse_tocs(app, doc):
meta = first_meta(child)
status = 'hidden' if 'hidden' in meta else (
'unlisted' if hidden else 'ready'
)
chapter = {
'status': status,
'name': first_title(child),
'static_content': name + '.html',
'category': category,
'use_wide_column': app.config.use_wide_column,
'children': [],
}
# If the chapter RST file is in a nested directory under the module
# directory (e.g., module01/material/chapter.rst instead of
# module01/chapter.rst), then the chapter key must contain parts of
# the nested directory names in order to be unique within the module.
# Different directories could contain files with the same names.
key_parts = name.split('/')
chapter['key'] = '_'.join(key_parts[1:])
if meta:
audience = meta.get('audience')
if audience:
chapter['audience'] = audience
if category in override:
chapter.update(override[category])
parent.append(chapter)
if not 'chapter' in category_keys:
category_keys.append('chapter')
parse_chapter(name, child, chapter['children'], module_meta)
# Read title from document.
if not course_title:
course_title = first_title(root)
# Traverse the documents using toctree directives.
title_date_re = re.compile(r'.*\(DL (.+)\)')
for docname,hidden,doc in traverse_tocs(app, root):
title = first_title(doc)
title_date_match = title_date_re.match(title)
meta = first_meta(doc)
status = 'hidden' if 'hidden' in meta else (
'unlisted' if hidden else 'ready'
)
read_open_src = meta.get('read-open-time', None)
open_src = meta.get('open-time', course_open)
close_src = meta.get('close-time', title_date_match.group(1) if title_date_match else course_close)
late_src = meta.get('late-time', course_late)
introduction = meta.get('introduction', None)
module = {
# modules01/index -> modules01
# modules/01/index -> modules_01
# modules/01/n/index -> modules_01_n
# ...
'key': docname if '/' not in docname else '_'.join(docname.split('/')[:-1]),
'status': status,
'name': title,
'points_to_pass': meta.get('points-to-pass', 0),
'children': [],
}
if read_open_src:
module['read-open'] = parse_date(read_open_src)
if open_src:
module['open'] = parse_date(open_src)
if close_src:
module['close'] = parse_date(close_src)
if late_src:
module['late_close'] = parse_date(late_src)
module['late_penalty'] = parse_float(meta.get('late-penalty', course_penalty), 0.0)
if introduction is not None:
module['introduction'] = introduction
modules.append(module)
parse_chapter(docname, doc, module['children'], meta)
# Create categories.
category_names = app.config.category_names
categories = {
key: {
'name': category_names.get(key, key),
} for key in category_keys
}
for key in ['chapter', 'feedback']:
if key in categories:
categories[key]['status'] = 'nototal'
# Build configuration index.
index = {
'name': course_title,
'static_dir': get_static_dir(app),
'modules': modules,
'categories': categories,
}
index['language'] = language if language else app.config.language
course_enrollment_start = course_meta.get('enrollment-start')
course_enrollment_end = course_meta.get('enrollment-end')
course_lifesupport_time = course_meta.get('lifesupport-time')
course_archive_time = course_meta.get('archive-time')
if course_open:
index['start'] = parse_date(course_open)
if course_close:
index['end'] = parse_date(course_close)
if course_enrollment_start is not None:
# None check separates the cases:
# 1) user inputs an empty value and it should be set into the YAML,
# 2) user does not define any value and no value should be set in YAML
index['enrollment_start'] = parse_date(course_enrollment_start, True)
if course_enrollment_end is not None:
index['enrollment_end'] = parse_date(course_enrollment_end, True)
if course_lifesupport_time is not None:
index['lifesupport_time'] = parse_date(course_lifesupport_time, True)
if course_archive_time is not None:
index['archive_time'] = parse_date(course_archive_time, True)
if course_meta.get('view-content-to'):
index['view_content_to'] = course_meta.get('view-content-to')
if course_meta.get('enrollment-audience'):
index['enrollment_audience'] = course_meta.get('enrollment-audience')
if course_meta.get('index-mode'):
index['index_mode'] = course_meta.get('index-mode')
if course_meta.get('content-numbering'):
index['content_numbering'] = course_meta.get('content-numbering')
if course_meta.get('module-numbering'):
index['module_numbering'] = course_meta.get('module-numbering')
if course_meta.get('numerate-ignoring-modules') is not None:
index['numerate_ignoring_modules'] = \
True if course_meta.get('numerate-ignoring-modules', False) not in (
False, 'false', 'False', 'no', 'No'
) else False
head_urls = course_meta.get('course-head-urls', app.config.course_head_urls)
if head_urls is not None:
# If the value is None, it is not set to the index.yaml nor aplus-json at all.
# If the value is an empty list, it is still part of the index.yaml
# and could be used to override a previous truthy value.
if isinstance(head_urls, str):
# convert to a list and remove empty strings
head_urls = list(filter(None, head_urls.split('\n')))
index['head_urls'] = head_urls
if course_meta.get('course-description') is not None:
index['course_description'] = course_meta.get('course-description')
if course_meta.get('course-footer') is not None:
index['course_footer'] = course_meta.get('course-footer')
return index
def append_manual_content(app, index):
def recursive_merge(config, append):
if type(append) == dict:
for key,val in append.items():
if not key in config:
config[key] = val
else:
recursive_merge(config[key], append[key])
elif type(append) == list:
for entry in append:
add = True
if 'key' in entry:
for old in config:
if 'key' in old and old['key'] == entry['key']:
recursive_merge(old, entry)
add = False
if add:
config.append(entry)
for path in app.config.append_content:
recursive_merge(index, yaml_writer.read(path))
def traverse_tocs(app, doc):
names = []
for toc in doc.traverse(addnodes.toctree):
hidden = toc.get('hidden', False)
for _,docname in toc.get('entries', []):
names.append((docname,hidden))
return [(name,hidden,app.env.get_doctree(name)) for name,hidden in names]
# Copyright 2015 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# [email protected]
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import interfaces, _supported_types, _all_types_np
from .test_pyfftw_base import run_test_suites, np_fft
from ._get_default_args import get_default_args
from distutils.version import LooseVersion
import unittest
import numpy
import warnings
import copy
warnings.filterwarnings('always')
if LooseVersion(numpy.version.version) <= LooseVersion('1.6.2'):
# We overwrite the broken _cook_nd_args with a fixed version.
from ._cook_nd_args import _cook_nd_args
numpy.fft.fftpack._cook_nd_args = _cook_nd_args
complex_dtypes = []
real_dtypes = []
if '32' in _supported_types:
complex_dtypes.extend([numpy.complex64]*2)
real_dtypes.extend([numpy.float16, numpy.float32])
if '64' in _supported_types:
complex_dtypes.append(numpy.complex128)
real_dtypes.append(numpy.float64)
if 'ld' in _supported_types:
complex_dtypes.append(numpy.clongdouble)
real_dtypes.append(numpy.longdouble)
def make_complex_data(shape, dtype):
ar, ai = dtype(numpy.random.randn(2, *shape))
return ar + 1j*ai
def make_real_data(shape, dtype):
return dtype(numpy.random.randn(*shape))
def _numpy_fft_has_norm_kwarg():
"""returns True if numpy's fft supports the norm keyword argument
This should be true for numpy >= 1.10
"""
# return LooseVersion(numpy.version.version) >= LooseVersion('1.10')
try:
np_fft.fft(numpy.ones(4), norm=None)
return True
except TypeError:
return False
if _numpy_fft_has_norm_kwarg() and numpy.__version__ < '1.13':
# use version of numpy.fft.rfft* with normalisation bug fixed
# The patched version here, corresponds to the following bugfix PR:
# https://github.com/numpy/numpy/pull/8445
from numpy.fft import fftpack as fftpk
def rfft_fix(a, n=None, axis=-1, norm=None):
# from numpy.fft import fftpack_lite as fftpack
# from numpy.fft.fftpack import _raw_fft, _unitary, _real_fft_cache
a = numpy.array(a, copy=True, dtype=float)
output = fftpk._raw_fft(a, n, axis, fftpk.fftpack.rffti,
fftpk.fftpack.rfftf, fftpk._real_fft_cache)
if fftpk._unitary(norm):
if n is None:
n = a.shape[axis]
output *= 1 / numpy.sqrt(n)
return output
def rfftn_fix(a, s=None, axes=None, norm=None):
a = numpy.array(a, copy=True, dtype=float)
s, axes = fftpk._cook_nd_args(a, s, axes)
a = rfft_fix(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fftpk.fft(a, s[ii], axes[ii], norm)
return a
def rfft2_fix(a, s=None, axes=(-2, -1), norm=None):
return rfftn_fix(a, s, axes, norm)
np_fft.rfft = rfft_fix
np_fft.rfft2 = rfft2_fix
np_fft.rfftn = rfftn_fix
functions = {
'fft': 'complex',
'ifft': 'complex',
'rfft': 'r2c',
'irfft': 'c2r',
'rfftn': 'r2c',
'hfft': 'c2r',
'ihfft': 'r2c',
'irfftn': 'c2r',
'rfft2': 'r2c',
'irfft2': 'c2r',
'fft2': 'complex',
'ifft2': 'complex',
'fftn': 'complex',
'ifftn': 'complex'}
acquired_names = ('fftfreq', 'fftshift', 'ifftshift')
if LooseVersion(numpy.version.version) >= LooseVersion('1.8'):
acquired_names += ('rfftfreq', )
class InterfacesNumpyFFTTestModule(unittest.TestCase):
''' A really simple test suite to check the module works as expected.
'''
def test_acquired_names(self):
for each_name in acquired_names:
numpy_fft_attr = getattr(numpy.fft, each_name)
acquired_attr = getattr(interfaces.numpy_fft, each_name)
self.assertIs(numpy_fft_attr, acquired_attr)
class InterfacesNumpyFFTTestFFT(unittest.TestCase):
io_dtypes = {
'complex': (complex_dtypes, make_complex_data),
'r2c': (real_dtypes, make_real_data),
'c2r': (complex_dtypes, make_complex_data)}
validator_module = np_fft
test_interface = interfaces.numpy_fft
func = 'fft'
axes_kw = 'axis'
threads_arg_name = 'threads'
overwrite_input_flag = 'overwrite_input'
default_s_from_shape_slicer = slice(-1, None)
test_shapes = (
((100,), {}),
((128, 64), {'axis': 0}),
((128, 32), {'axis': -1}),
((59, 100), {}),
((59, 99), {'axis': -1}),
((59, 99), {'axis': 0}),
((32, 32, 4), {'axis': 1}),
((32, 32, 2), {'axis': 1, 'norm': 'ortho'}),
((64, 128, 16), {}),
)
# invalid_s_shapes is:
# (size, invalid_args, error_type, error_string)
invalid_args = (
((100,), ((100, 200),), TypeError, ''),
((100, 200), ((100, 200),), TypeError, ''),
((100,), (100, (-2, -1)), TypeError, ''),
((100,), (100, -20), IndexError, ''))
realinv = False
has_norm_kwarg = _numpy_fft_has_norm_kwarg()
@property
def test_data(self):
for test_shape, kwargs in self.test_shapes:
axes = self.axes_from_kwargs(kwargs)
s = self.s_from_kwargs(test_shape, kwargs)
if not self.has_norm_kwarg and 'norm' in kwargs:
kwargs.pop('norm')
if self.realinv:
test_shape = list(test_shape)
test_shape[axes[-1]] = test_shape[axes[-1]]//2 + 1
test_shape = tuple(test_shape)
yield test_shape, s, kwargs
def __init__(self, *args, **kwargs):
super(InterfacesNumpyFFTTestFFT, self).__init__(*args, **kwargs)
# Assume python 3, but keep backwards compatibility
if not hasattr(self, 'assertRaisesRegex'):
self.assertRaisesRegex = self.assertRaisesRegexp
def validate(self, array_type, test_shape, dtype,
s, kwargs, copy_func=copy.copy):
# Run the validation with the interfaces cache disabled:
interfaces.cache.disable()
self._validate(array_type, test_shape, dtype, s, kwargs,
copy_func=copy_func)
def munge_input_array(self, array, kwargs):
return array
def _validate(self, array_type, test_shape, dtype,
s, kwargs, copy_func=copy.copy):
input_array = self.munge_input_array(
array_type(test_shape, dtype), kwargs)
orig_input_array = copy_func(input_array)
np_input_array = numpy.asarray(input_array)
# Why are long double inputs copied to double precision? It's what
# numpy silently does anyways as of v1.10 but helps with backward
# compatibility and scipy.
# https://github.com/pyFFTW/pyFFTW/pull/189#issuecomment-356449731
if np_input_array.dtype == 'clongdouble':
np_input_array = numpy.complex128(input_array)
elif np_input_array.dtype == 'longdouble':
np_input_array = numpy.float64(input_array)
with warnings.catch_warnings(record=True) as w:
# We catch the warnings so as to pick up on when
# a complex array is turned into a real array
if 'axes' in kwargs:
validator_kwargs = {'axes': kwargs['axes']}
elif 'axis' in kwargs:
validator_kwargs = {'axis': kwargs['axis']}
else:
validator_kwargs = {}
if self.has_norm_kwarg and 'norm' in kwargs:
validator_kwargs['norm'] = kwargs['norm']
try:
test_out_array = getattr(self.validator_module, self.func)(
copy_func(np_input_array), s, **validator_kwargs)
except Exception as e:
interface_exception = None
try:
getattr(self.test_interface, self.func)(
copy_func(input_array), s, **kwargs)
except Exception as _interface_exception:
# It's necessary to assign the exception to the
# already defined variable in Python 3.
# See http://www.python.org/dev/peps/pep-3110/#semantic-changes
interface_exception = _interface_exception
# If the test interface raised, so must this.
self.assertEqual(type(interface_exception), type(e),
msg='Interface exception raised. ' +
'Testing for: ' + repr(e))
return
try:
output_array = getattr(self.test_interface, self.func)(
copy_func(np_input_array), s, **kwargs)
except NotImplementedError as e:
# check if exception due to missing precision
msg = repr(e)
if 'Rebuild pyFFTW with support for' in msg:
self.skipTest(msg)
else:
raise
if (functions[self.func] == 'r2c'):
if numpy.iscomplexobj(input_array):
if len(w) > 0:
# Make sure a warning is raised
self.assertIs(
w[-1].category, numpy.ComplexWarning)
self.assertTrue(
numpy.allclose(output_array, test_out_array,
rtol=1e-2, atol=1e-4))
if _all_types_np.get(np_input_array.real.dtype, "") in _supported_types:
# supported precisions should not be converted
self.assertEqual(np_input_array.real.dtype,
output_array.real.dtype)
if (not self.overwrite_input_flag in kwargs or
not kwargs[self.overwrite_input_flag]):
self.assertTrue(numpy.allclose(input_array,
orig_input_array))
return output_array
def axes_from_kwargs(self, kwargs):
default_args = get_default_args(
getattr(self.test_interface, self.func))
if 'axis' in kwargs:
axes = (kwargs['axis'],)
elif 'axes' in kwargs:
axes = kwargs['axes']
if axes is None:
axes = default_args['axes']
else:
if 'axis' in default_args:
# default 1D
axes = (default_args['axis'],)
else:
# default nD
axes = default_args['axes']
if axes is None:
axes = (-1,)
return axes
def s_from_kwargs(self, test_shape, kwargs):
''' Return either a scalar s or a tuple depending on
whether axis or axes is specified
'''
default_args = get_default_args(
getattr(self.test_interface, self.func))
if 'axis' in kwargs:
s = test_shape[kwargs['axis']]
elif 'axes' in kwargs:
axes = kwargs['axes']
if axes is not None:
s = []
for each_axis in axes:
s.append(test_shape[each_axis])
else:
# default nD
s = []
try:
for each_axis in default_args['axes']:
s.append(test_shape[each_axis])
except TypeError:
try:
s = list(test_shape[
self.default_s_from_shape_slicer])
except TypeError:
# We had an integer as the default, so force
# it to be a list
s = [test_shape[self.default_s_from_shape_slicer]]
else:
if 'axis' in default_args:
# default 1D
s = test_shape[default_args['axis']]
else:
# default nD
s = []
try:
for each_axis in default_args['axes']:
s.append(test_shape[each_axis])
except TypeError:
s = None
return s
def test_valid(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_on_non_numpy_array(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
array_type = (lambda test_shape, dtype:
dtype_tuple[1](test_shape, dtype).tolist())
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(array_type,
test_shape, dtype, s, kwargs)
def test_fail_on_invalid_s_or_axes_or_norm(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, args, exception, e_str in self.invalid_args:
input_array = dtype_tuple[1](test_shape, dtype)
if len(args) > 2 and not self.has_norm_kwarg:
# skip tests involving the norm argument if it isn't available
continue
self.assertRaisesRegex(exception, e_str,
getattr(self.test_interface, self.func),
*((input_array,) + args))
def test_same_sized_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_bigger_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] += 2
except TypeError:
s += 2
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_smaller_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] -= 2
except TypeError:
s -= 2
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def check_arg(self, arg, arg_test_values, array_type, test_shape,
dtype, s, kwargs):
'''Check that the correct arg is passed to the builder'''
# We trust the builders to work as expected when passed
# the correct arg (the builders have their own unittests).
return_values = []
input_array = array_type(test_shape, dtype)
def fake_fft(*args, **kwargs):
return_values.append((args, kwargs))
return (args, kwargs)
try:
# Replace the function that is to be used
real_fft = getattr(self.test_interface, self.func)
setattr(self.test_interface, self.func, fake_fft)
_kwargs = kwargs.copy()
for each_value in arg_test_values:
_kwargs[arg] = each_value
builder_args = getattr(self.test_interface, self.func)(
input_array.copy(), s, **_kwargs)
self.assertTrue(builder_args[1][arg] == each_value)
# make sure it was called
self.assertTrue(len(return_values) > 0)
except:
raise
finally:
# Make sure we set it back
setattr(self.test_interface, self.func, real_fft)
# Validate it as well
for each_value in arg_test_values:
_kwargs[arg] = each_value
builder_args = getattr(self.test_interface, self.func)(
input_array.copy(), s, **_kwargs)
self.validate(array_type, test_shape, dtype, s, _kwargs)
def test_auto_align_input(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.check_arg('auto_align_input', (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_auto_contiguous_input(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
self.check_arg('auto_contiguous', (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_bigger_and_smaller_s(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
i = -1
for test_shape, s, kwargs in self.test_data:
try:
for each_axis, length in enumerate(s):
s[each_axis] += i * 2
i *= i
except TypeError:
s += i * 2
i *= i
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_dtype_coercian(self):
# Make sure we input a dtype that needs to be coerced
if functions[self.func] == 'r2c':
dtype_tuple = self.io_dtypes['complex']
else:
dtype_tuple = self.io_dtypes['r2c']
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
def test_planner_effort(self):
'''Test the planner effort arg
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
test_shape = (16,)
for dtype in dtype_tuple[0]:
s = None
if self.axes_kw == 'axis':
kwargs = {'axis': -1}
else:
kwargs = {'axes': (-1,)}
for each_effort in ('FFTW_ESTIMATE', 'FFTW_MEASURE',
'FFTW_PATIENT', 'FFTW_EXHAUSTIVE'):
kwargs['planner_effort'] = each_effort
self.validate(
dtype_tuple[1], test_shape, dtype, s, kwargs)
kwargs['planner_effort'] = 'garbage'
self.assertRaisesRegex(ValueError, 'Invalid planner effort',
self.validate,
*(dtype_tuple[1], test_shape, dtype, s, kwargs))
def test_threads_arg(self):
'''Test the threads argument
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
test_shape = (16,)
for dtype in dtype_tuple[0]:
s = None
if self.axes_kw == 'axis':
kwargs = {'axis': -1}
else:
kwargs = {'axes': (-1,)}
self.check_arg(self.threads_arg_name, (1, 2, 5, 10),
dtype_tuple[1], test_shape, dtype, s, kwargs)
kwargs[self.threads_arg_name] = 'bleh'
# Should not work
self.assertRaises(TypeError,
self.validate,
*(dtype_tuple[1], test_shape, dtype, s, kwargs))
def test_overwrite_input(self):
'''Test the overwrite_input flag
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, _kwargs in self.test_data:
s = None
kwargs = _kwargs.copy()
self.validate(dtype_tuple[1], test_shape, dtype, s, kwargs)
self.check_arg(self.overwrite_input_flag, (True, False),
dtype_tuple[1], test_shape, dtype, s, kwargs)
def test_input_maintained(self):
'''Test to make sure the input is maintained by default.
'''
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
input_array = dtype_tuple[1](test_shape, dtype)
orig_input_array = input_array.copy()
getattr(self.test_interface, self.func)(
input_array, s, **kwargs)
self.assertTrue(
numpy.alltrue(input_array == orig_input_array))
def test_on_non_writeable_array_issue_92(self):
'''Test to make sure that locked arrays work.
Regression test for issue 92.
'''
def copy_with_writeable(array_to_copy):
array_copy = array_to_copy.copy()
array_copy.flags.writeable = array_to_copy.flags.writeable
return array_copy
dtype_tuple = self.io_dtypes[functions[self.func]]
def array_type(test_shape, dtype):
a = dtype_tuple[1](test_shape, dtype)
a.flags.writeable = False
return a
for dtype in dtype_tuple[0]:
for test_shape, s, kwargs in self.test_data:
s = None
self.validate(array_type,
test_shape, dtype, s, kwargs,
copy_func=copy_with_writeable)
def test_overwrite_input_for_issue_92(self):
'''Tests that trying to overwrite a locked array fails.
'''
a = numpy.zeros((4,))
a.flags.writeable = False
self.assertRaisesRegex(
ValueError,
'overwrite_input cannot be True when the ' +
'input array flags.writeable is False',
interfaces.numpy_fft.fft,
a, overwrite_input=True)
class InterfacesNumpyFFTTestIFFT(InterfacesNumpyFFTTestFFT):
func = 'ifft'
class InterfacesNumpyFFTTestRFFT(InterfacesNumpyFFTTestFFT):
func = 'rfft'
class InterfacesNumpyFFTTestIRFFT(InterfacesNumpyFFTTestFFT):
func = 'irfft'
realinv = True
class InterfacesNumpyFFTTestHFFT(InterfacesNumpyFFTTestFFT):
func = 'hfft'
realinv = True
class InterfacesNumpyFFTTestIHFFT(InterfacesNumpyFFTTestFFT):
func = 'ihfft'
class InterfacesNumpyFFTTestFFT2(InterfacesNumpyFFTTestFFT):
axes_kw = 'axes'
func = 'ifft2'
test_shapes = (
((128, 64), {'axes': None}),
((128, 32), {'axes': None}),
((128, 32, 4), {'axes': (0, 2)}),
((59, 100), {'axes': (-2, -1)}),
((32, 32), {'axes': (-2, -1), 'norm': 'ortho'}),
((64, 128, 16), {'axes': (0, 2)}),
((4, 6, 8, 4), {'axes': (0, 3)}),
)
invalid_args = (
((100,), ((100, 200),), ValueError, ''),
((100, 200), ((100, 200, 100),), ValueError, ''),
((100,), ((100, 200), (-3, -2, -1)), ValueError, ''),
((100, 200), (100, -1), TypeError, ''),
((100, 200), ((100, 200), (-3, -2)), IndexError, 'Invalid axes'),
((100, 200), ((100,), (-3,)), IndexError, 'Invalid axes'),
# pass invalid normalisation string
((100, 200), ((100,), (-3,), 'invalid_norm'), ValueError, ''))
def test_shape_and_s_different_lengths(self):
dtype_tuple = self.io_dtypes[functions[self.func]]
for dtype in dtype_tuple[0]:
for test_shape, s, _kwargs in self.test_data:
kwargs = copy.copy(_kwargs)
try:
s = s[1:]
except TypeError:
self.skipTest('Not meaningful test on 1d arrays.')
del kwargs['axes']
self.validate(dtype_tuple[1],
test_shape, dtype, s, kwargs)
class InterfacesNumpyFFTTestIFFT2(InterfacesNumpyFFTTestFFT2):
func = 'ifft2'
class InterfacesNumpyFFTTestRFFT2(InterfacesNumpyFFTTestFFT2):
func = 'rfft2'
class InterfacesNumpyFFTTestIRFFT2(InterfacesNumpyFFTTestFFT2):
func = 'irfft2'
realinv = True
class InterfacesNumpyFFTTestFFTN(InterfacesNumpyFFTTestFFT2):
func = 'ifftn'
test_shapes = (
((128, 32, 4), {'axes': None}),
((64, 128, 16), {'axes': (0, 1, 2)}),
((4, 6, 8, 4), {'axes': (0, 3, 1)}),
((4, 6, 4, 4), {'axes': (0, 3, 1), 'norm': 'ortho'}),
((4, 6, 8, 4), {'axes': (0, 3, 1, 2)}),
)
class InterfacesNumpyFFTTestIFFTN(InterfacesNumpyFFTTestFFTN):
func = 'ifftn'
class InterfacesNumpyFFTTestRFFTN(InterfacesNumpyFFTTestFFTN):
func = 'rfftn'
class InterfacesNumpyFFTTestIRFFTN(InterfacesNumpyFFTTestFFTN):
func = 'irfftn'
realinv = True
test_cases = (
InterfacesNumpyFFTTestModule,
InterfacesNumpyFFTTestFFT,
InterfacesNumpyFFTTestIFFT,
InterfacesNumpyFFTTestRFFT,
InterfacesNumpyFFTTestIRFFT,
InterfacesNumpyFFTTestHFFT,
InterfacesNumpyFFTTestIHFFT,
InterfacesNumpyFFTTestFFT2,
InterfacesNumpyFFTTestIFFT2,
InterfacesNumpyFFTTestRFFT2,
InterfacesNumpyFFTTestIRFFT2,
InterfacesNumpyFFTTestFFTN,
InterfacesNumpyFFTTestIFFTN,
InterfacesNumpyFFTTestRFFTN,
InterfacesNumpyFFTTestIRFFTN,)
#test_set = {'InterfacesNumpyFFTTestHFFT': ('test_valid',)}
test_set = None
if __name__ == '__main__':
run_test_suites(test_cases, test_set)
"""
Module otsun.math with mathematical helper functions
"""
import numpy as np
from FreeCAD import Base
import random
import time
from functools import wraps
EPSILON = 1E-6
# Tolerance for considering equal to zero
INF = 1E20
# Infinite
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
def polar_to_cartesian(phi, theta):
"""Convert polar coordinates of unit vector to cartesian
Parameters
----------
phi : float
phi angle (ISO 31-11) in degrees
theta : float
theta angle (ISO 31-11) in degrees
Returns
-------
Base.Vector
"""
rad = np.pi / 180.0
x = np.sin(theta * rad) * np.cos(phi * rad)
y = np.sin(theta * rad) * np.sin(phi * rad)
z = np.cos(theta * rad)
return Base.Vector(x, y, z)
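# Example (illustrative): polar_to_cartesian(0, 90) is approximately
# Base.Vector(1, 0, 0), i.e. the unit vector along the x axis.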
def rad_to_deg(angle):
"""Converts radians to degrees"""
return angle * 180.0 / np.pi
# ---
# Helper functions for input of functions
# ---
def constant_function(c):
"""Create a constant function
Parameters
----------
c : float
constant to return
Returns
-------
function
Constant function equal to `c`
"""
return lambda x: c
def tabulated_function(xvalues, yvalues):
"""Create a linear interpolating function from tabulated values
Parameters
----------
xvalues : list of float
x coordinates of the tabulated values
yvalues : list of float
y coordinates of the tabulated values
Returns
-------
function
Function that interpolates by straight line segments the input data
"""
# @memoize
@lru_cache(maxsize=None)
def this_tabulated_function(x):
return np.interp(x, xvalues, yvalues)
return this_tabulated_function
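# Illustrative sketch (not part of the original module): `tabulated_function`
# returns a memoized linear interpolator, so repeated evaluations at the same
# abscissa reuse the cached np.interp result. The tabulated values below are
# hypothetical.
def _example_tabulated_function():
    """Hypothetical usage example of `tabulated_function`"""
    refractive_index = tabulated_function([400.0, 700.0], [1.52, 1.50])
    return refractive_index(550.0)  # linear interpolation gives 1.51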
# # ---
# # Helper function for random Linear congruential generator
# # ---
# _previous = None
# def random_congruential(seed=None):
# """Random Linear congruential generator based on MTH$RANDOM
#
# Parameters
# ----------
# seed : float
# seed to use in the generation of random numbers
#
# Returns
# -------
# float
#
# """
# # http://daviddeley.com/random/random4.htm
# a = 69069.0
# c = 1.0
# m = 2.0 ** 32.0
# rm = 1.0 / m
# global _previous
# if not seed:
# if not _previous:
# _previous = time.time()
# else:
# _previous = seed
# _previous = np.remainder(_previous * a + c, m)
# return _previous * rm
# ---
# Define the random algorithm
# ---
# myrandom = random_congruential
myrandom = random.random
# ---
# Helper functions for Cumulative Distribution Functions and random sampling from distributions
# ---
def cdf_from_pdf_file(data_file):
"""
Computes CDF from PDF values stored in a file
Creates a Cumulative Distribution Function from Probability Density
Function data file. Each line must be a pair of numbers x y=pdf(x).
    It returns the CDF as two lists; the first one is the list of x-values,
second one is the list of corresponding CDF values.
Parameters
----------
data_file: file or str
file or filename where PDF values are stored
Returns
-------
list of float, list of float
x-values and y-values of CDF
"""
data_array = np.loadtxt(data_file, usecols=(0, 1))
x = data_array[:, 0]
y = data_array[:, 1]
x_cdf = x
n = np.size(y)
y_ii = []
for i in np.arange(n - 1):
y_i = (y[i + 1] + y[i]) / 2.0 * (x[i + 1] - x[i])
y_ii = np.append(y_ii, y_i)
y_ii = np.append(y_ii, y_ii[-1])
k_integration = np.trapz(y_ii, x_cdf)
y_cdf = np.cumsum(y_ii) / k_integration
return x_cdf, y_cdf / y_cdf[-1]
def pick_random_from_cdf(cdf):
"""
Pick a random value according to a given CDF.
We apply the Inverse transform sampling: https://en.wikipedia.org/wiki/Inverse_transform_sampling
Parameters
----------
cdf : tuple of list of float
First list is list of x-values; second one is list of values of CDF
Returns
-------
float
"""
return np.interp(random.random(), cdf[1], cdf[0])
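# Illustrative sketch (not part of the original module): combining
# `cdf_from_pdf_file` and `pick_random_from_cdf` to draw samples distributed
# according to a tabulated PDF (inverse transform sampling). The file name and
# sample count are hypothetical.
def _example_sample_from_pdf_file(data_file, n_samples=1000):
    """Hypothetical helper returning `n_samples` values distributed as the tabulated PDF"""
    cdf = cdf_from_pdf_file(data_file)
    return [pick_random_from_cdf(cdf) for _ in range(n_samples)]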
def parallel_orthogonal_components(vector, incident, normal):
"""Decomposition of vector in components
Given `vector` (a polarization),
`incident` (direction of a ray) and
`normal` (vector orthogonal to a plane),
    decompose `vector` into
    a component contained in the reflection (parallel) plane (determined by `normal` and `incident`): p-polarized (parallel) light,
    and a component contained in the plane orthogonal to the reflection plane: s-polarized (perpendicular) light.
    It also returns the normal vector to the reflection plane.
Parameters
----------
vector : Base.Vector
incident : Base.Vector
normal : Base.Vector
Returns
-------
parallel : Base.Vector
orthogonal : Base.Vector
normal_of_parallel_plane: Base.Vector
"""
polarization_vector = vector
normal_parallel_plane = incident.cross(normal)
# orthogonal vector to reflection plane (parallel_plane)
if normal_parallel_plane.Length < EPSILON:
normal_parallel_plane = one_orthogonal_vector(normal)
normal_parallel_plane.normalize()
normal_perpendicular_plane = incident.cross(normal_parallel_plane)
# orthogonal vector to perpendicular_plane
parallel_v = (polarization_vector -
normal_parallel_plane * polarization_vector.dot(normal_parallel_plane))
# parallel_v is the projection of polarization_vector onto parallel_plane
perpendicular_v = (polarization_vector -
normal_perpendicular_plane * polarization_vector.dot(normal_perpendicular_plane))
# perpendicular_v is the projection of polarization_vector onto the perpendicular_plane
return parallel_v, perpendicular_v, normal_parallel_plane
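# Illustrative sketch (not part of the original module): when the polarization
# is orthogonal to the incident direction (as it is for light), the two
# projections returned by `parallel_orthogonal_components` add up to the
# original vector. The vectors below are hypothetical example values.
def _example_check_polarization_decomposition():
    """Hypothetical check that the p- and s-components reconstruct the polarization"""
    incident = Base.Vector(0, 0, -1)
    normal = Base.Vector(0, 1, 1).normalize()
    polarization = Base.Vector(1, 1, 0)
    parallel_v, perpendicular_v, _ = parallel_orthogonal_components(
        polarization, incident, normal)
    return (parallel_v + perpendicular_v - polarization).Length < EPSILON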
def two_orthogonal_vectors(vector):
"""Gives two orthogonal vectors of a vector
Given `vector` find two orthogonal vectors
Parameters
----------
vector : Base.Vector
Returns
-------
orthogonal_1 : Base.Vector
orthogonal_2 : Base.Vector
"""
orthogonal_1 = one_orthogonal_vector(vector)
orthogonal_2 = vector.cross(orthogonal_1)
return orthogonal_1.normalize(), orthogonal_2.normalize()
def one_orthogonal_vector(vector):
"""Gives one orthogonal vector of a vector
Given `vector` find one orthogonal vector
Parameters
----------
vector : Base.Vector
Returns
-------
orthogonal : Base.Vector
"""
min_pos = np.argmin([abs(vector[0]), abs(vector[1]), abs(vector[2])])
if min_pos == 0:
orthogonal = Base.Vector(0, vector[2], -vector[1])
elif min_pos == 1:
orthogonal = Base.Vector(vector[2], 0, -vector[0])
else:
orthogonal = Base.Vector(vector[1], -vector[0], 0)
return orthogonal.normalize()
def correct_normal(normal, incident):
"""Corrects a vector so that is in a given half plane
Parameters
----------
normal : Base.Vector
incident : Base.Vector
Returns
    -------
    Base.Vector
"""
if normal.dot(incident) > 0:
return normal * (-1)
else:
return normal
def normalize(vector):
"""Normalizes a vector"""
if vector.Length < EPSILON:
vector = vector * INF
return vector.normalize()
def arccos(x):
"""Safe modification of arccos"""
assert abs(x) < 1 + EPSILON
if abs(x) < 1 - EPSILON:
return np.arccos(x)
if x > 0:
return 0
return np.pi
def projection_on_vector(u, v):
"""Compute the projection of u on <v>"""
return (u.dot(v)/v.dot(v))*v
def projection_on_orthogonal_of_vector(u, v):
"""Compute the projection of u on the subspace orthogonal to <v>"""
return u - projection_on_vector(u, v)
def area_of_triangle(vertices):
"""Compute the area of the triangle with given vertices"""
p, q, r = vertices
pq = q-p
pr = r-p
v = pq.cross(pr)
return 0.5 * abs(v.Length)
def random_point_of_triangle(vertices):
"""Compute a random point of the triangle with given vertices"""
p, q, r = vertices
pq = q-p
pr = r-p
while True:
x = random.random()
y = random.random()
if x + y <= 1:
return p + pq*x + pr*y
|
|
# Copyright (c) 2011-13 Walter Bender
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from dbus.service import signal
from dbus.gobject_service import ExportedGObject
import telepathy
import os
import gtk
from gettext import gettext as _
from TurtleArt.tautils import (data_to_string, data_from_string, get_path,
base64_to_image, debug_output, error_output)
from TurtleArt.taconstants import DEFAULT_TURTLE_COLORS
try:
from sugar import profile
from sugar.presence import presenceservice
from sugar.presence.tubeconn import TubeConnection
except:
profile = None
from collaboration import presenceservice
from collaboration.tubeconn import TubeConnection
SERVICE = 'org.laptop.TurtleArtActivity'
IFACE = SERVICE
PATH = '/org/laptop/TurtleArtActivity'
class Collaboration():
def __init__(self, tw, activity):
""" A simplistic sharing model: the sharer is the master """
self._tw = tw
self._tw.send_event = self.send_event
self._tw.remote_turtle_dictionary = {}
self._activity = activity
self._setup_dispatch_table()
def setup(self):
# TODO: hand off role of master if sharer leaves
self.pservice = presenceservice.get_instance()
self.initiating = None # sharing (True) or joining (False)
# Add my buddy object to the list
owner = self.pservice.get_owner()
self.owner = owner
self._tw.buddies.append(self.owner)
self._share = ''
self._activity.connect('shared', self._shared_cb)
self._activity.connect('joined', self._joined_cb)
def _setup_dispatch_table(self):
self._processing_methods = {
't': self._turtle_request,
'T': self._receive_turtle_dict,
'R': self._reskin_turtle,
'f': self._move_forward,
'a': self._move_in_arc,
'r': self._rotate_turtle,
'x': self._set_xy,
'W': self._draw_text,
'c': self._set_pen_color,
'g': self._set_pen_gray_level,
's': self._set_pen_shade,
'w': self._set_pen_width,
'p': self._set_pen_state,
'F': self._fill_polygon,
'P': self._draw_pixbuf,
'B': self._paste,
'S': self._speak
}
def _shared_cb(self, activity):
self._shared_activity = self._activity.get_shared_activity()
if self._shared_activity is None:
debug_output('Failed to share or join activity ... \
_shared_activity is null in _shared_cb()',
self._tw.running_sugar)
return
self._tw.set_sharing(True)
self.initiating = True
self.waiting_for_turtles = False
self._tw.remote_turtle_dictionary = self._get_dictionary()
debug_output('I am sharing...', self._tw.running_sugar)
self.conn = self._shared_activity.telepathy_conn
self.tubes_chan = self._shared_activity.telepathy_tubes_chan
self.text_chan = self._shared_activity.telepathy_text_chan
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].connect_to_signal(
'NewTube', self._new_tube_cb)
debug_output('This is my activity: making a tube...',
self._tw.running_sugar)
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].OfferDBusTube(
SERVICE, {})
self._enable_share_button()
def _joined_cb(self, activity):
self._shared_activity = self._activity.get_shared_activity()
if self._shared_activity is None:
debug_output('Failed to share or join activity ... \
_shared_activity is null in _shared_cb()',
self._tw.running_sugar)
return
self._tw.set_sharing(True)
self.initiating = False
self.conn = self._shared_activity.telepathy_conn
self.tubes_chan = self._shared_activity.telepathy_tubes_chan
self.text_chan = self._shared_activity.telepathy_text_chan
# call back for "NewTube" signal
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].connect_to_signal(
'NewTube', self._new_tube_cb)
debug_output('I am joining an activity: waiting for a tube...',
self._tw.running_sugar)
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES].ListTubes(
reply_handler=self._list_tubes_reply_cb,
error_handler=self._list_tubes_error_cb)
# Joiner should request current state from sharer.
self.waiting_for_turtles = True
self._enable_share_button()
def _enable_share_button(self):
self._activity.share_button.set_icon('shareon')
self._activity.share_button.set_tooltip(_('Share selected blocks'))
def _list_tubes_reply_cb(self, tubes):
for tube_info in tubes:
self._new_tube_cb(*tube_info)
def _list_tubes_error_cb(self, e):
error_output('ListTubes() failed: %s' % (e), self._tw.running_sugar)
def _new_tube_cb(self, id, initiator, type, service, params, state):
""" Create a new tube. """
debug_output(
            'New tube: ID=%d initiator=%d type=%d service=%s \
params=%r state=%d' %
(id, initiator, type, service, params, state), self._tw.running_sugar)
if (type == telepathy.TUBE_TYPE_DBUS and service == SERVICE):
if state == telepathy.TUBE_STATE_LOCAL_PENDING:
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES]\
.AcceptDBusTube(id)
tube_conn = TubeConnection(
self.conn,
self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES],
id,
group_iface=self.text_chan[telepathy.CHANNEL_INTERFACE_GROUP])
# We'll use a chat tube to send serialized stacks back and forth.
self.chattube = ChatTube(tube_conn, self.initiating,
self.event_received_cb)
# Now that we have the tube, we can ask for the turtle dictionary.
if self.waiting_for_turtles: # A joiner must wait for turtles.
debug_output('Sending a request for the turtle dictionary',
self._tw.running_sugar)
# We need to send our own nick, colors, and turtle position
colors = self._get_colors()
event = 't|' + data_to_string([self._get_nick(), colors])
debug_output(event, self._tw.running_sugar)
self.send_event(event)
def event_received_cb(self, event_message):
"""
        Events are sent as a string, cmd|payload, where cmd is a one-letter
        turtle event and the payload usually begins with a turtle (nick) name.
        Everyone gets the turtle dictionary from
the sharer and watches for 't' events, which indicate that a new
turtle has joined.
"""
if len(event_message) == 0:
return
# Save active Turtle
save_active_turtle = self._tw.turtles.get_active_turtle()
try:
command, payload = event_message.split('|', 2)
except ValueError:
            debug_output('Could not split event message [%s]' % event_message,
                         self._tw.running_sugar)
            return
self._processing_methods[command](payload)
# Restore active Turtle
self._tw.turtles.set_turtle(
self._tw.turtles.get_turtle_key(save_active_turtle))
def send_event(self, entry):
""" Send event through the tube. """
if hasattr(self, 'chattube') and self.chattube is not None:
self.chattube.SendText(entry)
def _turtle_request(self, payload):
''' incoming turtle from a joiner '''
        if len(payload) > 0:
[nick, colors] = data_from_string(payload)
if nick != self._tw.nick: # It is not me.
# There may not be a turtle dictionary.
if hasattr(self._tw, 'remote_turtle_dictionary'):
# Make sure it is not a "rejoin".
if not nick in self._tw.remote_turtle_dictionary:
# Add new turtle for the joiner.
self._tw.turtles.set_turtle(nick, colors)
self._tw.label_remote_turtle(nick, colors)
self._tw.remote_turtle_dictionary[nick] = colors
else:
self._tw.remote_turtle_dictionary = self._get_dictionary()
# Add new turtle for the joiner.
self._tw.turtles.set_turtle(nick, colors)
self._tw.label_remote_turtle(nick, colors)
# Sharer should send the updated remote turtle dictionary to everyone.
if self.initiating:
if not self._tw.nick in self._tw.remote_turtle_dictionary:
self._tw.remote_turtle_dictionary[self._tw.nick] = \
self._get_colors()
event_payload = data_to_string(self._tw.remote_turtle_dictionary)
self.send_event('T|' + event_payload)
self.send_my_xy() # And the sender should report her xy position.
def _receive_turtle_dict(self, payload):
''' Any time there is a new joiner, an updated turtle dictionary is
circulated. Everyone must report their turtle positions so that we
are in sync. '''
if self.waiting_for_turtles:
if len(payload) > 0:
# Grab the new remote turtles dictionary.
remote_turtle_dictionary = data_from_string(payload)
                # And see what is new.
for nick in remote_turtle_dictionary:
if nick == self._tw.nick:
debug_output('skipping my nick %s' %
(nick), self._tw.running_sugar)
                    elif nick not in self._tw.remote_turtle_dictionary:
                        # Add the new turtle.
colors = remote_turtle_dictionary[nick]
self._tw.remote_turtle_dictionary[nick] = colors
self._tw.turtles.set_turtle(nick, colors)
# Label the remote turtle.
self._tw.label_remote_turtle(nick, colors)
debug_output('adding %s to remote turtle dictionary' %
(nick), self._tw.running_sugar)
else:
debug_output('%s already in remote turtle dictionary' %
(nick), self._tw.running_sugar)
self.waiting_for_turtles = False
self.send_my_xy()
def send_my_xy(self):
''' Set xy location so joiner can sync turtle positions. Should be
used to sync positions after turtle drag. '''
self._tw.turtles.set_turtle(self._get_nick())
if self._tw.turtles.get_active_turtle().get_pen_state():
self.send_event('p|%s' % (data_to_string([self._get_nick(),
False])))
put_pen_back_down = True
else:
put_pen_back_down = False
self.send_event('x|%s' % (data_to_string(
[self._get_nick(),
[int(self._tw.turtles.get_active_turtle().get_xy()[0]),
int(self._tw.turtles.get_active_turtle().get_xy()[1])]])))
if put_pen_back_down:
self.send_event('p|%s' % (data_to_string([self._get_nick(),
True])))
self.send_event('r|%s' % (data_to_string(
[self._get_nick(),
int(self._tw.turtles.get_active_turtle().get_heading())])))
def _reskin_turtle(self, payload):
if len(payload) > 0:
[nick, [width, height, data]] = data_from_string(payload)
if nick != self._tw.nick:
if self._tw.running_sugar:
tmp_path = get_path(self._tw.activity, 'instance')
else:
tmp_path = '/tmp'
file_name = base64_to_image(data, tmp_path)
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(file_name,
width, height)
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_shapes([pixbuf])
def _draw_pixbuf(self, payload):
if len(payload) > 0:
[nick, [a, b, x, y, w, h, width, height, data]] =\
data_from_string(payload)
if nick != self._tw.nick:
if self._tw.running_sugar:
tmp_path = get_path(self._tw.activity, 'instance')
else:
tmp_path = '/tmp'
file_name = base64_to_image(data, tmp_path)
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(file_name,
width, height)
pos = self._tw.turtles.turtle_to_screen_coordinates((x, y))
self._tw.turtles.get_active_turtle().draw_pixbuf(
pixbuf, a, b, pos[0], pos[1], w, h, file_name, False)
def _move_forward(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().forward(x, False)
def _move_in_arc(self, payload):
if len(payload) > 0:
[nick, [a, r]] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().arc(a, r, False)
def _rotate_turtle(self, payload):
if len(payload) > 0:
[nick, h] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_heading(h, False)
def _set_xy(self, payload):
if len(payload) > 0:
[nick, [x, y]] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_xy(x, y, share=False)
def _draw_text(self, payload):
if len(payload) > 0:
[nick, [label, x, y, size, w]] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().draw_text(
label, x, y, size, w, False)
def _set_pen_color(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_color(x, False)
def _set_pen_gray_level(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_gray(x, False)
def _set_pen_shade(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_shade(x, False)
def _set_pen_width(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_pen_size(x, False)
def _set_pen_state(self, payload):
if len(payload) > 0:
[nick, x] = data_from_string(payload)
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_pen_state(x, False)
def _fill_polygon(self, payload):
if len(payload) > 0:
[nick, poly_points] = data_from_string(payload)
shared_poly_points = []
for i in range(len(poly_points)):
x, y = self._tw.turtles.screen_to_turtle_coordinates(
(poly_points[i][1], poly_points[i][2]))
if poly_points[i][0] in ['move', 'line']:
shared_poly_points.append((poly_points[i][0], x, y))
elif poly_points[i][0] in ['rarc', 'larc']:
shared_poly_points.append(
(poly_points[i][0],
x,
y,
poly_points[i][3],
poly_points[i][4],
poly_points[i][5]))
if nick != self._tw.nick:
self._tw.turtles.set_turtle(nick)
self._tw.turtles.get_active_turtle().set_poly_points(
shared_poly_points)
self._tw.turtles.get_active_turtle().stop_fill(False)
def _speak(self, payload):
if len(payload) > 0:
[nick, language_option, text] = data_from_string(payload)
if language_option == 'None':
language_option = ''
if text is not None:
os.system('espeak %s "%s" --stdout | aplay' %
(language_option, str(text)))
def _paste(self, payload):
if len(payload) > 0:
[nick, text] = data_from_string(payload)
if text is not None:
self._tw.process_data(data_from_string(text),
self._tw.paste_offset)
self._tw.paste_offset += 20
def _get_dictionary(self):
return {self._get_nick(): self._get_colors()}
def _get_nick(self):
return self._tw.nick
def _get_colors(self):
colors = None
if self._tw.running_sugar:
if profile.get_color() is not None:
colors = profile.get_color().to_string()
else:
colors = self._activity.get_colors()
if colors is None:
colors = '%s,%s' % (DEFAULT_TURTLE_COLORS[0],
DEFAULT_TURTLE_COLORS[1])
return colors.split(',')
class ChatTube(ExportedGObject):
def __init__(self, tube, is_initiator, stack_received_cb):
"""Class for setting up tube for sharing."""
super(ChatTube, self).__init__(tube, PATH)
self.tube = tube
self.is_initiator = is_initiator # Are we sharing or joining activity?
self.stack_received_cb = stack_received_cb
self.stack = ''
self.tube.add_signal_receiver(self.send_stack_cb, 'SendText', IFACE,
path=PATH, sender_keyword='sender')
def send_stack_cb(self, text, sender=None):
if sender == self.tube.get_unique_name():
return
self.stack = text
self.stack_received_cb(text)
@signal(dbus_interface=IFACE, signature='s')
def SendText(self, text):
self.stack = text
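# Illustrative sketch (not part of the original module): every shared action
# travels through the tube as a '<one-letter command>|<payload>' string and is
# dispatched via Collaboration._processing_methods on the receiving side. The
# nick and coordinates below are hypothetical example values.
def _example_build_set_xy_event(nick='artemis', x=10, y=20):
    ''' Hypothetical helper building the same kind of 'x|...' event that
    send_my_xy() sends through the chat tube. '''
    return 'x|%s' % data_to_string([nick, [int(x), int(y)]])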
|
|
# -*- test-case-name: twistedchecker.test.test_runner -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
import os
import re
from astroid.modutils import file_from_modpath
from pylint.checkers.base import NameChecker
from pylint.lint import PyLinter
from twisted.python.compat import NativeStringIO
import twistedchecker
from twistedchecker.checkers import patch_pylint_format
from twistedchecker.core.exceptionfinder import findAllExceptions
from twistedchecker.reporters.limited import LimitedReporter
class Runner():
"""
Run and control the checking process.
"""
outputStream = None
linter = None
allowOptions = None
# Customized checkers.
checkers = ("header.HeaderChecker",
"names.TwistedNamesChecker",
"docstring.DocstringChecker",
"formattingoperation.FormattingOperationChecker",
"comment.CommentChecker",
"testclassname.TestClassNameChecker")
allowedMessagesFromPylint = ("F0001",
"C0103",
"C0301",
"W0311",
"W0312")
diffOption = None
errorResultRead = "Error: Failed to read result file '%s'.\n"
prefixModuleName = "************* Module "
    regexLineStart = r"^[WCEFR]\d{4}:"
def __init__(self):
"""
Initialize C{PyLinter} object, and load configuration file.
"""
self.allowOptions = True
self.linter = PyLinter(self._makeOptions())
# register standard checkers.
self.linter.load_default_plugins()
# read configuration.
pathConfig = os.path.join(twistedchecker.abspath,
"configuration", "pylintrc")
self.linter.read_config_file(pathConfig)
# now we can load file config and command line, plugins (which can
# provide options) have been registered.
self.linter.load_config_file()
allowedMessages = self.registerCheckers()
# disable messages
disabledMessages = set(self.linter
.cfgfile_parser.get("TWISTEDCHECKER", "disable")
.replace(" ", "").split(","))
if disabledMessages != {""}:
for msg in disabledMessages:
self.linter.disable(msg)
allowedMessages -= disabledMessages
# set default output stream to stdout
self.setOutput(sys.stdout)
# set default reporter to limited reporter
self.linter.set_reporter(LimitedReporter(allowedMessages))
def _makeOptions(self):
"""
Return options for twistedchecker.
"""
return (
("diff",
{"type": "string",
"metavar": "<result-file>",
"help": "Set comparing result file to automatically "
"generate a diff."}
),
('pep8',
{'type': 'yn', 'metavar': '<y_or_n>',
'default': False,
'help': 'Show pep8 warnings.'}
),
('strict-epydoc',
{'type': 'yn', 'metavar': '<y_or_n>',
'default': False,
'help': "Check '@type' and '@rtype' in epydoc."}
),
)
def setOutput(self, stream):
"""
Set the stream to output result of checking.
        @param stream: output stream; defaults to stdout
"""
self.outputStream = stream
sys.stdout = stream
def displayHelp(self):
"""
Output help message of twistedchecker.
"""
self.outputStream.write(self.linter.help())
sys.exit(32)
def registerCheckers(self):
"""
Register all checkers of TwistedChecker to C{PyLinter}.
        @return: a set of allowed messages
"""
# We patch the default pylint format checker.
patch_pylint_format.patch()
# register checkers
allowedMessages = list(self.allowedMessagesFromPylint)
for strChecker in self.checkers:
modname, classname = strChecker.split(".")
strModule = "twistedchecker.checkers.%s" % modname
checker = getattr(__import__(strModule,
fromlist=["twistedchecker.checkers"]),
classname)
instanceChecker = checker(self.linter)
allowedMessages += list(instanceChecker.msgs.keys())
self.linter.register_checker(instanceChecker)
self.restrictCheckers(allowedMessages)
return set(allowedMessages)
def unregisterChecker(self, checker):
"""
Remove a checker from the list of registered checkers.
@param checker: the checker to remove
"""
self.linter._checkers[checker.name].remove(checker)
if checker in self.linter._reports:
del self.linter._reports[checker]
if checker in self.linter.options_providers:
self.linter.options_providers.remove(checker)
def findUselessCheckers(self, allowedMessages):
"""
Find checkers which generate no allowed messages.
@param allowedMessages: allowed messages
        @return: checkers which generate no allowed messages (to be removed from pylint)
"""
uselessCheckers = []
for checkerName in self.linter._checkers:
for checker in list(self.linter._checkers[checkerName]):
messagesOfChecker = set(checker.msgs)
if not messagesOfChecker.intersection(allowedMessages):
uselessCheckers.append(checker)
return uselessCheckers
def restrictCheckers(self, allowedMessages):
"""
Unregister useless checkers to speed up twistedchecker.
@param allowedMessages: output messages allowed in twistedchecker
"""
uselessCheckers = self.findUselessCheckers(allowedMessages)
# Unregister these checkers
for checker in uselessCheckers:
self.unregisterChecker(checker)
def getCheckerByName(self, checkerType):
"""
Get checker by given name.
        @param checkerType: type of the checker
"""
for checker in sum(list(self.linter._checkers.values()), []):
if isinstance(checker, checkerType):
return checker
return None
def allowPatternsForNameChecking(self, patternsFunc, patternsClass):
"""
Allow name exceptions by given patterns.
@param patternsFunc: patterns of special function names
@param patternsClass: patterns of special class names
"""
cfgParser = self.linter.cfgfile_parser
nameChecker = self.getCheckerByName(NameChecker)
if not nameChecker:
return
if patternsFunc:
regexFuncAdd = "|((%s).+)$" % "|".join(patternsFunc)
else:
regexFuncAdd = ""
if patternsClass:
regexClassAdd = "|((%s).+)$" % "|".join(patternsClass)
else:
regexClassAdd = ""
# Modify regex for function, method and class name.
regexMethod = cfgParser.get("BASIC", "method-rgx") + regexFuncAdd
regexFunction = cfgParser.get("BASIC", "function-rgx") + regexFuncAdd
regexClass = cfgParser.get("BASIC", "class-rgx") + regexClassAdd
# Save to config parser.
cfgParser.set("BASIC", "method-rgx", regexMethod)
cfgParser.set("BASIC", "function-rgx", regexFunction)
cfgParser.set("BASIC", "class-rgx", regexClass)
# Save to name checker.
nameChecker.config.method_rgx = re.compile(regexMethod)
nameChecker.config.function_rgx = re.compile(regexFunction)
nameChecker.config.class_rgx = re.compile(regexClass)
def getPathList(self, filesOrModules):
"""
Transform a list of modules to path.
@param filesOrModules: a list of modules (may be foo/bar.py or
foo.bar)
"""
pathList = []
for fileOrMod in filesOrModules:
if not os.path.exists(fileOrMod):
                # Maybe the given module is not a path;
                # try to transform it into one.
try:
filepath = file_from_modpath(fileOrMod.split('.'))
except (ImportError, SyntaxError):
# Could not load this module.
continue
if not os.path.exists(filepath):
# Could not find this module in file system.
continue
if os.path.basename(filepath) == "__init__.py":
filepath = os.path.dirname(filepath)
else:
filepath = fileOrMod
pathList.append(filepath)
return pathList
def setNameExceptions(self, filesOrModules):
"""
Find name exceptions in codes and allow them to be ignored
in checking.
@param filesOrModules: a list of modules (may be foo/bar.py or
foo.bar)
"""
pathList = self.getPathList(filesOrModules)
for path in pathList:
patternsFunc, patternsClass = findAllExceptions(path)
self.allowPatternsForNameChecking(patternsFunc, patternsClass)
def run(self, args):
"""
Setup the environment, and run pylint.
@param args: arguments will be passed to pylint
@type args: list of string
"""
# set output stream.
if self.outputStream:
self.linter.reporter.set_output(self.outputStream)
try:
args = self.linter.load_command_line_configuration(args)
except SystemExit as exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
self.displayHelp()
# Check for 'strict-epydoc' option.
if self.allowOptions and not self.linter.option_value("strict-epydoc"):
for msg in ["W9203", "W9205"]:
self.linter.disable(msg)
# insert current working directory to the python path to have a correct
# behaviour.
sys.path.insert(0, os.getcwd())
# set exceptions for name checking.
self.setNameExceptions(args)
# check for diff option.
self.diffOption = self.linter.option_value("diff")
if self.diffOption:
self.prepareDiff()
# check codes.
self.linter.check(args)
# show diff of warnings if diff option on.
if self.diffOption:
diffCount = self.showDiffResults()
exitCode = 1 if diffCount else 0
sys.exit(exitCode)
sys.exit(self.linter.msg_status)
def prepareDiff(self):
"""
Prepare to run the checker and get diff results.
"""
self.streamForDiff = NativeStringIO()
self.linter.reporter.set_output(self.streamForDiff)
def showDiffResults(self):
"""
Show results when diff option on.
"""
try:
oldWarnings = self.parseWarnings(self._readDiffFile())
except:
sys.stderr.write(self.errorResultRead % self.diffOption)
return 1
newWarnings = self.parseWarnings(self.streamForDiff.getvalue())
diffWarnings = self.generateDiff(oldWarnings, newWarnings)
if diffWarnings:
diffResult = self.formatWarnings(diffWarnings)
self.outputStream.write(diffResult + "\n")
return len(diffWarnings)
else:
return 0
def _readDiffFile(self):
"""
Read content of diff file.
This is here to help with testing.
@return: File content.
        @rtype: C{str}
"""
with open(self.diffOption) as f:
content = f.read()
return content
def generateDiff(self, oldWarnings, newWarnings):
"""
Generate diff between given two lists of warnings.
@param oldWarnings: parsed old warnings
@param newWarnings: parsed new warnings
@return: a dict object of diff
"""
diffWarnings = {}
for modulename in newWarnings:
diffInModule = (
newWarnings[modulename] -
oldWarnings.get(modulename, set()))
if diffInModule:
diffWarnings[modulename] = diffInModule
return diffWarnings
def parseWarnings(self, result):
"""
        Transform the result string into a dict object.
        @param result: the checker output as a single string
@return: a dict of warnings
"""
warnings = {}
currentModule = None
warningsCurrentModule = []
for line in result.splitlines():
if line.startswith(self.prefixModuleName):
# Save results for previous module
if currentModule:
warnings[currentModule] = set(warningsCurrentModule)
# Initial results for current module
moduleName = line.replace(self.prefixModuleName, "")
currentModule = moduleName
warningsCurrentModule = []
elif re.search(self.regexLineStart, line):
warningsCurrentModule.append(line)
else:
if warningsCurrentModule:
warningsCurrentModule[-1] += "\n" + line
# Save warnings for last module
if currentModule:
warnings[currentModule] = set(warningsCurrentModule)
return warnings
def formatWarnings(self, warnings):
"""
        Format warnings into a printable result string.
        @param warnings: a dict of warnings produced by parseWarnings
        @return: the formatted warnings as a single string
"""
lines = []
for modulename in sorted(warnings):
lines.append(self.prefixModuleName + modulename)
lines.extend(sorted(warnings[modulename],
key=lambda x: x.split(":")[1]))
return "\n".join(lines)
def main():
"""
An entry point used in the setup.py to create a runnable script.
"""
runner = Runner()
runner.run(sys.argv[1:])
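# Illustrative sketch (not part of the original module): how parseWarnings and
# generateDiff cooperate when the --diff option is used. The module name and
# warning lines below are hypothetical example data.
def _exampleDiffOfWarnings():
    """
    Hypothetical example: only warnings absent from the old result survive
    the diff computed by L{Runner.generateDiff}.
    """
    runner = Runner()
    oldResult = ("************* Module foo\n"
                 "W9001:  1,0: Missing copyright header\n")
    newResult = ("************* Module foo\n"
                 "W9001:  1,0: Missing copyright header\n"
                 "W9208:  4,0:bar: Missing docstring\n")
    oldWarnings = runner.parseWarnings(oldResult)
    newWarnings = runner.parseWarnings(newResult)
    return runner.generateDiff(oldWarnings, newWarnings)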
|
|
#!/usr/bin/env python
# coding=utf-8
"""
Read/write Bookeen dictionaries.
"""
from __future__ import absolute_import
import imp
import io
import os
import sqlite3
import zipfile
from penelope.collation_default import collate_function as collate_function_default
from penelope.utilities import print_debug
from penelope.utilities import print_error
from penelope.utilities import print_info
from penelope.utilities import create_temp_directory
from penelope.utilities import copy_file
from penelope.utilities import delete_directory
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2012-2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "3.1.3"
__email__ = "[email protected]"
__status__ = "Production"
CHUNK_FILE_PREFIX = "c_"
CHUNK_SIZE = 262144 # 262144 = 2^18
EMPTY_FILE_PATH = os.path.join(os.path.split(os.path.abspath(__file__))[0], "res/empty.idx")
HEADER = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\" [<!ENTITY ns \"•\">]><html xml:lang=\"%s\" xmlns=\"http://www.w3.org/1999/xhtml\"><head><title></title></head><body>"
def read(dictionary, args, input_file_string):
def read_single_dict(dictionary, args, single_dict):
# create tmp directory
tmp_path = create_temp_directory()
print_debug("Working in temp dir '%s'" % (tmp_path), args.debug)
if len(single_dict) == 1:
print_debug("Unzipping .install file...", args.debug)
zip_file_path = single_dict[0]
idx_file_path = os.path.join(tmp_path, "d.dict.idx")
dict_file_path = os.path.join(tmp_path, "d.dict")
zip_file_obj = zipfile.ZipFile(zip_file_path, "r")
for entry in zip_file_obj.namelist():
if entry.endswith(".dict.idx"):
zip_entry = zip_file_obj.open(entry)
idx_file_obj = io.open(idx_file_path, "wb")
idx_file_obj.write(zip_entry.read())
idx_file_obj.close()
zip_entry.close()
elif entry.endswith(".dict"):
zip_entry = zip_file_obj.open(entry)
dict_file_obj = io.open(dict_file_path, "wb")
dict_file_obj.write(zip_entry.read())
dict_file_obj.close()
zip_entry.close()
zip_file_obj.close()
print_debug("Unzipping .install file... done", args.debug)
else:
print_debug("Files .dict.idx and .dict already uncompressed...", args.debug)
idx_file_path = single_dict[0]
dict_file_path = single_dict[1]
for file_path in [idx_file_path, dict_file_path]:
if not os.path.exists(file_path):
print_error("File '%s' does not exist" % file_path)
return False
print_debug("Files .dict.idx and .dict already uncompressed... done", args.debug)
# unzip .dict file into tmp_path
print_debug("Unzipping .dict file...", args.debug)
zip_file_obj = zipfile.ZipFile(dict_file_path, "r")
for entry in zip_file_obj.namelist():
if not entry.endswith("/"):
zip_entry = zip_file_obj.open(entry)
entry_file_path = os.path.join(tmp_path, os.path.basename(entry))
entry_file_obj = io.open(entry_file_path, "wb")
entry_file_obj.write(zip_entry.read())
entry_file_obj.close()
zip_entry.close()
zip_file_obj.close()
print_debug("Unzipping .dict file... done", args.debug)
# read .dict.idx
print_debug("Reading .dict.idx file...", args.debug)
sql_connection = sqlite3.connect(idx_file_path)
sql_cursor = sql_connection.cursor()
sql_cursor.execute("select * from T_DictIndex")
index_data = sql_cursor.fetchall()
chunk_index_to_entries = {}
max_chunk_index = 1
for index_entry in index_data:
headword = index_entry[1]
if args.ignore_case:
headword = headword.lower()
offset = index_entry[2]
size = index_entry[3]
chunk_index = index_entry[4]
if chunk_index not in chunk_index_to_entries:
chunk_index_to_entries[chunk_index] = []
if chunk_index > max_chunk_index:
max_chunk_index = chunk_index
chunk_index_to_entries[chunk_index].append([headword, offset, size])
sql_cursor.close()
sql_connection.close()
print_debug("Reading .dict.idx file... done", args.debug)
# read c_* files
print_debug("Reading c_* files...", args.debug)
for chunk_index in range(1, max_chunk_index + 1):
print_debug(" Reading c_%d file..." % (chunk_index), args.debug)
chunk_file_path = os.path.join(tmp_path, "%s%d" % (CHUNK_FILE_PREFIX, chunk_index))
chunk_file_obj = io.open(chunk_file_path, "rb")
for entry in chunk_index_to_entries[chunk_index]:
headword = entry[0]
offset = entry[1]
size = entry[2]
chunk_file_obj.seek(offset)
definition_bytes = chunk_file_obj.read(size)
definition_unicode = definition_bytes.decode(args.input_file_encoding)
dictionary.add_entry(headword=headword, definition=definition_unicode)
chunk_file_obj.close()
print_debug(" Reading c_%d file... done" % (chunk_index), args.debug)
print_debug("Reading c_* files... done", args.debug)
# delete tmp directory
if args.keep:
print_info("Not deleting temp dir '%s'" % (tmp_path))
else:
delete_directory(tmp_path)
print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)
return True
single_dicts = []
for prefix in input_file_string.split(","):
if prefix.endswith(".install"):
single_dicts.append([prefix])
elif prefix.endswith(".dict"):
tentative_dict_path = prefix
tentative_idx_path = tentative_dict_path + u".idx"
if (os.path.exists(tentative_idx_path)) and (os.path.exists(tentative_dict_path)):
single_dicts.append([tentative_idx_path, tentative_dict_path])
else:
tentative_dict_path = prefix + u".dict"
tentative_idx_path = tentative_dict_path + u".idx"
if (os.path.exists(tentative_idx_path)) and (os.path.exists(tentative_dict_path)):
single_dicts.append([tentative_idx_path, tentative_dict_path])
if len(single_dicts) == 0:
print_error("Cannot find .install or .dict.idx/.dict files")
return None
for single_dict in single_dicts:
print_debug("Reading from file '%s'..." % (single_dict), args.debug)
result = read_single_dict(dictionary, args, single_dict)
if result:
print_debug("Reading from file '%s'... success" % (single_dict), args.debug)
else:
print_error("Reading from file '%s'... failed" % (single_dict))
return None
return dictionary
def write(dictionary, args, output_file_path):
# result to be returned
result = None
# get absolute path
output_file_path_absolute = os.path.abspath(output_file_path)
# get absolute path for collation function file
bookeen_collation_function_path = None
if args.bookeen_collation_function is not None:
bookeen_collation_function_path = os.path.abspath(args.bookeen_collation_function)
# create tmp directory
cwd = os.getcwd()
tmp_path = create_temp_directory()
print_debug("Working in temp dir '%s'" % (tmp_path), args.debug)
os.chdir(tmp_path)
# get the basename
base = os.path.basename(output_file_path)
if base.endswith(".zip"):
base = base[:-4]
# copy empty.idx into tmp_path
idx_file_path = base + u".dict.idx"
dict_file_path = base + u".dict"
copy_file(EMPTY_FILE_PATH, idx_file_path)
# open index
sql_connection = sqlite3.connect(idx_file_path)
# install collation in the index
collation_function = collate_function_default
if bookeen_collation_function_path is not None:
try:
collation_function = imp.load_source("", bookeen_collation_function_path).collate_function
print_debug("Using collation function from '%s'" % (bookeen_collation_function_path), args.debug)
except:
print_error("Unable to load collation function from '%s'. Using the default collation function instead." % (bookeen_collation_function_path))
sql_connection.create_collation("IcuNoCase", collation_function)
sql_connection.text_factory = str
# get a cursor and delete any data from the index file
sql_cursor = sql_connection.cursor()
sql_cursor.execute("delete from T_DictIndex")
# write c_* files
    # each c_* file (except possibly the last) has CHUNK_SIZE < size <= CHUNK_SIZE + largest definition bytes (tentatively)
print_debug("Writing c_* files...", args.debug)
files_to_compress = []
current_offset = 0
chunk_index = 1
chunk_file_path = "%s%d" % (CHUNK_FILE_PREFIX, chunk_index)
files_to_compress.append(chunk_file_path)
chunk_file_obj = io.open(chunk_file_path, "wb")
for entry_index in dictionary.entries_index_sorted:
entry = dictionary.entries[entry_index]
definition_bytes = entry.definition.encode("utf-8")
definition_size = len(definition_bytes)
chunk_file_obj.write(definition_bytes)
# insert headword into index file
sql_tuple = (0, entry.headword, current_offset, definition_size, chunk_index)
sql_cursor.execute("insert into T_DictIndex values (?,?,?,?,?)", sql_tuple)
# insert synonyms into index file
if not args.ignore_synonyms:
for synonym in entry.get_synonyms():
sql_tuple = (0, synonym[0], current_offset, definition_size, chunk_index)
sql_cursor.execute("insert into T_DictIndex values (?,?,?,?,?)", sql_tuple)
# update offset
current_offset += definition_size
# if we reached CHUNK_SIZE, open the next c_* file
if current_offset > CHUNK_SIZE:
chunk_file_obj.close()
chunk_index += 1
chunk_file_path = "%s%d" % (CHUNK_FILE_PREFIX, chunk_index)
files_to_compress.append(chunk_file_path)
chunk_file_obj = io.open(chunk_file_path, "wb")
current_offset = 0
chunk_file_obj.close()
print_debug("Writing c_* files... done", args.debug)
# compress
print_debug("Compressing c_* files...", args.debug)
file_zip_obj = zipfile.ZipFile(dict_file_path, "w", zipfile.ZIP_DEFLATED)
for file_to_compress in files_to_compress:
file_to_compress = os.path.basename(file_to_compress)
file_zip_obj.write(file_to_compress)
file_zip_obj.close()
print_debug("Compressing c_* files... done", args.debug)
# update index metadata
print_debug("Updating index metadata...", args.debug)
header = HEADER % (args.language_from)
sql_cursor.execute("update T_DictInfo set F_xhtmlHeader=?", (header,))
sql_cursor.execute("update T_DictInfo set F_LangFrom=?", (args.language_from,))
sql_cursor.execute("update T_DictInfo set F_LangTo=?", (args.language_to,))
sql_cursor.execute("update T_DictInfo set F_Licence=?", (args.license,))
sql_cursor.execute("update T_DictInfo set F_Copyright=?", (args.copyright,))
sql_cursor.execute("update T_DictInfo set F_Title=?", (args.title,))
sql_cursor.execute("update T_DictInfo set F_Description=?", (args.description,))
sql_cursor.execute("update T_DictInfo set F_Year=?", (args.year,))
# the meaning of the following is unknown
sql_cursor.execute("update T_DictInfo set F_Alphabet=?", ("Z",))
sql_cursor.execute("update T_DictInfo set F_CollationLevel=?", ("1",))
sql_cursor.execute("update T_DictVersion set F_DictType=?", ("stardict",))
sql_cursor.execute("update T_DictVersion set F_Version=?", ("11",))
print_debug("Updating index metadata... done", args.debug)
# compact and close
sql_cursor.execute("vacuum")
sql_cursor.close()
sql_connection.close()
# create .install file or copy .dict.idx and .dict into requested output directory
parent_output_directory = os.path.split(output_file_path_absolute)[0]
if args.bookeen_install_file:
print_debug("Creating .install file...", args.debug)
file_zip_path = os.path.join(parent_output_directory, base + u".install")
file_zip_obj = zipfile.ZipFile(file_zip_path, "w", zipfile.ZIP_DEFLATED)
for file_to_compress in [dict_file_path, idx_file_path]:
file_to_compress = os.path.basename(file_to_compress)
file_zip_obj.write(file_to_compress)
file_zip_obj.close()
result = [file_zip_path]
print_debug("Creating .install file... done", args.debug)
else:
print_debug("Copying .dict.idx and .dict files...", args.debug)
dict_file_path_final = os.path.join(parent_output_directory, os.path.basename(dict_file_path))
idx_file_path_final = os.path.join(parent_output_directory, os.path.basename(idx_file_path))
copy_file(dict_file_path, dict_file_path_final)
copy_file(idx_file_path, idx_file_path_final)
result = [idx_file_path_final, dict_file_path_final]
print_debug("Copying .dict.idx and .dict files... done", args.debug)
# delete tmp directory
os.chdir(cwd)
if args.keep:
print_info("Not deleting temp dir '%s'" % (tmp_path))
else:
delete_directory(tmp_path)
print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)
return result
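# Illustrative sketch (not part of the original module): given one row of the
# T_DictIndex table (headword, offset, size, chunk_index), a definition is
# located by seeking inside the matching uncompressed c_* chunk file.
# The chunk directory and encoding arguments are assumptions for the example.
def _example_read_single_definition(chunk_dir, offset, size, chunk_index, encoding="utf-8"):
    chunk_file_path = os.path.join(chunk_dir, "%s%d" % (CHUNK_FILE_PREFIX, chunk_index))
    chunk_file_obj = io.open(chunk_file_path, "rb")
    chunk_file_obj.seek(offset)
    definition_bytes = chunk_file_obj.read(size)
    chunk_file_obj.close()
    return definition_bytes.decode(encoding)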
|
|
'''
..
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: [email protected]
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
This is the General library of utils used throughout the modules
These are abstract general functions
NOTHING IN THIS MODULE SHOULD REQUIRE RED9
'''
from __future__ import with_statement # required only for Maya2009/8
from functools import wraps
import maya.cmds as cmds
import maya.mel as mel
import os
import time
import inspect
import sys
import tempfile
import subprocess
import json
import itertools
#Only valid Red9 import
import Red9.startup.setup as r9Setup
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# Generic Utility Functions ---
#---------------------------------------------------------------------------------
def getCurrentFPS():
'''
returns the current frames per second as a number, rather than a useless string
'''
fpsDict = {"game":15.0, "film":24.0, "pal":25.0, "ntsc":30.0, "show":48.0, "palf":50.0, "ntscf":60.0}
return fpsDict[cmds.currentUnit(q=True, fullName=True, time=True)]
def forceToString(text):
'''
simple function to ensure that data can be passed correctly into
textFields for the UI (ensuring lists are converted)
'''
if issubclass(type(text), list):
return ','.join(text)
else:
return text
def formatPath(path):
'''
take a path and format it to forward slashes with catches for the exceptions
'''
return os.path.normpath(path).replace('\\','/').replace('\t','/t').replace('\n','/n').replace('\a', '/a')
def itersubclasses(cls, _seen=None):
"""
itersubclasses(cls)
http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
Iterator to yield full inheritance from a given class, including subclasses. This
is used in the MetaClass to build the RED9_META_REGISTERY inheritance dict
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
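# Illustrative sketch (not part of the original module): itersubclasses walks
# the full inheritance tree, so both direct and indirect subclasses are
# yielded. The classes below are hypothetical.
def _exampleItersubclasses():
    '''
    Hypothetical example: returns ['B', 'C'] since C subclasses B which subclasses A
    '''
    class A(object):
        pass
    class B(A):
        pass
    class C(B):
        pass
    return [sub.__name__ for sub in itersubclasses(A)]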
def inspectFunctionSource(value):
'''
This is a neat little wrapper over the mel "whatIs" and Pythons inspect
module that finds the given functions source filePath, either Mel or Python
and opens the original file in the default program.
Great for developers
Supports all Mel functions, and Python Class / functions
'''
path=None
#sourceType=None
#Inspect for MEL
log.debug('inspecting given command: %s' % value)
#if issubclass(sourceType(value),str):
try:
path=mel.eval('whatIs("%s")' % value)
if path and not path=="Command":
path=path.split("in: ")[-1]
#if path:
#sourceType='mel'
elif path=="Command":
cmds.warning('%s : is a Command not a script' % value)
return False
except StandardError, error:
log.info(error)
#Inspect for Python
if not path or not os.path.exists(path):
log.info('This is not a known Mel command, inspecting Python libs for : %s' % value)
try:
log.debug('value : %s' % value)
            log.debug('value isString : %s' % isinstance(value, str))
            log.debug('value callable: %s' % callable(value))
            log.debug('value is module : %s' % inspect.ismodule(value))
            log.debug('value is method : %s' % inspect.ismethod(value))
if isinstance(value, str):
#if not callable(value):
value=eval(value)
path=inspect.getsourcefile(value)
if path:
#sourceType='python'
log.info('path : %s' % path)
except StandardError, error:
log.exception(error)
#Open the file with the default editor
#FIXME: If Python and you're a dev then the .py file may be set to open in the default
#Python runtime/editor and won't open as expected. Need to look at this.
if path and os.path.exists(path):
log.debug('NormPath : %s' % os.path.normpath(path))
os.startfile(os.path.normpath(path))
return True
else:
log.warning('No valid path or functions found matches selection')
return False
def getScriptEditorSelection():
'''
this is a hack to bypass an issue with getting the data back from the
ScriptEditorHistory scroll. We need to copy the selected text to the
clipboard then pull it back afterwards.
'''
import Red9.packages.pyperclip as pyperclip
control=mel.eval("$v=$gLastFocusedCommandControl")
executer=mel.eval("$v=$gLastFocusedCommandExecuter")
reporter=mel.eval("$v=$gLastFocusedCommandReporter")
func=""
if control==executer:
func=cmds.cmdScrollFieldExecuter(control, q=True, selectedText=True)
elif control == reporter:
cmds.cmdScrollFieldReporter(reporter, e=True, copySelection=True)
#func=Clipboard.getText()
#pyperclip.py : IN TESTING : Platform independant clipboard support
func=pyperclip.paste()
log.info('command caught: %s ' % func)
return func
# Context Managers and Decorators ---
#---------------------------------------------------------------------------------
def Timer(func):
'''
Simple timer decorator
'''
@wraps(func)
def wrapper(*args, **kws):
t1 = time.time()
res=func(*args, **kws)
t2 = time.time()
functionTrace=''
try:
#module if found
mod = inspect.getmodule(args[0])
functionTrace+='%s >>' % mod.__name__.split('.')[-1]
except:
log.debug('function module inspect failure')
try:
#class function is part of, if found
cls = args[0].__class__
functionTrace+='%s.' % args[0].__class__.__name__
except:
log.debug('function class inspect failure')
functionTrace += func.__name__
log.debug('TIMER : %s: took %0.3f ms' % (functionTrace, (t2 - t1) * 1000.0))
#log.info('%s: took %0.3f ms' % (func.func_name, (t2-t1)*1000.0))
return res
return wrapper
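# Illustrative sketch (not part of the original module): any function can be
# timed simply by decorating it with Timer; the elapsed time is written to the
# log at debug level. The function body below is a hypothetical example.
@Timer
def _exampleTimedCall(iterations=100000):
    '''
    Hypothetical example: calling this logs a 'TIMER : ...' line with the elapsed time
    '''
    return sum(range(iterations))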
def runProfile(func):
'''
run the profiler - only ever used when debugging /optimizing function call speeds.
visualize the data using 'runsnakerun' to view the profiles and debug
'''
import cProfile
from time import gmtime, strftime
@wraps(func)
def wrapper(*args, **kwargs):
currentTime = strftime("%d-%m-%H.%M.%S", gmtime())
dumpFileName = 'c:/%s(%s).profile' % (func.__name__, currentTime)
def command():
func(*args, **kwargs)
profile = cProfile.runctx("command()", globals(), locals(), dumpFileName)
return profile
return wrapper
class AnimationContext(object):
"""
Simple Context Manager for restoring Animation settings
"""
def __init__(self):
self.autoKeyState=None
self.timeStore=None
def __enter__(self):
self.autoKeyState=cmds.autoKeyframe(query=True, state=True)
self.timeStore=cmds.currentTime(q=True)
cmds.undoInfo(openChunk=True)
def __exit__(self, exc_type, exc_value, traceback):
# Close the undo chunk, warn if any exceptions were caught:
cmds.autoKeyframe(state=self.autoKeyState)
cmds.currentTime(self.timeStore)
log.info('autoKeyState restored: %s' % self.autoKeyState)
log.info('currentTime restored: %f' % self.timeStore)
cmds.undoInfo(closeChunk=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
# If this was false, it would re-raise the exception when complete
return True
class undoContext(object):
"""
Simple Context Manager for chunking the undoState
"""
def __init__(self, initialUndo=False, undoFuncCache=[], undoDepth=1):
'''
If initialUndo is True then the context manager will manage what to do on entry with
the undoStack. The idea is that if True the code will look at the last functions in the
        undoQueue and if any of those match those in the undoFuncCache, it'll undo them to the
        depth given.
        WHY?????? This is specifically designed for things like floatSliders where you've
set a function to act on the 'dc' flag, (drag command) by passing that func through this
each drag will only go into the stack once, enabling you to drag as much as you want
and return to the initial state, pre ALL drags, in one chunk.
:param initialUndo: on first process whether undo on entry to the context manager
:param undoFuncCache: only if initialUndo = True : functions to catch in the undo stack
:param undoDepth: only if initialUndo = True : depth of the undo stack to go to
.. note ::
When adding funcs to this you CAN'T call the 'dc' command on any slider with a lambda func,
it has to call a specific func to catch in the undoStack. See Red9_AnimationUtils.FilterCurves
code for a live example of this setup.
'''
self.initialUndo = initialUndo
self.undoFuncCache = undoFuncCache
self.undoDepth = undoDepth
def undoCall(self):
for _ in range(1, self.undoDepth + 1):
#log.depth('undoDepth : %s' % i)
if [func for func in self.undoFuncCache if func in cmds.undoInfo(q=True, undoName=True)]:
cmds.undo()
def __enter__(self):
if self.initialUndo:
self.undoCall()
cmds.undoInfo(openChunk=True)
def __exit__(self, exc_type, exc_value, traceback):
cmds.undoInfo(closeChunk=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
# If this was false, it would re-raise the exception when complete
return True
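# Illustrative sketch (not part of the original module): wrapping scene edits
# in undoContext collapses them into a single undo chunk. The attribute edit
# below is a hypothetical example.
def _exampleChunkedEdit(nodes):
    '''
    Hypothetical example: all the setAttr calls land in one undo chunk
    '''
    with undoContext():
        for node in nodes:
            cmds.setAttr('%s.translateY' % node, 0)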
class ProgressBarContext(object):
'''
Context manager to make it easier to wrap progressBars
>>> #Example of using this in code
>>>
>>> step=5
>>> progressBar=r9General.ProgressBarContext(1000)
>>> progressBar.setStep(step)
>>> count=0
>>>
>>> #now do your code but increment and check the progress state
>>> with progressBar:
    >>> for i in range(1, 1000):
>>> if progressBar.isCanceled():
>>> print 'process cancelled'
>>> return
>>> progressBar.setProgress(count)
>>> count+=step
'''
def __init__(self, maxValue=100, interruptable=True):
self.disable=False
if r9Setup.mayaIsBatch():
self.disable=True
return
if maxValue <= 0:
raise ValueError("Max has to be greater than 0")
self._maxValue = maxValue
self._interruptable = interruptable
self._gMainProgressBar = mel.eval('$gmtmp = $gMainProgressBar')
def isCanceled(self):
if not self.disable:
return cmds.progressBar(self._gMainProgressBar, query=True, isCancelled=True)
def setText(self, text):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, status=text)
def setMaxValue(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, maxValue=int(value))
def setStep(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, step=int(value))
def setProgress(self, value):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, progress=int(value))
def reset(self):
if not self.disable:
self.setMaxValue(self._maxValue)
self.setText("")
def __enter__(self):
if not self.disable:
cmds.progressBar(self._gMainProgressBar,
edit=True,
beginProgress=True,
isInterruptable=self._interruptable,
maxValue=self._maxValue)
def __exit__(self, exc_type, exc_value, traceback):
if not self.disable:
cmds.progressBar(self._gMainProgressBar, edit=True, endProgress=True)
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
del(self)
        return False # False so that the exception gets re-raised
class HIKContext(object):
"""
Simple Context Manager for restoring HIK Animation settings and managing HIK callbacks
"""
def __init__(self, NodeList):
self.objs=cmds.ls(sl=True, l=True)
self.NodeList=NodeList
self.managedHIK = False
def __enter__(self):
try:
#We set the keying group mainly for the copyKey code, stops the entire rig being
#manipulated on copy of single effector data
self.keyingGroups=cmds.keyingGroup(q=True, fil=True)
if [node for node in self.NodeList if cmds.nodeType(node) == 'hikIKEffector'\
or cmds.nodeType(node) == 'hikFKJoint']:
self.managedHIK = True
if self.managedHIK:
cmds.keyingGroup(fil="NoKeyingGroups")
log.info('Processing HIK Mode >> using HIKContext Manager:')
cmds.select(self.NodeList)
mel.eval("hikManipStart 1 1")
except:
self.managedHIK = False
def __exit__(self, exc_type, exc_value, traceback):
if self.managedHIK:
cmds.keyingGroup(fil=self.keyingGroups)
cmds.select(self.NodeList)
mel.eval("hikManipStop")
log.info('Exit HIK Mode >> HIKContext Manager:')
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
if self.objs:
cmds.select(self.objs)
return True
class SceneRestoreContext(object):
"""
Simple Context Manager for restoring Scene Global settings
Basically we store the state of all the modelPanels and timeLine
setups. Think of it like this, you export a scene, file -new, then re-import it
but you've now lost all the scenes UI and setups. This is capable of returning
    the UI to the previous state. Maybe this could be a tool in its own right?
Things stored:
* All UI viewport states, display and settings
* currentTime, timeRanges, timeUnits, sceneUnits, upAxis
* Main cameras and transforms for the 4 main modelPanels
* active sound and sound displays
>>> from Red9.core.Red9_General import SceneRestoreContext as sceneStore
>>> with sceneStore:
>>> #do something to modify the scene setup
>>> cmds.currentTime(100)
>>>
>>> #out of the context manager the scene will be restored as it was
>>> #before the code entered the context. (with sceneStore:)
"""
def __init__(self):
self.gPlayBackSlider=mel.eval("string $temp=$gPlayBackSlider")
self.dataStore={}
def __enter__(self):
self.storeSettings()
def __exit__(self, exc_type, exc_value, traceback):
self.restoreSettings()
if exc_type:
log.exception('%s : %s'%(exc_type, exc_value))
return True
def storeSettings(self):
'''
main work function, store all UI settings
'''
self.dataStore['autoKey'] = cmds.autoKeyframe(query=True, state=True)
# timeline management
self.dataStore['currentTime'] = cmds.currentTime(q=True)
self.dataStore['minTime'] = cmds.playbackOptions(q=True, min=True)
self.dataStore['maxTime'] = cmds.playbackOptions(q=True, max=True)
self.dataStore['startTime'] = cmds.playbackOptions(q=True, ast=True)
self.dataStore['endTime'] = cmds.playbackOptions(q=True, aet=True)
self.dataStore['playSpeed'] = cmds.playbackOptions(query=True, playbackSpeed=True)
# unit management
self.dataStore['timeUnit'] = cmds.currentUnit(q=True, fullName=True, time=True)
self.dataStore['sceneUnits'] = cmds.currentUnit(q=True, fullName=True, linear=True)
self.dataStore['upAxis'] = cmds.upAxis(q=True, axis=True)
#viewport colors
self.dataStore['displayGradient'] = cmds.displayPref(q=True, displayGradient=True)
#objects colors
self.dataStore['curvecolor'] = cmds.displayColor("curve", q=True, dormant=True)
#panel management
self.dataStore['panelStore'] = {}
for panel in ['modelPanel1', 'modelPanel2', 'modelPanel3', 'modelPanel4']:
if not cmds.modelPanel(panel, q=True, exists=True):
continue
self.dataStore['panelStore'][panel] = {}
self.dataStore['panelStore'][panel]['settings'] = cmds.modelEditor(panel, q=True, sts=True)
activeCam = cmds.modelPanel(panel, q=True, camera=True)
if not cmds.nodeType(activeCam) == 'camera':
activeCam = cmds.listRelatives(activeCam, f=True)[0]
self.dataStore['panelStore'][panel]['activeCam'] = activeCam
#camera management
#TODO : store the camera field of view etc also
self.dataStore['cameraTransforms']={}
for cam in ['persp', 'top', 'side', 'front']:
try:
self.dataStore['cameraTransforms'][cam] = [cmds.getAttr('%s.translate' % cam),
cmds.getAttr('%s.rotate' % cam),
cmds.getAttr('%s.scale' % cam)]
except:
log.debug("Camera doesn't exists : %s" % cam)
#sound management
self.dataStore['activeSound'] = cmds.timeControl(self.gPlayBackSlider, q=True, s=1)
self.dataStore['displaySound'] = cmds.timeControl(self.gPlayBackSlider, q=True, ds=1)
def restoreSettings(self):
'''
restore all UI settings
'''
cmds.autoKeyframe(state=self.dataStore['autoKey'])
#timeline management
cmds.currentTime(self.dataStore['currentTime'])
cmds.playbackOptions(min=self.dataStore['minTime'])
cmds.playbackOptions(max=self.dataStore['maxTime'])
cmds.playbackOptions(ast=self.dataStore['startTime'])
cmds.playbackOptions(aet=self.dataStore['endTime'])
cmds.playbackOptions(ps=self.dataStore['playSpeed'])
#unit management
cmds.currentUnit(time=self.dataStore['timeUnit'])
cmds.currentUnit(linear=self.dataStore['sceneUnits'])
cmds.upAxis(axis=self.dataStore['upAxis'])
log.info('Restored PlayBack / Timeline setup')
#viewport colors
cmds.displayPref(displayGradient=self.dataStore['displayGradient'])
cmds.displayRGBColor(resetToSaved=True)
#objects colors
cmds.displayColor("curve", self.dataStore['curvecolor'], dormant=True)
#panel management
for panel, data in self.dataStore['panelStore'].items():
try:
cmdString = data['settings'].replace('$editorName', panel)
mel.eval(cmdString)
log.info("Restored Panel Settings Data >> %s" % panel)
mel.eval('lookThroughModelPanel("%s","%s")' % (data['activeCam'], panel))
log.info("Restored Panel Active Camera Data >> %s >> cam : %s" % (panel, data['activeCam']))
except:
log.debug("Failed to fully Restore ActiveCamera Data >> %s >> cam : %s" % (panel, data['activeCam']))
# camera management
for cam, settings in self.dataStore['cameraTransforms'].items():
try:
cmds.setAttr('%s.translate' % cam, settings[0][0][0], settings[0][0][1], settings[0][0][2])
cmds.setAttr('%s.rotate' % cam, settings[1][0][0], settings[1][0][1], settings[1][0][2])
cmds.setAttr('%s.scale' % cam, settings[2][0][0], settings[2][0][1], settings[2][0][2])
log.info('Restored Default Camera Transform Data : % s' % cam)
except:
log.debug("Failed to fully Restore Default Camera Transform Data : % s" % cam)
#sound management
if self.dataStore['displaySound']:
cmds.timeControl(self.gPlayBackSlider, e=True, ds=1, sound=self.dataStore['activeSound'])
log.info('Restored Audio setup')
else:
cmds.timeControl(self.gPlayBackSlider, e=True, ds=0)
log.debug('Scene Restored fully')
return True
# General ---
#---------------------------------------------------------------------------------
def thumbNailScreen(filepath, width, height, mode='api'):
path='%s.bmp' % os.path.splitext(filepath)[0]
if mode=='api':
thumbnailApiFromView(path, width, height)
log.debug('API Thumb > path : %s' % path)
else:
thumbnailFromPlayBlast(path, width, height)
log.debug('Playblast Thumb > path : %s' % path)
def thumbnailFromPlayBlast(filepath, width, height):
'''
Generate a ThumbNail of the screen
Note: 'cf' flag is broken in 2012
:param filepath: path to Thumbnail
:param width: width of capture
:param height: height of capture
'''
filepath=os.path.splitext(filepath)[0]
filename=os.path.basename(filepath)
filedir=os.path.dirname(filepath)
#get modelPanel and camera
win = cmds.playblast(activeEditor=True).split('|')[-1]
cam = cmds.modelPanel(win, q=True, camera=True)
if not cmds.nodeType(cam) == 'camera':
cam = cmds.listRelatives(cam)[0]
storedformat = cmds.getAttr('defaultRenderGlobals.imageFormat')
storedResolutionGate = cmds.getAttr('%s.filmFit' % cam)
cmds.setAttr('defaultRenderGlobals.imageFormat', 20)
cmds.setAttr('%s.filmFit' % cam, 2) # set to Vertical so we don't get so much overscan
cmds.playblast(frame=cmds.currentTime(q=True), # startTime=cmds.currentTime(q=True),
# endTime=cmds.currentTime(q=True),
format="image",
filename=filepath,
width=width,
height=height,
percent=100,
quality=90,
forceOverwrite=True,
framePadding=0,
showOrnaments=False,
compression="BMP",
viewer=False)
cmds.setAttr('defaultRenderGlobals.imageFormat', storedformat)
cmds.setAttr('%s.filmFit' % cam, storedResolutionGate)
#Why do this rename? In Maya2012 the 'cf' flag fails which means you have to use
#the 'f' flag and that adds framePadding, crap I know! So we strip it and rename
#the file after it's made.
try:
newfile=[f for f in os.listdir(filedir)
if f.split('.bmp')[0].split('.')[0] == filename and not
'.pose' in f]
log.debug('Original Playblast file : %s' % newfile)
os.rename(os.path.join(filedir, newfile[0]), '%s.bmp' % filepath)
log.debug('Thumbnail Renamed : %s' % ('%s.bmp' % filepath))
return '%s.bmp' % filepath
except:
pass
def thumbnailApiFromView(filename, width, height, compression='bmp', modelPanel='modelPanel4'):
'''
grab the thumbnail direct from the buffer?
TODO: not yet figured out how you crop the data here?
'''
import maya.OpenMaya as OpenMaya
import maya.OpenMayaUI as OpenMayaUI
#Grab the last active 3d viewport
view = None
if modelPanel is None:
view = OpenMayaUI.M3dView.active3dView()
else:
try:
view = OpenMayaUI.M3dView()
OpenMayaUI.M3dView.getM3dViewFromModelEditor(modelPanel, view)
except:
#in case the given modelPanel doesn't exist!!
view = OpenMayaUI.M3dView.active3dView()
#read the color buffer from the view, and save the MImage to disk
image = OpenMaya.MImage()
view.readColorBuffer(image, True)
image.resize(width, height, True)
image.writeToFile(filename, compression)
    log.info('API Thumbnail call path : %s' % filename)
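# Usage sketch for the thumbnail helpers above: write a 100x100 bmp next to a given file.
# The 'api' mode grabs the active view buffer, anything else falls through to the
# playblast based capture. The example path is purely illustrative.
def _example_thumbnail_capture(filepath='C:/temp/myPose.pose'):
    '''
    minimal sketch: write <filepath>.bmp via the API grab, falling back to playblast
    '''
    try:
        thumbNailScreen(filepath, width=100, height=100, mode='api')
    except Exception:
        # fall back to the playblast capture if the buffer grab fails
        thumbNailScreen(filepath, width=100, height=100, mode='playblast')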
def getModifier():
'''
return the modifier key pressed
'''
mods = cmds.getModifiers()
if (mods & 1) > 0:
return 'Shift'
if (mods & 2) > 0:
return 'CapsLock'
if (mods & 4) > 0:
return 'Ctrl'
if (mods & 8) > 0:
return 'Alt'
else:
return False
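# Small usage sketch for getModifier(): branch a UI / marking-menu callback on the
# modifier key currently held. The branch bodies are purely illustrative.
def _example_modifier_dispatch():
    mod = getModifier()
    if mod == 'Shift':
        log.info('Shift held : running the additive variant')
    elif mod == 'Ctrl':
        log.info('Ctrl held : running the subtractive variant')
    else:
        log.info('no relevant modifier held : running the default action')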
# OS functions ---
#---------------------------------------------------------------------------------
class Clipboard:
'''
Get or Set data to the Windows clipboard...Used in the inspect code to grab the
ScriptEditor's selected history
CURRENTLY NOT BEING CALLED - switched to pyperclip.py module
'''
@staticmethod
def getText():
'''
Get clipboard text if available
'''
import ctypes
# declare win32 API
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
if not user32.OpenClipboard(0):
return ''
CF_TEXT = 1
hClipMem = user32.GetClipboardData(CF_TEXT)
kernel32.GlobalLock.restype = ctypes.c_char_p
value = kernel32.GlobalLock(hClipMem)
kernel32.GlobalUnlock(hClipMem)
user32.CloseClipboard()
if isinstance(value, str):
return value
elif hasattr(value, 'decode'):
return value.decode(sys.getfilesystemencoding())
else:
return ''
@staticmethod
def setText(value):
'''
        Set clipboard text
'''
import ctypes
if not value:
raise IOError('No text passed to the clipboard')
if isinstance(value, unicode):
value=str(value)
if not isinstance(value, str):
raise TypeError('value should be of str type')
# declare win32 API
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
GlobalLock = kernel32.GlobalLock
memcpy = ctypes.cdll.msvcrt.memcpy
CF_TEXT = 1
GHND = 66
buf = ctypes.c_buffer(value.encode(sys.getfilesystemencoding()))
bufferSize = ctypes.sizeof(buf)
hGlobalMem = kernel32.GlobalAlloc(GHND, bufferSize)
GlobalLock.restype = ctypes.c_void_p
lpGlobalMem = GlobalLock(hGlobalMem)
memcpy(lpGlobalMem, ctypes.addressof(buf), bufferSize)
kernel32.GlobalUnlock(hGlobalMem)
if user32.OpenClipboard(0):
user32.EmptyClipboard()
user32.SetClipboardData(CF_TEXT, hGlobalMem)
user32.CloseClipboard()
log.info('Data set to clipboard : %s' % value)
return True
def os_OpenFileDirectory(path):
'''
open the given folder in the default OS browser
'''
import subprocess
path=os.path.abspath(path)
if sys.platform == 'win32':
subprocess.Popen('explorer /select, "%s"' % path)
elif sys.platform == 'darwin': # macOS
subprocess.Popen(['open', path])
else: # linux
try:
subprocess.Popen(['xdg-open', path])
except OSError:
raise OSError('unsupported xdg-open call??')
def os_OpenFile(filePath, *args):
'''
open the given file in the default program for this OS
'''
import subprocess
#log.debug('filePath : %s' % filePath)
#filePath=os.path.abspath(filePath)
#log.debug('abspath : %s' % filePath)
if sys.platform == 'win32':
os.startfile(filePath)
elif sys.platform == 'darwin': # macOS
subprocess.Popen(['open', filePath])
else: # linux
try:
subprocess.Popen(['xdg-open', filePath])
except OSError:
raise OSError('unsupported xdg-open call??')
def os_formatPath(path):
'''
take the given path and format it for Maya path
'''
return os.path.normpath(path).replace('\\','/').replace('\t','/t').replace('\n','/n').replace('\a', '/a')
def os_listFiles(folder, filters=[], byDate=False, fullPath=False):
'''
simple os wrap to list a dir with filters for file type and sort byDate
:param folder: folder to dir list
:param filters: list of file extensions to filter for
    :param byDate: sort the list by modified date, newest first!
    :param fullPath: if True, return full, Maya-formatted paths rather than just file names
'''
files = os.listdir(folder)
filtered=[]
if filters:
for f in files:
for flt in filters:
if f.lower().endswith(flt):
filtered.append(f)
files=filtered
if byDate and files:
files.sort(key=lambda x: os.stat(os.path.join(folder, x)).st_mtime)
files.reverse()
if fullPath:
files=[os_formatPath(os.path.join(folder, f)) for f in files]
return files
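# Usage sketch for os_listFiles(): find the most recently modified Maya scene in a folder.
# The default folder path is illustrative only.
def _example_latest_maya_scene(folder='C:/projects/scenes'):
    '''
    minimal sketch: newest .ma / .mb file in the folder, or None if there are none
    '''
    scenes = os_listFiles(folder, filters=['.ma', '.mb'], byDate=True, fullPath=True)
    if scenes:
        return scenes[0]
    return None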
def os_openCrashFile(openDir=False):
'''
    Open the default temp dir where Maya stores its crash files and logs
'''
tempdir=tempfile.gettempdir()
if openDir:
os_OpenFileDirectory(tempdir)
else:
mayafiles = os_listFiles(tempdir, filters=['.ma','.mb'], byDate=True, fullPath=True)
cmds.file(mayafiles[0], open=True, f=True)
def os_fileCompare(file1, file2, openDiff=False):
'''
    Pass in 2 files for a diff comparison. If the files are identical, i.e. there are
    no differences, then the code returns 0.
:param file1: first file to compare with second file
:param file2: second file to compare against the first
:param openDiff: if a difference was found then boot Diffmerge UI, highlighting the diff
.. note::
This is a stub function that requires Diffmerge.exe, you can download from
https://sourcegear.com/diffmerge/.
        Once downloaded drop it here Red9/packages/diffMerge.exe
'''
diffmerge=os.path.join(r9Setup.red9ModulePath(),'packages','diffMerge.exe')
outputDir=tempfile.gettempdir()
if os.path.exists(diffmerge):
process=subprocess.Popen([diffmerge, '-d', os.path.join(outputDir, 'diffmergeOutput.diff'), file1, file2],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
#output = process.communicate()
process.wait()
retcode = process.poll()
if not retcode:
log.info('Files are Identical')
return retcode
elif retcode==1:
log.info('Files are not Identical - use the openDiff flag to open up the differences in the editor')
if openDiff:
process=subprocess.Popen([diffmerge, file1, file2], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
return retcode
elif retcode==2:
raise IOError('Files failed to compare - issues prevented the compare processing both files')
return retcode
else:
log.warning('Diffmerge commandline was not found, compare aborted')
def writeJson(filepath=None, content=None):
'''
write json file to disk
    :param filepath: file path to write the file to
:param content: file content
:return: None
'''
if filepath:
path = os.path.dirname(filepath)
if not os.path.exists(path):
os.makedirs(path)
name = open(filepath, "w")
name.write(json.dumps(content, sort_keys=True, indent=4))
name.close()
def readJson(filepath=None):
'''
    read a json file from disk
    :param filepath: file path to read from
    :return: the deserialized content, or None if the file is missing or can't be parsed
'''
if os.path.exists(filepath):
name = open(filepath, 'r')
try:
return json.load(name)
except ValueError:
pass
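# Round-trip sketch for writeJson() / readJson(): dump a small settings dict to a temp
# file and read it back. tempfile is already used elsewhere in this module, so it is
# assumed to be imported above this excerpt.
def _example_json_roundtrip():
    settings_path = os.path.join(tempfile.gettempdir(), 'red9_example_settings.json')
    writeJson(settings_path, {'autoFrame': True, 'step': 2})
    data = readJson(settings_path)
    log.info('readJson returned : %s' % data)
    return data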
class abcIndex(object):
'''
Alphabetic iterator
'''
def __init__(self, lower=True):
if lower:
self.__abc = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
else:
self.__abc = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
self.__iter = 0
self.__iterator = None
self.__Iterate()
def __Iterate(self):
self.__iter += 1
self.__iterator = itertools.permutations(self.__abc, self.__iter)
def next(self):
'''
        Return an alphabetic index
'''
try:
temp = ''.join([x for x in self.__iterator.next()])
except StopIteration:
self.__Iterate()
temp = ''.join([x for x in self.__iterator.next()])
return '%s' % temp
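# Usage sketch for abcIndex(): generate sequential alphabetic suffixes ('a', 'b', ... 'z',
# then two-letter permutations 'ab', 'ac', ...), e.g. for naming duplicated nodes.
def _example_abc_suffixes(count=5):
    '''
    minimal sketch: return the first 'count' alphabetic indices as a list
    '''
    index = abcIndex(lower=True)
    return [index.next() for _ in range(count)]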
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
from matplotlib.gridspec import GridSpec, SubplotSpec
from matplotlib import docstring
import matplotlib.artist as martist
from matplotlib.axes._axes import Axes
import warnings
from matplotlib.cbook import mplDeprecation
class SubplotBase(object):
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = list(map(int, s))
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit '
'integer')
self._subplotspec = GridSpec(rows, cols)[num - 1]
# num - 1 for converting from MATLAB to python indexing
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0] - 1:num[1]]
else:
if num < 1 or num > rows*cols:
raise ValueError(
"num must be 1 <= num <= {maxn}, not {num}".format(
maxn=rows*cols, num=num))
self._subplotspec = GridSpec(rows, cols)[int(num) - 1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def __reduce__(self):
# get the first axes class which does not
# inherit from a subplotbase
def not_subplotbase(c):
return issubclass(c, Axes) and not issubclass(c, SubplotBase)
axes_class = [c for c in self.__class__.mro()
if not_subplotbase(c)][0]
r = [_PicklableSubplotClassConstructor(),
(axes_class,),
self.__getstate__()]
return tuple(r)
def get_geometry(self):
"""get the subplot geometry, e.g., 2,2,3"""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1 + 1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""change subplot geometry, e.g., from 1,1,1 to 2,2,3"""
self._subplotspec = GridSpec(numrows, numcols)[num - 1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""get the SubplotSpec instance associated with the subplot"""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""set the SubplotSpec instance associated with the subplot"""
self._subplotspec = subplotspec
def update_params(self):
"""update the subplot position from fig.subplotpars"""
self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
self.get_subplotspec().get_position(self.figure,
return_all=True)
def is_first_col(self):
return self.colNum == 0
def is_first_row(self):
return self.rowNum == 0
def is_last_row(self):
return self.rowNum == self.numRows - 1
def is_last_col(self):
return self.colNum == self.numCols - 1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
def _make_twin_axes(self, *kl, **kwargs):
"""
make a twinx axes of self. This is used for twinx and twiny.
"""
from matplotlib.projections import process_projection_requirements
kl = (self.get_subplotspec(),) + kl
projection_class, kwargs, key = process_projection_requirements(
self.figure, *kl, **kwargs)
ax2 = subplot_class_factory(projection_class)(self.figure,
*kl, **kwargs)
self.figure.add_subplot(ax2)
return ax2
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = type(str("%sSubplot") % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
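# Illustrative sketch (not part of the original module): subplot_class_factory() is the
# hook Figure.add_subplot uses to build a Subplot class for a given Axes class, and the
# cached class can also be instantiated directly and attached to a figure.
def _example_subplot_class_factory():
    from matplotlib.figure import Figure
    fig = Figure()
    # the default factory call returns the familiar AxesSubplot class
    axes_subplot_cls = subplot_class_factory(Axes)
    ax = axes_subplot_cls(fig, 1, 1, 1)  # 1x1 grid, first (and only) slot
    fig.add_subplot(ax)
    # repeated calls with the same Axes class hit the _subplot_classes cache
    assert subplot_class_factory(Axes) is axes_subplot_cls
    return ax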
class _PicklableSubplotClassConstructor(object):
"""
This stub class exists to return the appropriate subplot
class when __call__-ed with an axes class. This is purely to
allow Pickling of Axes and Subplots.
"""
def __call__(self, axes_class):
# create a dummy object instance
subplot_instance = _PicklableSubplotClassConstructor()
subplot_class = subplot_class_factory(axes_class)
# update the class to the desired subplot class
subplot_instance.__class__ = subplot_class
return subplot_instance
docstring.interpd.update(Axes=martist.kwdoc(Axes))
docstring.interpd.update(Subplot=martist.kwdoc(Axes))
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
from __future__ import unicode_literals
import hashlib
import imp
import importlib
import os
import sys
import textwrap
import zipfile
from collections import namedtuple
from datetime import datetime
from croniter import CroniterBadCronError, CroniterBadDateError, CroniterNotAlphaError, croniter
import six
from airflow import settings
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDagBag
from airflow.exceptions import AirflowDagCycleException
from airflow.executors import get_default_executor
from airflow.settings import Stats
from airflow.utils import timezone
from airflow.utils.dag_processing import list_py_file_paths, correct_maybe_zipped
from airflow.utils.db import provide_session
from airflow.utils.helpers import pprinttable
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.timeout import timeout
class DagBag(BaseDagBag, LoggingMixin):
"""
A dagbag is a collection of dags, parsed out of a folder tree and has high
level configuration settings, like what database to use as a backend and
what executor to use to fire off tasks. This makes it easier to run
distinct environments for say production and development, tests, or for
different teams or security profiles. What would have been system level
settings are now dagbag level so that one system can run multiple,
independent settings sets.
:param dag_folder: the folder to scan to find DAGs
:type dag_folder: unicode
:param executor: the executor to use when executing task instances
in this DagBag
:param include_examples: whether to include the examples that ship
with airflow or not
:type include_examples: bool
:param has_logged: an instance boolean that gets flipped from False to True after a
file has been skipped. This is to prevent overloading the user with logging
        messages about skipped files. Therefore a skipped file is only logged once
        per DagBag.
:param store_serialized_dags: Read DAGs from DB if store_serialized_dags is ``True``.
If ``False`` DAGs are read from python files.
:type store_serialized_dags: bool
"""
    # static class variables to detect dag cycle
CYCLE_NEW = 0
CYCLE_IN_PROGRESS = 1
CYCLE_DONE = 2
DAGBAG_IMPORT_TIMEOUT = conf.getint('core', 'DAGBAG_IMPORT_TIMEOUT')
UNIT_TEST_MODE = conf.getboolean('core', 'UNIT_TEST_MODE')
SCHEDULER_ZOMBIE_TASK_THRESHOLD = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
def __init__(
self,
dag_folder=None,
executor=None,
include_examples=conf.getboolean('core', 'LOAD_EXAMPLES'),
safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE'),
store_serialized_dags=False,
):
# do not use default arg in signature, to fix import cycle on plugin load
if executor is None:
executor = get_default_executor()
dag_folder = dag_folder or settings.DAGS_FOLDER
self.dag_folder = dag_folder
self.dags = {}
# the file's last modified timestamp when we last read it
self.file_last_changed = {}
self.executor = executor
self.import_errors = {}
self.has_logged = False
self.store_serialized_dags = store_serialized_dags
self.collect_dags(
dag_folder=dag_folder,
include_examples=include_examples,
safe_mode=safe_mode)
def size(self):
"""
:return: the amount of dags contained in this dagbag
"""
return len(self.dags)
@property
def dag_ids(self):
return self.dags.keys()
def get_dag(self, dag_id):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
:param dag_id: DAG Id
:type dag_id: str
"""
from airflow.models.dag import DagModel # Avoid circular import
# Only read DAGs from DB if this dagbag is store_serialized_dags.
if self.store_serialized_dags:
# Import here so that serialized dag is only imported when serialization is enabled
from airflow.models.serialized_dag import SerializedDagModel
if dag_id not in self.dags:
# Load from DB if not (yet) in the bag
row = SerializedDagModel.get(dag_id)
if not row:
return None
dag = row.dag
for subdag in dag.subdags:
self.dags[subdag.dag_id] = subdag
self.dags[dag.dag_id] = dag
return self.dags.get(dag_id)
# If asking for a known subdag, we want to refresh the parent
dag = None
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.is_subdag:
root_dag_id = dag.parent_dag.dag_id
# Needs to load from file for a store_serialized_dags dagbag.
enforce_from_file = False
if self.store_serialized_dags and dag is not None:
from airflow.serialization.serialized_objects import SerializedDAG
enforce_from_file = isinstance(dag, SerializedDAG)
# If the dag corresponding to root_dag_id is absent or expired
orm_dag = DagModel.get_current(root_dag_id)
if (orm_dag and (
root_dag_id not in self.dags or
(
orm_dag.last_expired and
dag.last_loaded < orm_dag.last_expired
)
)) or enforce_from_file:
# Reprocess source file
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
Given a path to a python module or zip file, this method imports
        the module and looks for dag objects within it.
"""
from airflow.models.dag import DAG # Avoid circular import
found_dags = []
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return found_dags
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if only_if_updated \
and filepath in self.file_last_changed \
and file_last_changed_on_disk == self.file_last_changed[filepath]:
return found_dags
except Exception as e:
self.log.exception(e)
return found_dags
mods = []
is_zipfile = zipfile.is_zipfile(filepath)
if not is_zipfile:
if safe_mode:
with open(filepath, 'rb') as f:
content = f.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = file_last_changed_on_disk
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s assumed to contain no DAGs. Skipping.",
filepath)
return found_dags
self.log.debug("Importing %s", filepath)
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
mod_name = ('unusual_prefix_' +
hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
'_' + org_mod_name)
if mod_name in sys.modules:
del sys.modules[mod_name]
with timeout(self.DAGBAG_IMPORT_TIMEOUT):
try:
m = imp.load_source(mod_name, filepath)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
else:
zip_file = zipfile.ZipFile(filepath)
for mod in zip_file.infolist():
head, _ = os.path.split(mod.filename)
mod_name, ext = os.path.splitext(mod.filename)
if not head and (ext == '.py' or ext == '.pyc'):
if mod_name == '__init__':
self.log.warning("Found __init__.%s at root of %s", ext, filepath)
if safe_mode:
with zip_file.open(mod.filename) as zf:
self.log.debug("Reading %s from %s", mod.filename, filepath)
content = zf.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = (
file_last_changed_on_disk)
# todo: create ignore list
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s assumed to contain no DAGs. Skipping.",
filepath)
if mod_name in sys.modules:
del sys.modules[mod_name]
try:
sys.path.insert(0, filepath)
m = importlib.import_module(mod_name)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
for m in mods:
for dag in list(m.__dict__.values()):
if isinstance(dag, DAG):
if not dag.full_filepath:
dag.full_filepath = filepath
if dag.fileloc != filepath and not is_zipfile:
dag.fileloc = filepath
try:
dag.is_subdag = False
self.bag_dag(dag, parent_dag=dag, root_dag=dag)
if isinstance(dag.normalized_schedule_interval, six.string_types):
croniter(dag.normalized_schedule_interval)
found_dags.append(dag)
found_dags += dag.subdags
except (CroniterBadCronError,
CroniterBadDateError,
CroniterNotAlphaError) as cron_e:
self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
self.import_errors[dag.full_filepath] = \
"Invalid Cron expression: " + str(cron_e)
self.file_last_changed[dag.full_filepath] = \
file_last_changed_on_disk
except AirflowDagCycleException as cycle_exception:
self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
self.import_errors[dag.full_filepath] = str(cycle_exception)
self.file_last_changed[dag.full_filepath] = \
file_last_changed_on_disk
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
@provide_session
def kill_zombies(self, zombies, session=None):
"""
Fail given zombie tasks, which are tasks that haven't
had a heartbeat for too long, in the current DagBag.
:param zombies: zombie task instances to kill.
:type zombies: airflow.utils.dag_processing.SimpleTaskInstance
:param session: DB session.
:type session: sqlalchemy.orm.session.Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
for zombie in zombies:
if zombie.dag_id in self.dags:
dag = self.dags[zombie.dag_id]
if zombie.task_id in dag.task_ids:
task = dag.get_task(zombie.task_id)
ti = TaskInstance(task, zombie.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = zombie.start_date
ti.end_date = zombie.end_date
ti.try_number = zombie.try_number
ti.state = zombie.state
ti.test_mode = self.UNIT_TEST_MODE
ti.handle_failure("{} detected as zombie".format(ti),
ti.test_mode, ti.get_template_context())
self.log.info('Marked zombie job %s as %s', ti, ti.state)
session.commit()
def bag_dag(self, dag, parent_dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
Throws AirflowDagCycleException if a cycle is detected in this dag or its subdags
"""
dag.test_cycle() # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
for task in dag.tasks:
settings.policy(task)
subdags = dag.subdags
try:
for subdag in subdags:
subdag.full_filepath = dag.full_filepath
subdag.parent_dag = dag
subdag.is_subdag = True
self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag)
self.dags[dag.dag_id] = dag
self.log.debug('Loaded DAG %s', dag)
except AirflowDagCycleException as cycle_exception:
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception('Exception bagging dag: %s', dag.dag_id)
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if dag == root_dag:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise cycle_exception
def collect_dags(
self,
dag_folder=None,
only_if_updated=True,
include_examples=conf.getboolean('core', 'LOAD_EXAMPLES'),
safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the regex patterns specified
in the file.
**Note**: The patterns in .airflowignore are treated as
un-anchored regexes, not shell-like glob patterns.
"""
if self.store_serialized_dags:
return
self.log.info("Filling up the DagBag from %s", dag_folder)
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
FileLoadStat = namedtuple(
'FileLoadStat', "file duration dag_num task_num dags")
dag_folder = correct_maybe_zipped(dag_folder)
dags_by_name = {}
for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode,
include_examples=include_examples):
try:
ts = timezone.utcnow()
found_dags = self.process_file(
filepath, only_if_updated=only_if_updated,
safe_mode=safe_mode)
dag_ids = [dag.dag_id for dag in found_dags]
dag_id_names = str(dag_ids)
td = timezone.utcnow() - ts
td = td.total_seconds() + (
float(td.microseconds) / 1000000)
dags_by_name[dag_id_names] = dag_ids
stats.append(FileLoadStat(
filepath.replace(settings.DAGS_FOLDER, ''),
td,
len(found_dags),
sum([len(dag.tasks) for dag in found_dags]),
dag_id_names,
))
except Exception as e:
self.log.exception(e)
self.dagbag_stats = sorted(
stats, key=lambda x: x.duration, reverse=True)
for file_stat in self.dagbag_stats:
dag_ids = dags_by_name[file_stat.dags]
if file_stat.dag_num >= 1:
# if we found multiple dags per file, the stat is 'dag_id1 _ dag_id2'
dag_names = '_'.join(dag_ids)
Stats.timing('dag.loading-duration.{}'.
format(dag_names),
file_stat.duration)
def collect_dags_from_db(self):
"""Collects DAGs from database."""
from airflow.models.serialized_dag import SerializedDagModel
start_dttm = timezone.utcnow()
self.log.info("Filling up the DagBag from database")
# The dagbag contains all rows in serialized_dag table. Deleted DAGs are deleted
# from the table by the scheduler job.
self.dags = SerializedDagModel.read_all_dags()
# Adds subdags.
# DAG post-processing steps such as self.bag_dag and croniter are not needed as
# they are done by scheduler before serialization.
subdags = {}
for dag in self.dags.values():
for subdag in dag.subdags:
subdags[subdag.dag_id] = subdag
self.dags.update(subdags)
Stats.timing('collect_db_dags', timezone.utcnow() - start_dttm)
def dagbag_report(self):
"""Prints a report around DagBag loading stats"""
report = textwrap.dedent("""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}
{table}
""")
stats = self.dagbag_stats
return report.format(
dag_folder=self.dag_folder,
duration=sum([o.duration for o in stats]),
dag_num=sum([o.dag_num for o in stats]),
task_num=sum([o.task_num for o in stats]),
table=pprinttable(stats),
)
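# Illustrative usage sketch (not part of the upstream module): build a DagBag from a
# folder of DAG files, surface import errors and report parsing stats. The folder path
# and dag id below are placeholders and assume a configured Airflow environment.
def _example_dagbag_usage(dag_folder='/path/to/dags'):
    dagbag = DagBag(dag_folder=dag_folder, include_examples=False)
    # file path -> error string for every module that failed to import
    for filepath, error in dagbag.import_errors.items():
        print('failed to load %s: %s' % (filepath, error))
    print('loaded %s dags' % dagbag.size())
    print(dagbag.dagbag_report())
    return dagbag.get_dag('example_dag_id')  # None if that dag id was not found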
|
|
from __future__ import print_function
from __future__ import division
from builtins import next
from builtins import str
from past.utils import old_div
from builtins import object
import collections
from copy import copy
import pandas as pd
import PyAnalysisTools.PlottingUtils.Formatting as FM
import PyAnalysisTools.PlottingUtils.PlotableObject as PO
import PyAnalysisTools.PlottingUtils.PlottingTools as PT
import ROOT
from PyAnalysisTools.AnalysisTools.MLHelper import Root2NumpyConverter
from PyAnalysisTools.PlottingUtils import HistTools as HT
from PyAnalysisTools.PlottingUtils import set_batch_mode
from PyAnalysisTools.PlottingUtils.BasePlotter import BasePlotter
from PyAnalysisTools.PlottingUtils.PlotConfig import get_histogram_definition
from PyAnalysisTools.base.ProcessConfig import find_process_config, parse_and_build_process_config
from PyAnalysisTools.base.FileHandle import FileHandle
from PyAnalysisTools.base import _logger, InvalidInputError
from PyAnalysisTools.base.JSONHandle import JSONHandle
from PyAnalysisTools.base.Modules import load_modules
from PyAnalysisTools.base.OutputHandle import OutputFileHandle
class EventComparisonReader(object):
def __init__(self, **kwargs):
if 'input_files' not in kwargs:
_logger.error('No input file provided')
raise InvalidInputError('Missing input files')
kwargs.setdefault('compare_files', None)
self.input_files = kwargs['input_files']
self.compare_files = kwargs['compare_files']
self.tree_name = kwargs['tree_name']
for opt, val in list(kwargs.items()):
if not hasattr(self, opt):
setattr(self, opt, val)
def get_instance(self, plot_config):
return Reader(plot_config=plot_config, **self.__dict__)
def get_data(self):
data = {}
for plot_config in self.plot_configs:
getter = self.get_instance(plot_config)
data[plot_config] = getter.get_data()
return data
def make_hist(self, file_handle, compare_file_handle, plot_config, cut_name, cut_string, tree_name=None):
hist = get_histogram_definition(plot_config)
print(file_handle.process)
print(cut_name)
hist.SetName('_'.join([hist.GetName(), file_handle.process, cut_name]))
if tree_name is None:
tree_name = self.tree_name
# try:
t1 = file_handle.tfile.Get('Nominal/BaseSelection_tree_finalSelection')
t2 = compare_file_handle.Get('Nominal/BaseSelection_tree_finalSelection')
# t1 = file_handle.Get(tree_name)
# t2 = compare_file_handle.Get(tree_name)
if isinstance(hist, ROOT.TH1F):
var = plot_config.dist
else:
var = plot_config.dist.split(":")[0]
print(var)
print(plot_config.dist)
print(plot_config)
branch_list = ['eventNumber', var]
# cut_string = 'jet_n > 0 && jet_pt[0] > 60000. && MET_calo > 80000.'
# cut_string = 'jet_n > 0'
# if run == 'collisionRun':
# branch_list.append('HLT_j55_0eta240_xe50_L1J30_EMPTYAcceptance')
# cut_string += ' && passGRL==1 && HLT_j55_0eta240_xe50_L1J30_EMPTYAcceptance==0' # firstempty
# cut_string += ' && passGRL==1 && HLT_j55_0eta240_xe50_L1J30_EMPTYAcceptance==1' # empty
# else:
# branch_list.append('HLT_noalg_cosmiccalo_L1EM3_EMPTYAcceptance')
# branch_list.append('HLT_noalg_cosmiccalo_L1RD1_EMPTYAcceptance')
# branch_list.append('HLT_noalg_cosmiccalo_L1J12_EMPTYAcceptance')
# branch_list.append('HLT_noalg_cosmiccalo_L1J30_EMPTYAcceptance')
# branch_list.append('HLT_j0_L1J12_EMPTYAcceptance')
# branch_list.append('HLT_ht0_L1J12_EMPTYAcceptance')
# cut_string += ' && (HLT_noalg_cosmiccalo_L1EM3_EMPTYAcceptance==1 ||
# HLT_noalg_cosmiccalo_L1RD1_EMPTYAcceptance==1 || HLT_noalg_cosmiccalo_L1J12_EMPTYAcceptance==1 ||
# HLT_noalg_cosmiccalo_L1J30_EMPTYAcceptance==1 || HLT_j0_L1J12_EMPTYAcceptance==1 ||
# HLT_ht0_L1J12_EMPTYAcceptance==1)'
converter = Root2NumpyConverter(branch_list)
data1 = pd.DataFrame(converter.convert_to_array(t1, cut_string))
data2 = pd.DataFrame(converter.convert_to_array(t2, cut_string))
if var.endswith('_n'):
data1[[var]] = data1[[var]].astype(int)
data2[[var]] = data2[[var]].astype(int)
# if var.endswith('calo'):
# hist = ROOT.TH1D(var, '', 200, -200, 200.)
# else:
# hist = ROOT.TH1D(var, '', 100, -100, 100.)
print(type(hist))
if isinstance(hist, ROOT.TH1F):
for _, i in data1.iterrows():
e2 = data2[data2['eventNumber'] == i['eventNumber']]
if len(e2) == 0:
hist.Fill(-99999)
continue
hist.Fill((i[var] - e2[var]))
# if var.startswith('MET' or 'muon'):
# hist.Fill((i[var] - e2[var])/1000.)
# elif var.startswith('muon'):
# hist.Fill((i[var] - e2[var]))
# else:
# li_jet = list(i[var])
# le2_jet = list(e2[var])[0]
# if len(li_jet) != len(le2_jet):
# hist.Fill(-88888)
# continue
# for j in range(len(li_jet)):
# if var.endswith('Pt') or var.endswith('pt'):
# hist.Fill((li_jet[j] - le2_jet[j])/1000.)
# else:
# hist.Fill(li_jet[j] - le2_jet[j])
hist.SetName('_'.join([hist.GetName(), file_handle.process, cut_name]))
# _logger.debug("try to access config for process %s" % file_handle.process)
# except Exception as e:
# raise e
return hist
if isinstance(hist, ROOT.TH2F):
print('starting 2D')
for _, i in data1.iterrows():
e_cos = data2[data2['eventNumber'] == i['eventNumber']]
# print 'Start new'
# print e_cos, "\n"
# print e_cos[var], "\n"
# print i, "\n"
# print i[var], "\n"
# print len(e_cos[var])
# print len(i[var])
value_data1 = i[var]
value_data2 = e_cos[var]
try:
if len(value_data1) == 0 or len(value_data2) == 0:
hist.Fill(-99999., -99999.)
continue
# print type(value_data2), type(value_data1)
# if len(value_data2[var]) == 0:
# hist.Fill(-99999., -99999.)
# continue
hist.Fill(value_data1, value_data2)
except TypeError:
# print 'Filling: ', value_data1, len(value_data1)
if len(value_data2) == 0:
hist.Fill(-99999., -99999.)
continue
# print 'val 1: ', value_data1
# print 'val 2: ', value_data2
hist.Fill(value_data1, value_data2)
# pass
# print 'FOO'
# print type(value_data1), value_data1
# exit()
# if len(e_cos[var]) == 0 or len(i[var]) == 0:
# hist.Fill(-99999., -99999.)
# continue
# li_col = list(i[var])
# le_cos = list(e_cos[var])
# if len(le_cos[0]) == 0 or len(li_col) == 0:
# hist.Fill(-99999., -99999.)
# continue
# print (li_col[0]/1000.), (le_cos[0][0]/1000.), (li_col[0] - le_cos[0][0])
# hist.Fill(li_col[0], le_cos[0][0])
hist.SetName('_'.join([hist.GetName(), file_handle.process, cut_name]))
return hist
def make_hists(self, file_handles, compare_file_handles, plot_config, cut_name, cut_string, tree_name=None):
result = None
for fh in file_handles:
hist = self.make_hist(fh, compare_file_handles.tfile, plot_config, cut_name, cut_string, tree_name)
if result is None:
result = hist
continue
result.Add(hist)
return result
class Reader(EventComparisonReader):
def __init__(self, **kwargs):
self.process_configs = kwargs['process_configs']
input_files = kwargs['input_files']
self.file_handles = [FileHandle(file_name=fn, switch_off_process_name_analysis=True) for fn in input_files]
self.file_handles = self.merge_file_handles(self.file_handles, self.process_configs)
# if hasattr(plot_config, 'compare_files'):
compare_files = kwargs['compare_files']
self.compare_file_handles = [FileHandle(file_name=fn, switch_off_process_name_analysis=True) for fn in
compare_files]
self.compare_file_handles = self.merge_file_handles(self.compare_file_handles, self.process_configs)
self.plot_config = kwargs['plot_config']
self.tree_name = kwargs['tree_name']
for opt, value in list(kwargs.items()):
if not hasattr(self, opt):
setattr(self, opt, value)
@staticmethod
def merge_file_handles(file_handles, process_configs):
tmp_file_handles = collections.OrderedDict()
for fh in file_handles:
parent_process = find_process_config(fh.process, process_configs)
if parent_process not in tmp_file_handles:
tmp_file_handles[parent_process] = [fh]
continue
tmp_file_handles[parent_process].append(fh)
return tmp_file_handles
def get_data(self):
plotable_objects = []
cut_string = '&&'.join([str(v) for v in self.plot_config.cuts])
print(cut_string)
reference = collections.OrderedDict()
for process, file_handles in list(self.file_handles.items()):
# compare_file_handle = self.compare_file_handles['collisionRun_cosmicsReco_standardOFCs'][0]
# compare_file_handle = self.compare_file_handles['cosmicRun_cosmicsReco_standardOFCs'][0]
# compare_file_handle = self.compare_file_handles['collisionRun_cosmicsReco_iterativeOFCs'][0]
# print self.compare_file_handles['cosmicRun_cosmicsReco_iterativeOFCs'][0]
compare_file_handle = list(self.compare_file_handles.items())[0][1][0]
reference[process] = self.make_hists(file_handles, compare_file_handle, self.plot_config, '', cut_string,
self.tree_name)
for k_ref, v_ref in list(reference.items()):
v_ref.SetDirectory(0)
plotable_objects.append(PO.PlotableObject(plot_object=v_ref, label='', process=k_ref))
return plotable_objects
class EventComparisonPlotter(BasePlotter):
def __init__(self, **kwargs):
if 'input_files' not in kwargs:
_logger.error("No input files provided")
raise InvalidInputError("Missing input files")
if 'plot_config_files' not in kwargs:
_logger.error("No config file provided")
raise InvalidInputError("Missing config")
if 'output_dir' not in kwargs:
_logger.warning("No output directory given. Using ./")
kwargs.setdefault('batch', True)
kwargs.setdefault('tree_name', None)
kwargs.setdefault('output_dir', './')
kwargs.setdefault('output_tag', None)
kwargs.setdefault('process_config_files', None)
kwargs.setdefault('systematics', 'Nominal')
kwargs.setdefault('ref_mod_modules', None)
kwargs.setdefault('inp_mod_modules', None)
kwargs.setdefault('read_hist', False)
kwargs.setdefault('n_files_handles', 1)
kwargs.setdefault('nfile_handles', 1)
kwargs.setdefault('ref_module_config_file', None)
kwargs.setdefault('module_config_file', None)
kwargs.setdefault('json', False)
kwargs.setdefault('file_extension', ['.pdf'])
if kwargs['json']:
kwargs = JSONHandle(kwargs['json']).load()
set_batch_mode(kwargs['batch'])
super(EventComparisonPlotter, self).__init__(**kwargs)
self.input_files = kwargs['input_files']
self.output_handle = OutputFileHandle(overload='eventComparison', output_file_name='EventCompare.root',
extension=kwargs['file_extension'], **kwargs)
for attr, value in list(kwargs.items()):
if not hasattr(self, attr):
setattr(self, attr, value)
# if self.systematics is None:
# self.systematics = 'Nominal'
if 'process_config_files' in kwargs:
self.process_configs = parse_and_build_process_config(kwargs['process_config_files'])
self.ref_modules = load_modules(kwargs['ref_mod_modules'], self)
self.modules = load_modules(kwargs['module_config_file'], self)
self.modules_data_providers = [m for m in self.modules if m.type == 'DataProvider']
self.module_filters = [m for m in self.modules if m.type == 'Filter']
self.analyse_plot_config()
# self.update_color_palette()
self.getter = EventComparisonReader(plot_configs=self.plot_configs, process_configs=self.process_configs,
**kwargs)
if not kwargs['json']:
JSONHandle(kwargs['output_dir'], **kwargs).dump()
def analyse_plot_config(self):
if self.plot_configs is None:
return None
pc = next((pc for pc in self.plot_configs if pc.name == 'parse_from_file'), None)
if pc is None:
return
if not hasattr(self, 'reference_files'):
_logger.error("Request to parse plot configs from file, but no reference file given. Breaking up!")
exit(0)
file_handles = [FileHandle(file_name=reference_file) for reference_file in self.reference_files]
objects = []
for file_handle in file_handles:
objects += file_handle.get_objects_by_type('TCanvas')
self.plot_configs.remove(pc)
for obj in objects:
new_pc = copy(pc)
new_pc.dist = obj.GetName()
self.plot_configs.append(new_pc)
def update_color_palette(self):
if isinstance(self.common_config.colors[0], str):
self.color_palette = [getattr(ROOT, 'k' + color.capitalize()) for color in self.common_config.colors]
elif isinstance(self.common_config.colors[0], int):
self.color_palette = [color for color in self.common_config.colors]
else:
_logger.warning("Unsuppored type %s for colors in common_config" % type(self.common_config.colors[0]))
def make_comparison_plots(self):
data = self.getter.get_data()
for k, v in list(data.items()):
self.make_comparison_plot(k, v)
self.output_handle.write_and_close()
def make_comparison_plot(self, plot_config, data):
for i in data:
HT.merge_overflow_bins(i.plot_object)
HT.merge_underflow_bins(i.plot_object)
for i, ref in enumerate(data):
setattr(ref, 'draw_option', plot_config.draw)
index = i - (int(old_div(i, len(PO.color_palette))) * len(PO.color_palette))
marker_style_index = i - (int(old_div(i, len(PO.marker_style_palette_filled)))
* len(PO.marker_style_palette_filled))
index_homogen = i - (int(old_div(i, len(PO.line_style_palette_homogen)))
* len(PO.line_style_palette_homogen))
if plot_config.draw in ['Marker', 'marker', 'P', 'p']:
setattr(ref, 'marker_color', PO.color_palette[index])
setattr(ref, 'marker_style', PO.marker_style_palette_filled[marker_style_index])
setattr(ref, 'line_color', PO.color_palette[index])
elif plot_config.draw in ['Line', 'line', 'L', 'l']:
setattr(ref, 'line_color', PO.color_palette[index])
setattr(ref, 'line_style', PO.line_style_palette_homogen[index_homogen])
elif plot_config.draw in ['Hist', 'hist', 'H', 'h']:
setattr(ref, 'fill_color', PO.color_palette[index])
# setattr(ref, 'fill_style', PO.fill_style_palette_left[index])
setattr(ref, 'fill_style', 0)
setattr(ref, 'line_color', PO.color_palette[index])
setattr(ref, 'marker_color', PO.color_palette[index])
# canvas = PT.plot_objects(map(lambda x : x.plot_object, reference_hists+compare_hists),
# plot_config, plotable_objects=reference_hists+compare_hists)
canvas = PT.plot_objects(data, plot_config)
canvas.SetName(plot_config.name.replace(' ', '_'))
if self.process_configs:
for ref in data:
if hasattr(plot_config, 'ignore_process_labels') and plot_config.ignore_process_labels:
ref.label = '{:s}'.format(ref.label)
else:
ref.label = '{:s} {:s}'.format(find_process_config(ref.process, self.process_configs).label,
ref.label)
ROOT.SetOwnership(canvas, False)
if plot_config.enable_legend:
FM.add_legend_to_canvas(canvas, plot_config.ratio, labels=[x.label for x in data],
plot_objects=[x.plot_object for x in data], **plot_config.legend_options)
if plot_config.lumi:
FM.decorate_canvas(canvas, plot_config)
if plot_config.stat_box:
FM.add_stat_box_to_canvas(canvas)
self.output_handle.register_object(canvas)
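# Illustrative driver sketch (not part of the original module): wire the plotter up from
# a pair of ntuple files and the yaml configs consumed by BasePlotter. Every file name
# below is a placeholder; the exact config keys depend on the surrounding framework.
def _example_run_event_comparison():
    plotter = EventComparisonPlotter(input_files=['ntuple_reprocessed.root'],
                                     compare_files=['ntuple_reference.root'],
                                     plot_config_files=['plot_config_event_compare.yml'],
                                     process_config_files=['process_config.yml'],
                                     tree_name='Nominal/BaseSelection_tree_finalSelection',
                                     output_dir='./event_compare_plots')
    plotter.make_comparison_plots()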
|
|
# stdlib
from typing import Any
from typing import Generator
from typing import Optional
import uuid
from uuid import UUID as uuid_type
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
# relative
from ...logger import critical
from ...logger import traceback_and_raise
from ...proto.core.common.common_object_pb2 import UID as UID_PB
from .decorators import singleton
from .serde.serializable import serializable
# TODO: make this callable from REPTs to pre-warm the UID cache when a large
# tensor starts executing a complex operation.
# Make sure to investigate any issues with taxing the /dev/urandom entropy
@singleton
class UIDValueGenerator:
def __init__(self, n_uids: int = 1000) -> None:
self.uid_store: list[uuid_type] = []
self.__prepopulate(n_uids)
def __prepopulate(self, n_uids: int) -> None:
for _ in range(n_uids):
self.uid_store.append(uuid.uuid4())
def get_uid(self) -> Generator:
for uid in self.uid_store:
yield uid
uuid_value_generator = UIDValueGenerator().get_uid()
@serializable()
class UID:
"""A unique ID for every Syft object.
This object creates a unique ID for every object in the Syft
ecosystem. This ID is guaranteed to be unique for the node on
which it is initialized and is very likely to be unique across
the whole ecosystem (because it is long and randomly generated).
Nearly all objects within Syft subclass from this object because
nearly all objects need to have a unique ID. The only major
    exception at the time of writing is the Client object because it
just points to another object which itself has an id.
There is no other way in Syft to create an ID for any object.
"""
value: uuid_type
def __init__(self, value: Optional[uuid_type] = None):
"""Initializes the internal id using the uuid package.
This initializes the object. Normal use for this object is
to initialize the constructor with value==None because you
want to initialize with a novel ID. The only major exception
is deserialization, wherein a UID object is created with a
specific id value.
:param value: if you want to initialize an object with a specific UID, pass it
in here. This is normally only used during deserialization.
:type value: uuid.uuid4, optional
:return: returns the initialized object
:rtype: UID
.. code-block:: python
from syft.core.common.uid import UID
my_id = UID()
"""
# checks to make sure you've set a proto_type
super().__init__()
# if value is not set - create a novel and unique ID.
self.value = (
next(uuid_value_generator, uuid.uuid4()) if value is None else value
)
@staticmethod
def from_string(value: str) -> "UID":
try:
return UID(value=uuid.UUID(value))
except Exception as e:
critical(f"Unable to convert {value} to UUID. {e}")
traceback_and_raise(e)
def to_string(self) -> str:
return self.no_dash
def __hash__(self) -> int:
"""Hashes the UID for use in dictionaries and sets
A very common use of UID objects is as a key in a dictionary
or database. The object must be able to be hashed in order to
be used in this way. We take the 128-bit int representation of the
value.
:return: returns a hash of the object
:rtype: int
.. note::
Note that this probably gets further hashed into a shorter
representation for most python data-structures.
.. note::
Note that we assume that any collisions will be very rare and
detected by the ObjectStore class in Syft.
"""
return self.value.int
def __eq__(self, other: Any) -> bool:
"""Checks to see if two UIDs are the same using the internal object
This checks to see whether this UID is equal to another UID by
comparing whether they have the same .value objects. These objects
come with their own __eq__ function which we assume to be correct.
:param other: this is the other ID to be compared with
:type other: Any (note this must be Any or __eq__ fails on other types)
:return: returns True/False based on whether the objects are the same
:rtype: bool
"""
try:
return self.value == other.value
except Exception:
return False
@property
def no_dash(self) -> str:
return str(self.value).replace("-", "")
def __repr__(self) -> str:
"""Returns a human-readable version of the ID
Return a human-readable representation of the UID with brackets
so that it can be easily spotted when nested inside of the human-
readable representations of other objects."""
return f"<{type(self).__name__}: {self.no_dash}>"
def char_emoji(self, hex_chars: str) -> str:
base = ord("\U0001F642")
hex_base = ord("0")
code = 0
for char in hex_chars:
offset = ord(char)
code += offset - hex_base
return chr(base + code)
def string_emoji(self, string: str, length: int, chunk: int) -> str:
output = []
part = string[-length:]
while len(part) > 0:
part, end = part[:-chunk], part[-chunk:]
output.append(self.char_emoji(hex_chars=end))
return "".join(output)
def emoji(self) -> str:
return f"<UID:{self.string_emoji(string=str(self.value), length=8, chunk=4)}>"
def repr_short(self) -> str:
"""Returns a SHORT human-readable version of the ID
Return a SHORT human-readable version of the ID which
makes it print nicer when embedded (often alongside other
UID objects) within other object __repr__ methods."""
return f"..{str(self.value)[-5:]}"
def _object2proto(self) -> UID_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: ProtoUID
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return UID_PB(value=self.value.bytes)
@staticmethod
def _proto2object(proto: UID_PB) -> "UID":
"""Creates a UID from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
:return: returns an instance of UID
:rtype: UID
.. note::
This method is purely an internal method. Please use syft.deserialize()
if you wish to deserialize an object.
"""
return UID(value=uuid.UUID(bytes=proto.value))
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return UID_PB
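# A minimal round-trip sketch (not part of the original class), assuming a
# fresh `uid = UID()`:
#
#     proto = uid._object2proto()          # UID_PB carrying uid.value.bytes
#     restored = UID._proto2object(proto)  # rebuilds the uuid.UUID from bytes
#     assert restored == uid               # __eq__ compares the .value fields
#
# As the docstrings above note, the public serialize()/deserialize() entry
# points should be preferred over calling these private helpers directly.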
|
|
"""Tests for tools for manipulating of large commutative expressions. """
from sympy import (S, Add, sin, Mul, Symbol, oo, Integral, sqrt, Tuple, I,
Interval, O, symbols, simplify, collect, Sum, Basic, Dict,
root, exp, cos, sin)
from sympy.abc import a, b, t, x, y, z
from sympy.core.exprtools import (decompose_power, Factors, Term, _gcd_terms,
gcd_terms, factor_terms, factor_nc)
from sympy.core.mul import _keep_coeff as _keep_coeff
from sympy.simplify.cse_opts import sub_pre
from sympy.utilities.pytest import raises
def test_decompose_power():
assert decompose_power(x) == (x, 1)
assert decompose_power(x**2) == (x, 2)
assert decompose_power(x**(2*y)) == (x**y, 2)
assert decompose_power(x**(2*y/3)) == (x**(y/3), 2)
def test_Factors():
assert Factors() == Factors({}) == Factors(S(1))
assert Factors().as_expr() == S.One
assert Factors({x: 2, y: 3, sin(x): 4}).as_expr() == x**2*y**3*sin(x)**4
assert Factors(S.Infinity) == Factors({oo: 1})
assert Factors(S.NegativeInfinity) == Factors({oo: 1, -1: 1})
a = Factors({x: 5, y: 3, z: 7})
b = Factors({ y: 4, z: 3, t: 10})
assert a.mul(b) == a*b == Factors({x: 5, y: 7, z: 10, t: 10})
assert a.div(b) == divmod(a, b) == \
(Factors({x: 5, z: 4}), Factors({y: 1, t: 10}))
assert a.quo(b) == a/b == Factors({x: 5, z: 4})
assert a.rem(b) == a % b == Factors({y: 1, t: 10})
assert a.pow(3) == a**3 == Factors({x: 15, y: 9, z: 21})
assert b.pow(3) == b**3 == Factors({y: 12, z: 9, t: 30})
assert a.gcd(b) == Factors({y: 3, z: 3})
assert a.lcm(b) == Factors({x: 5, y: 4, z: 7, t: 10})
a = Factors({x: 4, y: 7, t: 7})
b = Factors({z: 1, t: 3})
assert a.normal(b) == (Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
assert Factors(sqrt(2)*x).as_expr() == sqrt(2)*x
assert Factors(-I)*I == Factors()
assert Factors({S(-1): S(3)})*Factors({S(-1): S(1), I: S(5)}) == \
Factors(I)
assert Factors(S(2)**x).div(S(3)**x) == \
(Factors({S(2): x}), Factors({S(3): x}))
assert Factors(2**(2*x + 2)).div(S(8)) == \
(Factors({S(2): 2*x + 2}), Factors({S(8): S(1)}))
# coverage
# /!\ things break if this is not True
assert Factors({S(-1): S(3)/2}) == Factors({I: S.One, S(-1): S.One})
assert Factors({I: S(1), S(-1): S(1)/3}).as_expr() == I*(-1)**(S(1)/3)
assert Factors(-1.) == Factors({S(-1): S(1), S(1.): 1})
assert Factors(-2.) == Factors({S(-1): S(1), S(2.): 1})
assert Factors((-2.)**x) == Factors({S(-2.): x})
assert Factors(S(-2)) == Factors({S(-1): S(1), S(2): 1})
assert Factors(S.Half) == Factors({S(2): -S.One})
assert Factors(S(3)/2) == Factors({S(3): S.One, S(2): S(-1)})
assert Factors({I: S(1)}) == Factors(I)
assert Factors({-1.0: 2, I: 1}) == Factors({S(1.0): 1, I: 1})
assert Factors({S.NegativeOne: -S(3)/2}).as_expr() == I
A = symbols('A', commutative=False)
assert Factors(2*A**2) == Factors({S(2): 1, A**2: 1})
assert Factors(I) == Factors({I: S.One})
assert Factors(x).normal(S(2)) == (Factors(x), Factors(S(2)))
assert Factors(x).normal(S(0)) == (Factors(), Factors(S(0)))
raises(ZeroDivisionError, lambda: Factors(x).div(S(0)))
assert Factors(x).mul(S(2)) == Factors(2*x)
assert Factors(x).mul(S(0)).is_zero
assert Factors(x).mul(1/x).is_one
assert Factors(x**sqrt(2)**3).as_expr() == x**(2*sqrt(2))
assert Factors(x)**Factors(S(2)) == Factors(x**2)
assert Factors(x).gcd(S(0)) == Factors(x)
assert Factors(x).lcm(S(0)).is_zero
assert Factors(S(0)).div(x) == (Factors(S(0)), Factors())
assert Factors(x).div(x) == (Factors(), Factors())
assert Factors({x: .2})/Factors({x: .2}) == Factors()
assert Factors(x) != Factors()
assert Factors(S(0)).normal(x) == (Factors(S(0)), Factors())
n, d = x**(2 + y), x**2
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors(x**y), Factors())
assert f.gcd(d) == Factors()
d = x**y
assert f.div(d) == f.normal(d) == (Factors(x**2), Factors())
assert f.gcd(d) == Factors(d)
n = d = 2**x
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors(), Factors())
assert f.gcd(d) == Factors(d)
n, d = 2**x, 2**y
f = Factors(n)
assert f.div(d) == f.normal(d) == (Factors({S(2): x}), Factors({S(2): y}))
assert f.gcd(d) == Factors()
# extraction of constant only
n = x**(x + 3)
assert Factors(n).normal(x**-3) == (Factors({x: x + 6}), Factors({}))
assert Factors(n).normal(x**3) == (Factors({x: x}), Factors({}))
assert Factors(n).normal(x**4) == (Factors({x: x}), Factors({x: 1}))
assert Factors(n).normal(x**(y - 3)) == \
(Factors({x: x + 6}), Factors({x: y}))
assert Factors(n).normal(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
assert Factors(n).normal(x**(y + 4)) == \
(Factors({x: x}), Factors({x: y + 1}))
assert Factors(n).div(x**-3) == (Factors({x: x + 6}), Factors({}))
assert Factors(n).div(x**3) == (Factors({x: x}), Factors({}))
assert Factors(n).div(x**4) == (Factors({x: x}), Factors({x: 1}))
assert Factors(n).div(x**(y - 3)) == \
(Factors({x: x + 6}), Factors({x: y}))
assert Factors(n).div(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
assert Factors(n).div(x**(y + 4)) == \
(Factors({x: x}), Factors({x: y + 1}))
def test_Term():
a = Term(4*x*y**2/z/t**3)
b = Term(2*x**3*y**5/t**3)
assert a == Term(4, Factors({x: 1, y: 2}), Factors({z: 1, t: 3}))
assert b == Term(2, Factors({x: 3, y: 5}), Factors({t: 3}))
assert a.as_expr() == 4*x*y**2/z/t**3
assert b.as_expr() == 2*x**3*y**5/t**3
assert a.inv() == \
Term(S(1)/4, Factors({z: 1, t: 3}), Factors({x: 1, y: 2}))
assert b.inv() == Term(S(1)/2, Factors({t: 3}), Factors({x: 3, y: 5}))
assert a.mul(b) == a*b == \
Term(8, Factors({x: 4, y: 7}), Factors({z: 1, t: 6}))
assert a.quo(b) == a/b == Term(2, Factors({}), Factors({x: 2, y: 3, z: 1}))
assert a.pow(3) == a**3 == \
Term(64, Factors({x: 3, y: 6}), Factors({z: 3, t: 9}))
assert b.pow(3) == b**3 == Term(8, Factors({x: 9, y: 15}), Factors({t: 9}))
assert a.pow(-3) == a**(-3) == \
Term(S(1)/64, Factors({z: 3, t: 9}), Factors({x: 3, y: 6}))
assert b.pow(-3) == b**(-3) == \
Term(S(1)/8, Factors({t: 9}), Factors({x: 9, y: 15}))
assert a.gcd(b) == Term(2, Factors({x: 1, y: 2}), Factors({t: 3}))
assert a.lcm(b) == Term(4, Factors({x: 3, y: 5}), Factors({z: 1, t: 3}))
a = Term(4*x*y**2/z/t**3)
b = Term(2*x**3*y**5*t**7)
assert a.mul(b) == Term(8, Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
assert Term((2*x + 2)**3) == Term(8, Factors({x + 1: 3}), Factors({}))
assert Term((2*x + 2)*(3*x + 6)**2) == \
Term(18, Factors({x + 1: 1, x + 2: 2}), Factors({}))
def test_gcd_terms():
f = 2*(x + 1)*(x + 4)/(5*x**2 + 5) + (2*x + 2)*(x + 5)/(x**2 + 1)/5 + \
(2*x + 2)*(x + 6)/(5*x**2 + 5)
assert _gcd_terms(f) == ((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
assert _gcd_terms(Add.make_args(f)) == \
((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
newf = (S(6)/5)*((1 + x)*(5 + x)/(1 + x**2))
assert gcd_terms(f) == newf
args = Add.make_args(f)
# non-Basic sequences of terms treated as terms of Add
assert gcd_terms(list(args)) == newf
assert gcd_terms(tuple(args)) == newf
assert gcd_terms(set(args)) == newf
# but a Basic sequence is treated as a container
assert gcd_terms(Tuple(*args)) != newf
assert gcd_terms(Basic(Tuple(1, 3*y + 3*x*y), Tuple(1, 3))) == \
Basic((1, 3*y*(x + 1)), (1, 3))
# but we shouldn't change keys of a dictionary or some may be lost
assert gcd_terms(Dict((x*(1 + y), 2), (x + x*y, y + x*y))) == \
Dict({x*(y + 1): 2, x + x*y: y*(1 + x)})
assert gcd_terms((2*x + 2)**3 + (2*x + 2)**2) == 4*(x + 1)**2*(2*x + 3)
assert gcd_terms(0) == 0
assert gcd_terms(1) == 1
assert gcd_terms(x) == x
assert gcd_terms(2 + 2*x) == Mul(2, 1 + x, evaluate=False)
arg = x*(2*x + 4*y)
garg = 2*x*(x + 2*y)
assert gcd_terms(arg) == garg
assert gcd_terms(sin(arg)) == sin(garg)
# issue 6139-like
alpha, alpha1, alpha2, alpha3 = symbols('alpha:4')
a = alpha**2 - alpha*x**2 + alpha + x**3 - x*(alpha + 1)
rep = (alpha, (1 + sqrt(5))/2 + alpha1*x + alpha2*x**2 + alpha3*x**3)
s = (a/(x - alpha)).subs(*rep).series(x, 0, 1)
assert simplify(collect(s, x)) == -sqrt(5)/2 - S(3)/2 + O(x)
# issue 5917
assert _gcd_terms([S.Zero, S.Zero]) == (0, 0, 1)
assert _gcd_terms([2*x + 4]) == (2, x + 2, 1)
eq = x/(x + 1/x)
assert gcd_terms(eq, fraction=False) == eq
def test_factor_terms():
A = Symbol('A', commutative=False)
assert factor_terms(9*(x + x*y + 1) + (3*x + 3)**(2 + 2*x)) == \
9*x*y + 9*x + _keep_coeff(S(3), x + 1)**_keep_coeff(S(2), x + 1) + 9
assert factor_terms(9*(x + x*y + 1) + (3)**(2 + 2*x)) == \
_keep_coeff(S(9), 3**(2*x) + x*y + x + 1)
assert factor_terms(3**(2 + 2*x) + a*3**(2 + 2*x)) == \
9*3**(2*x)*(a + 1)
assert factor_terms(x + x*A) == \
x*(1 + A)
assert factor_terms(sin(x + x*A)) == \
sin(x*(1 + A))
assert factor_terms((3*x + 3)**((2 + 2*x)/3)) == \
_keep_coeff(S(3), x + 1)**_keep_coeff(S(2)/3, x + 1)
assert factor_terms(x + (x*y + x)**(3*x + 3)) == \
x + (x*(y + 1))**_keep_coeff(S(3), x + 1)
assert factor_terms(a*(x + x*y) + b*(x*2 + y*x*2)) == \
x*(a + 2*b)*(y + 1)
i = Integral(x, (x, 0, oo))
assert factor_terms(i) == i
# check radical extraction
eq = sqrt(2) + sqrt(10)
assert factor_terms(eq) == eq
assert factor_terms(eq, radical=True) == sqrt(2)*(1 + sqrt(5))
eq = root(-6, 3) + root(6, 3)
assert factor_terms(eq, radical=True) == 6**(S(1)/3)*(1 + (-1)**(S(1)/3))
eq = [x + x*y]
ans = [x*(y + 1)]
for c in [list, tuple, set]:
assert factor_terms(c(eq)) == c(ans)
assert factor_terms(Tuple(x + x*y)) == Tuple(x*(y + 1))
assert factor_terms(Interval(0, 1)) == Interval(0, 1)
e = 1/sqrt(a/2 + 1)
assert factor_terms(e, clear=False) == 1/sqrt(a/2 + 1)
assert factor_terms(e, clear=True) == sqrt(2)/sqrt(a + 2)
eq = x/(x + 1/x) + 1/(x**2 + 1)
assert factor_terms(eq, fraction=False) == eq
assert factor_terms(eq, fraction=True) == 1
assert factor_terms((1/(x**3 + x**2) + 2/x**2)*y) == \
y*(2 + 1/(x + 1))/x**2
# if not True, then processing for this in factor_terms is not necessary
assert gcd_terms(-x - y) == -x - y
assert factor_terms(-x - y) == Mul(-1, x + y, evaluate=False)
# if not True, then "special" processesing in factor_terms is not necessary
assert gcd_terms(exp(Mul(-1, x + 1))) == exp(-x - 1)
e = exp(-x - 2) + x
assert factor_terms(e) == exp(Mul(-1, x + 2, evaluate=False)) + x
assert factor_terms(e, sign=False) == e
assert factor_terms(exp(-4*x - 2) - x) == -x + exp(Mul(-2, 2*x + 1, evaluate=False))
def test_xreplace():
e = Mul(2, 1 + x, evaluate=False)
assert e.xreplace({}) == e
assert e.xreplace({y: x}) == e
def test_factor_nc():
x, y = symbols('x,y')
k = symbols('k', integer=True)
n, m, o = symbols('n,m,o', commutative=False)
# mul and multinomial expansion is needed
from sympy.core.function import _mexpand
e = x*(1 + y)**2
assert _mexpand(e) == x + x*2*y + x*y**2
def factor_nc_test(e):
ex = _mexpand(e)
assert ex.is_Add
f = factor_nc(ex)
assert not f.is_Add and _mexpand(f) == ex
factor_nc_test(x*(1 + y))
factor_nc_test(n*(x + 1))
factor_nc_test(n*(x + m))
factor_nc_test((x + m)*n)
factor_nc_test(n*m*(x*o + n*o*m)*n)
s = Sum(x, (x, 1, 2))
factor_nc_test(x*(1 + s))
factor_nc_test(x*(1 + s)*s)
factor_nc_test(x*(1 + sin(s)))
factor_nc_test((1 + n)**2)
factor_nc_test((x + n)*(x + m)*(x + y))
factor_nc_test(x*(n*m + 1))
factor_nc_test(x*(n*m + x))
factor_nc_test(x*(x*n*m + 1))
factor_nc_test(x*n*(x*m + 1))
factor_nc_test(x*(m*n + x*n*m))
factor_nc_test(n*(1 - m)*n**2)
factor_nc_test((n + m)**2)
factor_nc_test((n - m)*(n + m)**2)
factor_nc_test((n + m)**2*(n - m))
factor_nc_test((m - n)*(n + m)**2*(n - m))
assert factor_nc(n*(n + n*m)) == n**2*(1 + m)
assert factor_nc(m*(m*n + n*m*n**2)) == m*(m + n*m*n)*n
eq = m*sin(n) - sin(n)*m
assert factor_nc(eq) == eq
# for coverage:
from sympy.physics.secondquant import Commutator
from sympy import factor
eq = 1 + x*Commutator(m, n)
assert factor_nc(eq) == eq
eq = x*Commutator(m, n) + x*Commutator(m, o)*Commutator(m, n)
assert factor(eq) == x*(1 + Commutator(m, o))*Commutator(m, n)
# issue 6534
assert (2*n + 2*m).factor() == 2*(n + m)
# issue 6701
assert factor_nc(n**k + n**(k + 1)) == n**k*(1 + n)
assert factor_nc((m*n)**k + (m*n)**(k + 1)) == (1 + m*n)*(m*n)**k
# issue 6918
assert factor_nc(-n*(2*x**2 + 2*x)) == -2*n*x*(x + 1)
def test_issue_6360():
a, b = symbols("a b")
apb = a + b
eq = apb + apb**2*(-2*a - 2*b)
assert factor_terms(sub_pre(eq)) == a + b - 2*(a + b)**3
def test_issue_7903():
a = symbols(r'a', real=True)
t = exp(I*cos(a)) + exp(-I*sin(a))
assert t.simplify()
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import os
import platform
import sys
# environment at generation time
CMAKE_PREFIX_PATH = '/opt/ros/groovy'.split(';')
setup_dir = '/usr/local'
if setup_dir not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, setup_dir)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': 'lib',
'PATH': 'bin',
'PKG_CONFIG_PATH': 'lib/pkgconfig',
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolder = env_var_subfolders[key]
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
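# Worked example (illustrative only, not executed): with
#   environ = {'CPATH': '/home/user/ws/install/include:/usr/include'}
# and '/home/user/ws/install' recognised as a catkin workspace (it appears in
# CMAKE_PREFIX_PATH and contains a '.catkin' marker),
# _rollback_env_variable(environ, 'CPATH', 'include') removes the first
# matching entry and returns '/usr/include'; it returns None when nothing
# had to be removed.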
def _get_workspaces(environ, include_fuerte=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte'))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolder):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in ``paths`` without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if subfolder:
path = os.path.join(path, subfolder)
# exclude any path already in env and any path we already added
if path not in environ_paths and path not in checked_paths:
checked_paths.append(path)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
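# Worked example (illustrative only): with environ['PATH'] set to
# '/opt/ros/groovy/bin:/usr/bin', paths == ['/usr/local', '/opt/ros/groovy']
# and subfolder 'bin', only '/usr/local/bin' is new, so the returned prefix is
# '/usr/local/bin' plus a trailing os.pathsep (appended because the variable
# was already non-empty); paths already present in the environment or already
# queued are skipped.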
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
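# Sketch of the emitted shell code (POSIX case), assuming the prefix computed
# for PATH is '/usr/local/bin:':
#
#   assignment('FOO', '/tmp')                 ->  export FOO="/tmp"
#   prepend(environ, 'PATH', '/usr/local/bin:')
#                                             ->  export PATH="/usr/local/bin:$PATH"
#
# On Windows the same helpers emit 'set' statements and expand the existing
# value with %PATH% instead of $PATH.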
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
specific_env_hooks = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
generic_env_hooks.remove(generic_env_hooks_by_filename[filename])
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
specific_env_hooks.remove(specific_env_hooks_by_filename[filename])
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS', os.pathsep.join(generic_env_hooks + specific_env_hooks)))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
exit(1)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
sys.exit(0)
|
|
# -*- coding: utf-8 -*-
"""
Deployments
The model for this is in templates.RMS.deploy.py
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Customisable module homepage """
return settings.customise_home(c, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Fallback for module homepage when not customised and
no CMS content found (ADMINs will see CMS edit unless
disabled globally via settings.cms.hide_index)
"""
# Just redirect to the Mission Summary View
s3_redirect_default(URL(f="mission", args="summary"))
# -----------------------------------------------------------------------------
def mission():
""" RESTful CRUD Controller """
def prep(r):
# Configure created_on field in deploy_mission
#created_on = r.table.created_on
#created_on.readable = True
#created_on.label = T("Date Created")
#created_on.represent = lambda d: \
# s3base.S3DateTime.date_represent(d, utc=True)
if r.id:
# Mission-specific workflows return to the profile page
tablename = r.tablename if not r.component else r.component.tablename
next_url = r.url(component="", method="profile", vars={})
if r.component_name == "alert":
alert_create_script()
if settings.get_deploy_manual_recipients():
create_next = URL(f="alert", args=["[id]", "select"])
else:
create_next = next_url
s3db.configure(tablename,
create_next = create_next,
delete_next = next_url,
update_next = next_url,
)
else:
s3db.configure(tablename,
create_next = next_url,
delete_next = next_url,
update_next = next_url,
)
s3.cancel = next_url
if r.component_name == "assignment":
member_id = r.get_vars.get("member_id", None)
if member_id and str(member_id).isdigit():
# Deploy-this-member action
htable = s3db.hrm_human_resource
query = (htable.id == member_id) & \
(htable.deleted != True)
row = db(query).select(htable.id,
limitby = (0, 1)
).first()
if row:
field = s3db.deploy_assignment.human_resource_id
field.default = row.id
field.writable = False
field.comment = None
elif r.method == "create":
atable = s3db.deploy_assignment
atable.end_date.writable = atable.end_date.readable = False
if not r.component and r.method == "profile":
represent = lambda d: \
s3base.S3DateTime.datetime_represent(d, utc=True)
s3db.deploy_alert.modified_on.represent = represent
s3db.deploy_response.created_on.represent = represent
s3base.s3_trunk8(lines=1)
else:
# All other workflows return to the summary page
s3.cancel = r.url(method="summary", component=None, id=0)
if not r.component:
status = r.get_vars.get("~.status__belongs")
if status == "2":
s3.crud_strings[r.tablename]["title_list"] = T("Active Missions")
elif status == "1":
s3.crud_strings[r.tablename]["title_list"] = T("Closed Missions")
return True
s3.prep = prep
def postp(r, output):
if not r.component:
# Override mission open actions to go to the profile page
s3_action_buttons(r,
deletable = True,
editable = True,
read_url = r.url(method="profile", id="[id]"),
update_url = r.url(method="profile", id="[id]"),
delete_url = r.url(method="delete", id="[id]"),
)
# Override the missions list-button to go to the summary page
if isinstance(output, dict) and "buttons" in output:
# Override standard "List" button
buttons = output["buttons"]
if "list_btn" in buttons and "summary_btn" in buttons:
buttons["list_btn"] = buttons["summary_btn"]
elif "subtitle" in output and "rheader" in output:
# In component CRUD views, have a subtitle after the rheader
output["rheader"] = TAG[""](output["rheader"],
H3(output["subtitle"]))
return output
s3.postp = postp
return s3_rest_controller(# Remove the title if we have a component
# (rheader includes the title)
notitle = lambda r: {"title": ""} \
if r.component else None,
rheader = s3db.deploy_rheader,
)
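# Pattern note (a sketch inferred from how the hooks are used in this file,
# not from framework documentation): s3_rest_controller() invokes the hooks
# roughly as
#
#     if callable(s3.prep) and not s3.prep(r):
#         ...abort the request...
#     output = <run REST method>
#     if callable(s3.postp):
#         output = s3.postp(r, output)
#
# which is why every controller below assigns prep/postp before returning
# s3_rest_controller().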
# -----------------------------------------------------------------------------
def response_message():
"""
RESTful CRUD Controller
- can't be called 'response' as this clobbers web2py global!
"""
return s3_rest_controller("deploy", "response",
custom_crud_buttons = {"list_btn": None},
)
# -----------------------------------------------------------------------------
def human_resource():
"""
RESTful CRUD Controller
"""
# Tweak settings for RDRT
# @ToDo: These should really be in customise_ in IFRC template
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
# Add deploy_alert_recipient as component so that we can filter by it
s3db.add_components("hrm_human_resource",
deploy_alert_recipient = "human_resource_id",
)
# Filter to just Deployables
q = FS("application.active") != None
output = s3db.hrm_human_resource_controller(extra_filter = q)
return output
# -----------------------------------------------------------------------------
def person():
"""
'Members' RESTful CRUD Controller
- used as "member profile"
- used for Imports
"""
# Tweak settings for RDRT
settings.hrm.staff_experience = "experience"
settings.hrm.vol_experience = "experience"
settings.hrm.use_skills = True
settings.search.filter_manager = True
# Use Legacy table for unavailability
s3db.add_components("pr_person",
deploy_unavailability = "person_id",
)
return s3db.hrm_person_controller(replace_option = None,
csv_extra_fields = [
# CSV column headers, so no T()
dict(label="Deployable",
value="true"),
# Assume volunteer if not
# specified in CSV
dict(label="Type",
value="volunteer"),
],
csv_stylesheet = ("hrm", "person.xsl"),
csv_template = ("deploy", "person"),
)
# -----------------------------------------------------------------------------
def group():
"""
Groups RESTful CRUD Controller
- used to control membership of a group cc'd on Alerts
"""
def prep(r):
tablename = "pr_group"
s3db.configure(tablename,
deletable = False,
)
if r.component:
if r.component_name == "group_membership":
ctable = r.component.table
# Hide group_head field
field = ctable.group_head
field.readable = field.writable = False
# Configure person_id widget
settings.pr.request_dob = False
settings.pr.request_gender = False
field = ctable.person_id
field.widget = s3base.S3AddPersonWidget(controller="deploy")
# Configure list_fields for this context
list_fields = ["person_id",
"comments",
]
s3db.configure("pr_group_membership",
list_fields = list_fields,
)
elif not r.id:
table = r.table
# Have we got a group defined?
ltable = s3db.org_organisation_team
query = (table.deleted == False) & \
(table.system == False) & \
(table.group_type == 5)
organisation_id = auth.user.organisation_id
if organisation_id:
left = ltable.on((ltable.group_id == table.id) & \
((ltable.organisation_id == organisation_id) | \
(ltable.organisation_id == None)))
else:
left = None
groups = db(query).select(table.id,
ltable.organisation_id,
left = left,
)
if organisation_id and len(groups) > 1:
_channels = groups.find(lambda row: row["org_organisation_team.organisation_id"] == organisation_id)
if not _channels:
_channels = groups.find(lambda row: row["org_organisation_team.organisation_id"] == None)
record = _channels.first()
else:
record = groups.first()
if record:
record_id = record.pr_group.id
r.id = record_id
r.resource.add_filter(table.id == record_id)
r.method = "update"
else:
r.method = "create"
return True
s3.prep = prep
return s3_rest_controller("pr", "group")
# -----------------------------------------------------------------------------
def application():
"""
Custom workflow to manually create standing applications
for deployments (for staff/volunteers)
"""
# Tweak settings for RDRT
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
def prep(r):
method = r.method
if not method and r.representation != "s3json":
r.method = method = "select"
if method == "select":
r.custom_action = s3db.deploy_apply
return True
s3.prep = prep
if "delete" in request.args or \
request.env.request_method == "POST" and auth.permission.format=="s3json":
return s3_rest_controller()
else:
#return s3db.hrm_human_resource_controller()
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def assignment():
""" RESTful CRUD Controller """
def prep(r):
mission_date = s3db.deploy_mission.created_on
mission_date.represent = lambda d: \
s3base.S3DateTime.date_represent(d, utc=True)
if r.record:
table = r.resource.table
table.mission_id.writable = False
table.human_resource_id.writable = False
if r.representation == "popup":
r.resource.configure(insertable=False)
return True
s3.prep = prep
def postp(r, output):
if r.id and isinstance(output, dict):
# Add button to Upload Appraisal
popup = r.representation == "popup"
record_id = r.id
atable = s3db.hrm_appraisal
ltable = s3db.deploy_assignment_appraisal
query = (ltable.assignment_id == record_id) & \
(atable.id == ltable.appraisal_id) & \
(atable.deleted != True)
appraisal = db(query).select(atable.id,
limitby=(0, 1)).first()
permit = auth.s3_has_permission
url = None
if appraisal and permit("update", atable, record_id=appraisal.id):
hrtable = db.hrm_human_resource
hr = db(hrtable.id == r.record.human_resource_id).select(hrtable.person_id,
limitby=(0, 1)
).first()
if hr:
get_vars = {}
if popup:
method = "update.popup"
refresh = get_vars.get("refresh", None)
if refresh:
get_vars["refresh"] = refresh
record = get_vars.get("record", None)
if record:
get_vars["record"] = record
else:
method = "update"
url = URL(c="deploy", f="person",
args=[hr.person_id, "appraisal",
appraisal.id, method],
vars=get_vars,
)
elif permit("update", r.table, record_id=record_id):
# Currently we assume that anyone who can edit the assignment can upload the appraisal
hrtable = db.hrm_human_resource
hr = db(hrtable.id == r.record.human_resource_id).select(hrtable.person_id,
limitby=(0, 1)
).first()
if hr:
get_vars = {"mission_id": r.record.mission_id,
}
if popup:
method = "create.popup"
refresh = get_vars.get("refresh", None)
if refresh:
get_vars["refresh"] = refresh
record = get_vars.get("record", None)
if record:
get_vars["record"] = record
else:
method = "create"
url = URL(c="deploy", f="person",
args=[hr.person_id, "appraisal", method],
vars=get_vars,
)
if url:
button = s3base.S3CRUD.crud_button(T("Upload Appraisal"),
_href=url,
_class="action-btn",
)
if popup:
output["items"] = button
else:
s3.rfooter = button
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def competency():
""" RESTful CRUD controller - unfiltered version """
return s3db.hrm_competency_controller()
# -----------------------------------------------------------------------------
def credential():
""" RESTful CRUD controller - unfiltered version """
return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
""" Experience Controller - unfiltered version """
return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def event_type():
""" RESTful CRUD Controller """
return s3_rest_controller("event", "event_type")
# -----------------------------------------------------------------------------
def job_title():
""" RESTful CRUD Controller """
return s3_rest_controller("hrm", "job_title")
# -----------------------------------------------------------------------------
def training():
""" Training Controller - unfiltered version """
return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def hr_search():
"""
Human Resource REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just deployables (RDRT Members)
s3.filter = FS("application.active") == True
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search_ac for use in Autocompletes
- allows differential access permissions
"""
# Filter to just deployables (RDRT Members)
s3.filter = FS("application.active") == True
s3.prep = lambda r: r.method == "search_ac"
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def alert_create_script():
"""
Inject JS to help the Alert creation form
"""
# @ToDo: Generalise for alternate gateways
# @ToDo: Port to _compose_form
table = s3db.msg_sms_webapi_channel
gateway = db(table.enabled == True).select(table.max_length,
limitby = (0, 1)
).first()
if gateway:
max_length = gateway.max_length
if max_length is None:
# Single SMS
max_length = 160
else:
# Single SMS
max_length = 160
script = \
'''$('#deploy_alert_contact_method').change(function(){
var v=$(this).val()
if(v==1){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').show()
$('#deploy_alert_subject__row1 label').html(i18n.subject+':')
S3.maxLength.init('deploy_alert_body',0)
}else if(v==2){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').hide()
S3.maxLength.init('deploy_alert_body',%(max_length)s)
}else if(v==9){$('#deploy_alert_subject__row,#deploy_alert_subject__row1').show()
$('#deploy_alert_subject__row1 label').html(i18n.subject+': <span class="red">'+i18n.only_visible+'</span>')
S3.maxLength.init('deploy_alert_body',%(max_length)s)
}})''' % dict(max_length = max_length)
s3.jquery_ready.append(script)
i18n = \
'''i18n.characters_left="%s"
i18n.subject="%s"
i18n.only_visible="%s"''' % (T("characters left"),
T("Subject"),
T("Only visible to Email recipients"))
s3.js_global.append(i18n)
# -----------------------------------------------------------------------------
def alert():
""" RESTful CRUD Controller """
# Tweak settings for RDRT
settings.hrm.staff_experience = True
settings.hrm.use_skills = True
settings.search.filter_manager = True
def prep(r):
if r.component:
if r.component.alias == "select":
if not r.method:
r.method = "select"
if r.method == "select":
r.custom_action = s3db.deploy_alert_select_recipients
elif r.component_name == "response":
s3db.configure(r.component.tablename,
deletable = False,
editable = False,
insertable = False,
)
elif r.component_name == "recipient":
settings.search.filter_manager = False
from s3 import S3TextFilter, S3OptionsFilter
recipient_filters = [
S3TextFilter([
"human_resource_id$person_id$first_name",
"human_resource_id$person_id$middle_name",
"human_resource_id$person_id$last_name",
],
label=current.T("Name"),
),
S3OptionsFilter(
"human_resource_id$organisation_id",
widget="multiselect",
search=True,
header="",
hidden=True,
),
]
if settings.get_org_regions():
recipient_filters.insert(1,
s3base.S3HierarchyFilter(
"human_resource_id$organisation_id$organisation_region.region_id",
lookup="org_region",
hidden=True,
)
)
s3db.configure(r.component.tablename,
filter_widgets = recipient_filters,
)
if r.record.message_id:
s3db.configure(r.component.tablename,
deletable = False,
insertable = False,
)
else:
# No component
if r.record:
if r.record.message_id:
# Already sent - so lock
s3db.configure(r.tablename,
deletable = False,
editable = False,
)
else:
alert_create_script()
if settings.get_deploy_manual_recipients():
create_next = URL(f="alert", args=["[id]", "select"])
else:
create_next = URL(f="alert", args=["[id]", "recipient"])
s3db.configure(r.tablename,
create_next = create_next,
deletable = False,
# @ToDo: restrict in postp to change this action button
#editable = False,
)
return True
s3.prep = prep
def postp(r, output):
if r.component:
if r.component_name == "select":
s3.actions = [{"label": str(READ),
"url": URL(f="human_resource",
args = ["[id]", "profile"],
),
"_class": "action-btn read",
}
]
if r.component_name == "recipient":
# Open should open the HR profile, not the link
open_url = URL(f="human_resource",
args = ["profile"],
vars = {"alert_recipient.id": "[id]"},
)
# Delete should delete the link, not the HR profile
delete_url = URL(f="alert",
args=[r.id, "recipient", "[id]", "delete"],
)
s3_action_buttons(r,
read_url = open_url,
update_url = open_url,
delete_url = delete_url,
# Can't delete recipients after the alert
# has been sent:
deletable = not r.record.message_id
)
else:
# Delete should only be possible if the Alert hasn't yet been sent
table = r.table
query = auth.s3_accessible_query("delete", "deploy_alert") & \
(table.message_id == None)
rows = db(query).select(table.id)
restrict = [str(row.id) for row in rows]
s3.actions = [{"label": str(READ),
"url": URL(f="alert", args="[id]"),
"_class": "action-btn read",
},
{"label": str(DELETE),
"url": URL(f="alert", args=["[id]", "delete"]),
"restrict": restrict,
"_class": "delete-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller(rheader = s3db.deploy_rheader,
# Show filter only on recipient tab
hide_filter = {"recipient": False,
"_default": True,
}
)
# -----------------------------------------------------------------------------
def alert_response():
"""
RESTful CRUD Controller
- used to allow RIT Members to apply for Positions
@ToDo: Block all methods but CREATE => what next_url?
"""
alert_id = get_vars.get("alert_id")
if alert_id:
table = s3db.deploy_response
f = table.alert_id
f.readable = f.writable = False
f.default = alert_id
atable = s3db.deploy_alert
alert = db(atable.id == alert_id).select(atable.mission_id,
limitby=(0, 1),
).first()
if alert:
f = table.mission_id
f.readable = f.writable = False
f.default = alert.mission_id
human_resource_id = auth.s3_logged_in_human_resource()
if human_resource_id:
f = table.human_resource_id
f.readable = f.writable = False
f.default = human_resource_id
table.message_id.readable = False
#else:
# # Block
# pass
return s3_rest_controller("deploy", "response")
# -----------------------------------------------------------------------------
def email_inbox():
"""
RESTful CRUD controller for the Email Inbox
- all Inbound Email Messages are visible here
@ToDo: Filter to those which have been unable to be automatically
processed as being responses to Alerts
@ToDo: Filter to those coming into the specific account used for
Deployments
@ToDo: Provide a mechanism (Action button?) to link a mail manually to
an Alert
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user", args="login"))
tablename = "msg_email"
table = s3db.msg_email
table.inbound.readable = False
table.channel_id.readable = False
table.to_address.readable = False
from s3.s3query import FS
s3.filter = (FS("response.id") == None) & \
(FS("inbound") == True)
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("date",
"subject",
"from_address",
"body",
S3SQLInlineComponent(
"attachment",
name = "document_id",
label = T("Attachments"),
fields = ["document_id"],
),
)
s3db.configure(tablename,
crud_form = crud_form,
editable = False,
insertable = False,
list_fields = ["id",
"date",
"from_address",
"subject",
"body",
(T("Attachments"), "attachment.document_id"),
],
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_list = T("View InBox"),
title_update = T("Edit Message"),
label_list_button = T("View InBox"),
label_delete_button = T("Delete Message"),
msg_record_modified = T("Message updated"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in InBox")
)
def prep(r):
# Decode subject and sender fields
decode = current.msg.decode_email
if r.id:
s3db.msg_attachment.document_id.label = ""
if r.component and r.component.alias == "select":
if not r.method:
r.method = "select"
if r.method == "select":
r.custom_action = s3db.deploy_response_select_mission
represent = lambda string: decode(string)
elif not r.method and r.representation in ("html", "aadata"):
# Use custom data table method to allow bulk deletion
r.method = "inbox"
r.custom_action = s3db.deploy_Inbox()
represent = lambda string: s3base.s3_datatable_truncate(decode(string))
table = r.resource.table
table.subject.represent = represent
table.from_address.represent = represent
return True
s3.prep = prep
def postp(r, output):
if r.interactive and r.record and not r.component:
# Custom CRUD button for linking the message to mission
authorised = auth.s3_has_permission("create", "deploy_response")
if authorised:
s3.rfooter = s3base.S3CRUD.crud_button(
T("Link to Mission"),
_href = URL(f="email_inbox",
args = [r.id, "select"],
),
_class = "action-btn link",
)
return output
s3.postp = postp
return s3_rest_controller("msg", "email")
# -----------------------------------------------------------------------------
def email_channel():
"""
RESTful CRUD controller for Inbound Email channels
@ToDo: Allow selection of a specific Channel for Alerts
"""
def prep(r):
table = r.table
tablename = "msg_email_channel"
s3db.configure(tablename,
deletable = False,
)
if not r.id:
# Have we got a channel defined?
query = (table.deleted == False) & \
(table.enabled == True)
organisation_id = auth.user.organisation_id
if organisation_id:
query &= ((table.organisation_id == organisation_id) | \
(table.organisation_id == None))
channels = db(query).select(table.id,
table.organisation_id,
)
if organisation_id and len(channels) > 1:
_channels = channels.find(lambda row: row.organisation_id == organisation_id)
if not _channels:
_channels = channels.find(lambda row: row.organisation_id == None)
record = _channels.first()
else:
record = channels.first()
if record:
record_id = record.id
r.id = record_id
r.resource.add_filter(table.id == record_id)
r.method = "update"
else:
r.method = "create"
if r.interactive:
table.server.label = T("Server")
table.protocol.label = T("Protocol")
table.use_ssl.label = "SSL"
table.port.label = T("Port")
table.username.label = T("Username")
table.password.label = T("Password")
table.delete_from_server.label = T("Delete from Server?")
table.port.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Port"),
T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).")))
table.delete_from_server.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Delete"),
T("If this is set to True then mails will be deleted from the server after downloading.")))
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Settings"),
title_list = T("Email Accounts"),
label_create = T("Create Email Account"),
title_update = T("Edit Email Settings"),
label_list_button = T("View Email Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Email Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Email Settings updated")
)
return True
s3.prep = prep
def postp(r, output):
if r.interactive and isinstance(output, dict) and \
not s3task._is_alive():
poll_btn = A(T("Poll"),
_class = "action-btn",
_href = URL(args=[r.id, "poll"])
)
output["rheader"] = poll_btn
return output
s3.postp = postp
return s3_rest_controller("msg")
# -----------------------------------------------------------------------------
def twitter_channel():
"""
RESTful CRUD controller for Twitter channels
- appears in the administration menu
Only 1 of these normally in existence
@ToDo: Don't enforce
"""
def prep(r):
table = r.table
tablename = "msg_twitter_channel"
s3db.configure(tablename,
deletable = False,
)
if not r.id:
# Have we got a channel defined?
query = (table.deleted == False) & \
(table.enabled == True)
#organisation_id = auth.user.organisation_id
#if organisation_id:
# query &= ((table.organisation_id == organisation_id) | \
# (table.organisation_id == None))
#channels = db(query).select(table.id,
# table.organisation_id,
# )
#if organisation_id and len(channels) > 1:
# _channels = channels.find(lambda row: row.organisation_id == organisation_id)
# if not _channels:
# _channels = channels.find(lambda row: row.organisation_id == None)
# record = _channels.first()
#else:
# record = channels.first()
record = db(query).select(table.id,
limitby = (0, 1)
).first()
if record:
record_id = record.id
r.id = record_id
r.resource.add_filter(table.id == record_id)
r.method = "update"
else:
r.method = "create"
if r.interactive:
table.twitter_account.label = T("Current Twitter account")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter Settings"),
title_list = T("Twitter Accounts"),
label_create = T("Create Twitter Account"),
title_update = T("Edit Twitter account"),
label_list_button = T("View Twitter Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Twitter Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Twitter Settings updated")
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"_class": "action-btn",
"url": URL(args=["[id]", "enable"]),
"restrict": restrict_e,
},
{"label": s3_str(T("Disable")),
"_class": "action-btn",
"url": URL(args = ["[id]", "disable"]),
"restrict": restrict_d,
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"_class": "action-btn",
"url": URL(args = ["[id]", "poll"]),
"restrict": restrict_d,
},
]
return output
s3.postp = postp
return s3_rest_controller("msg",
deduplicate = "",
list_btn = "",
)
# -----------------------------------------------------------------------------
def alert_recipient():
"""
RESTful CRUD controller for options.s3json lookups
- needed for adding recipients
"""
s3.prep = lambda r: r.method == "options" and r.representation == "s3json"
return s3_rest_controller()
# -----------------------------------------------------------------------------
# Messaging
#
def compose():
""" Send message to people/teams """
return s3db.hrm_compose()
# END =========================================================================
|
|
from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas.core.dtypes.common import (
is_float, is_integer,
is_integer_dtype,
is_float_dtype,
is_datetime64_ns_dtype,
is_period_arraylike,
is_nested_list_like
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.compat import lrange
import pandas.compat as compat
import pandas._libs.lib as lib
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.indexes.datetimes import date_range
import pandas.core.tools.datetimes as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex
from pandas.plotting._compat import _mpl_le_2_0_0
# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
def register():
units.registry[lib.Timestamp] = DatetimeConverter()
units.registry[Period] = PeriodConverter()
units.registry[pydt.datetime] = DatetimeConverter()
units.registry[pydt.date] = DatetimeConverter()
units.registry[pydt.time] = TimeConverter()
units.registry[np.datetime64] = DatetimeConverter()
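# Minimal usage sketch (assumes only the imports above plus pyplot): calling
# register() makes matplotlib consult these converters whenever a
# Timestamp/Period/datetime/date/time/np.datetime64 value reaches an axis, e.g.
#
#     import matplotlib.pyplot as plt
#     register()
#     plt.plot([pydt.datetime(2000, 1, 1), pydt.datetime(2000, 1, 2)], [0, 1])
#
# after which the x-axis is driven by PandasAutoDateLocator and
# PandasAutoDateFormatter via DatetimeConverter.axisinfo below.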
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time %s' % d)
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
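# Worked example: _to_ordinalf(pydt.time(13, 30, 15, 500000)) gives
# 13*3600 + 30*60 + 15 + 0.5 == 48615.5 seconds past midnight, and
# time2num('13:30:15.5') parses the string first and returns the same value.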
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or is_integer(value) or
is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
def __call__(self, x, pos=0):
fmt = '%H:%M:%S'
s = int(x)
ms = int((x - s) * 1e3)
us = int((x - s) * 1e6 - ms)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
if us != 0:
fmt += '.%6f'
elif ms != 0:
fmt += '.%3f'
return pydt.time(h, m, s, us).strftime(fmt)
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if is_nested_list_like(values):
values = [PeriodConverter._convert_1d(v, units, axis)
for v in values]
else:
values = PeriodConverter._convert_1d(values, units, axis)
return values
@staticmethod
def _convert_1d(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time)
if (isinstance(values, valid_types) or is_integer(values) or
is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq)._values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq)._values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time)):
return Period(date, freq).ordinal
elif (is_integer(date) or is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '%s'" % date)
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, ABCSeries)
) and is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
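# Worked example (with the classic matplotlib epoch of 0001-01-01): a scalar
# datetime(2000, 1, 1) maps to dates.date2num's 730120.0, while a
# datetime64[ns]-backed Index takes the vectorised epoch2num branch and yields
# the same float ordinals without converting element by element.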
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
# values might be a 1-d array, or a list-like of arrays.
if is_nested_list_like(values):
values = [DatetimeConverter._convert_1d(v, unit, axis)
for v in values]
else:
values = DatetimeConverter._convert_1d(values, unit, axis)
return values
@staticmethod
def _convert_1d(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(lib.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (is_integer(values) or is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index)):
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com._asarray_tuplesafe(values)
if is_integer_dtype(values) or is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = _dt_to_float_ordinal(values)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
values = _dt_to_float_ordinal(values)
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
# matplotlib.dates._UTC has no _utcoffset called by pandas
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
# For mpl > 2.0 the format strings are controlled via rcparams
# so do not mess with them. For mpl < 2.0 change the second
# break point and add a musec break point
if _mpl_le_2_0_0():
self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S'
self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f'
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
# We went through the whole loop without breaking, default to 1
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate %d '
'ticks from %s to %s: exceeds Locator.MAXTICKS'
'* 2 (%d) ') %
(estimate, dmin, dmax, self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
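# Hand-checked example: the proleptic Gregorian ordinal of 2000-01-01 is
# 730120, so _from_ordinal(730120.5) returns datetime(2000, 1, 1, 12, 0);
# the fractional part of the ordinal encodes the time of day.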
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
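# Worked example of the table above: a span of 75 years falls into the
# "nyears < 100" bucket, so _get_default_annual_spacing(75) returns (5, 10),
# i.e. a minor tick every 5 years and a major tick every 10 years.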
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
return np.nonzero(current - previous)[0]
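# Illustrative example (dates made up): for a daily PeriodIndex running from
# 2000-01-30 through 2000-02-02, period_break(dates, 'month') returns
# array([2]), the position of 2000-02-01, where the month value first changes.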
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
If the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: %s" % freq)
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_._values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 5. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 6. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
# Case 7. More than 12 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = frequencies.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: %s" % (freq)
raise NotImplementedError(errmsg)
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
freq : {var}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = dict([(x, f) for (x, _, _, f) in format])
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
class TimeSeries_TimedeltaFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
"""
@staticmethod
def format_timedelta_ticks(x, pos, n_decimals):
"""
Convert a tick value given in nanoseconds to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s
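# Hand-checked example: for x = 90061.5 seconds expressed in nanoseconds
# (90061.5e9) and n_decimals=1, the divmod chain gives d=1, h=1, m=1, s=1
# and decimals=5, so this returns '1 days 01:01:01.5'.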
def __call__(self, x, pos=0):
(vmin, vmax) = tuple(self.axis.get_view_interval())
n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
|
|
"""SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py 2014/01/04 01:12:18 root"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes is not None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove duplicate items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built into the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
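# A few illustrative inputs (module names here are made up) and the module
# name the regex above is expected to capture:
#
#     USE mod_a                          -> 'mod_a'
#     use, intrinsic :: iso_c_binding    -> 'iso_c_binding'
#     x = f(); USE :: mod_b              -> 'mod_b'  (matched via the ';' branch)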
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text'
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
# (?:\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
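# A few illustrative inputs (file names are made up) and the path the regex
# above is expected to capture:
#
#     INCLUDE 'params.inc'      -> 'params.inc'
#     INCLUDE kind_"defs.h"     -> 'defs.h'  (the kind-param prefix is skipped)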
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
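# Illustrative behaviour of this regex (module names are made up):
#
#     MODULE my_mod              -> captures 'my_mod'
#     MODULE PROCEDURE my_proc   -> no match (negative lookahead on PROCEDURE)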
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import random
import uuid
from glanceclient.v2 import schemas
import mock
from osc_lib.cli import format_columns
import warlock
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit import utils
image_id = '0f41529e-7c12-4de8-be2d-181abb825b3c'
image_name = 'graven'
image_owner = 'baal'
image_protected = False
image_visibility = 'public'
image_tags = []
image_size = 0
IMAGE = {
'id': image_id,
'name': image_name,
'owner': image_owner,
'protected': image_protected,
'visibility': image_visibility,
'tags': image_tags,
'size': image_size
}
IMAGE_columns = tuple(sorted(IMAGE))
IMAGE_data = tuple((IMAGE[x] for x in sorted(IMAGE)))
IMAGE_SHOW = copy.copy(IMAGE)
IMAGE_SHOW['tags'] = format_columns.ListColumn(IMAGE_SHOW['tags'])
IMAGE_SHOW_data = tuple((IMAGE_SHOW[x] for x in sorted(IMAGE_SHOW)))
# Just enough v2 schema to do some testing
IMAGE_schema = {
"additionalProperties": {
"type": "string"
},
"name": "image",
"links": [
{
"href": "{self}",
"rel": "self"
},
{
"href": "{file}",
"rel": "enclosure"
},
{
"href": "{schema}",
"rel": "describedby"
}
],
"properties": {
"id": {
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", # noqa
"type": "string",
"description": "An identifier for the image"
},
"name": {
"type": [
"null",
"string"
],
"description": "Descriptive name for the image",
"maxLength": 255
},
"owner": {
"type": [
"null",
"string"
],
"description": "Owner of the image",
"maxLength": 255
},
"protected": {
"type": "boolean",
"description": "If true, image will not be deletable."
},
"self": {
"type": "string",
"description": "(READ-ONLY)"
},
"schema": {
"type": "string",
"description": "(READ-ONLY)"
},
"size": {
"type": [
"null",
"integer",
"string"
],
"description": "Size of image file in bytes (READ-ONLY)"
},
"status": {
"enum": [
"queued",
"saving",
"active",
"killed",
"deleted",
"pending_delete"
],
"type": "string",
"description": "Status of the image (READ-ONLY)"
},
"tags": {
"items": {
"type": "string",
"maxLength": 255
},
"type": "array",
"description": "List of strings related to the image"
},
"visibility": {
"enum": [
"public",
"private"
],
"type": "string",
"description": "Scope of image accessibility"
},
}
}
class FakeImagev2Client(object):
def __init__(self, **kwargs):
self.images = mock.Mock()
self.images.resource_class = fakes.FakeResource(None, {})
self.image_members = mock.Mock()
self.image_members.resource_class = fakes.FakeResource(None, {})
self.image_tags = mock.Mock()
self.image_tags.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
self.version = 2.0
class TestImagev2(utils.TestCommand):
def setUp(self):
super(TestImagev2, self).setUp()
self.app.client_manager.image = FakeImagev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
class FakeImage(object):
"""Fake one or more images.
TODO(xiexs): Currently, only image API v2 is supported by this class.
"""
@staticmethod
def create_one_image(attrs=None):
"""Create a fake image.
:param Dictionary attrs:
A dictionary with all attributes of the image
:return:
A FakeResource object with id, name, owner, protected,
visibility, tags and size attrs
"""
attrs = attrs or {}
# Set default attribute
image_info = {
'id': str(uuid.uuid4()),
'name': 'image-name' + uuid.uuid4().hex,
'owner': 'image-owner' + uuid.uuid4().hex,
'protected': bool(random.choice([0, 1])),
'visibility': random.choice(['public', 'private']),
'tags': [uuid.uuid4().hex for r in range(2)],
}
# Overwrite default attributes if there are some attributes set
image_info.update(attrs)
# Set up the schema
model = warlock.model_factory(
IMAGE_schema,
schemas.SchemaBasedModel,
)
return model(**image_info)
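# Hypothetical usage in a test (the attribute value is made up):
#
#     image = FakeImage.create_one_image({'name': 'test-image'})
#     image.name   # -> 'test-image'; unspecified fields get randomized defaults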
@staticmethod
def create_images(attrs=None, count=2):
"""Create multiple fake images.
:param Dictionary attrs:
A dictionary with all attributes of image
:param Integer count:
The number of images to be faked
:return:
A list of FakeResource objects
"""
images = []
for n in range(0, count):
images.append(FakeImage.create_one_image(attrs))
return images
@staticmethod
def get_images(images=None, count=2):
"""Get an iterable MagicMock object with a list of faked images.
If images list is provided, then initialize the Mock object with the
list. Otherwise create one.
:param List images:
A list of FakeResource objects faking images
:param Integer count:
The number of images to be faked
:return:
An iterable Mock object with side_effect set to a list of faked
images
"""
if images is None:
images = FakeImage.create_images(count=count)
return mock.Mock(side_effect=images)
@staticmethod
def get_image_columns(image=None):
"""Get the image columns from a faked image object.
:param image:
A FakeResource object faking an image
:return:
A tuple which may include the following keys:
('id', 'name', 'owner', 'protected', 'visibility', 'tags')
"""
if image is not None:
return tuple(sorted(image))
return IMAGE_columns
@staticmethod
def get_image_data(image=None):
"""Get the image data from a faked image object.
:param image:
A FakeResource object faking an image
:return:
A tuple which may include the following values:
('image-123', 'image-foo', 'admin', False, 'public', 'bar, baz')
"""
data_list = []
if image is not None:
for x in sorted(image.keys()):
if x == 'tags':
# The 'tags' value should be wrapped in a ListColumn for display
data_list.append(
format_columns.ListColumn(getattr(image, x)))
else:
data_list.append(getattr(image, x))
return tuple(data_list)
@staticmethod
def create_one_image_member(attrs=None):
"""Create a fake image member.
:param Dictionary attrs:
A dictionary with all attributes of image member
:return:
A FakeResource object with member_id, image_id and so on
"""
attrs = attrs or {}
# Set default attribute
image_member_info = {
'member_id': 'member-id-' + uuid.uuid4().hex,
'image_id': 'image-id-' + uuid.uuid4().hex,
'status': 'pending',
}
# Overwrite default attributes if there are some attributes set
image_member_info.update(attrs)
image_member = fakes.FakeModel(
copy.deepcopy(image_member_info))
return image_member
|
|
"""
The `compat` module provides support for backwards compatibility with older
versions of Django/Python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
import django
from django.conf import settings
from django.db import connection, transaction
from django.utils import six
from django.views.generic import View
try:
import importlib # Available in Python 3.1+
except ImportError:
from django.utils import importlib # Will be removed in Django 1.9
def unicode_repr(instance):
# Get the repr of an instance, but ensure it is a unicode string
# on both python 3 (already the case) and 2 (not the case).
if six.PY2:
return repr(instance).decode('utf-8')
return repr(instance)
def unicode_to_repr(value):
# Coerce a unicode string to the correct repr return type, depending on
# the Python version. We wrap all our `__repr__` implementations with
# this and then use unicode throughout internally.
if six.PY2:
return value.encode('utf-8')
return value
def unicode_http_header(value):
# Coerce HTTP header value to unicode.
if isinstance(value, six.binary_type):
return value.decode('iso-8859-1')
return value
def total_seconds(timedelta):
# TimeDelta.total_seconds() is only available in Python 2.7
if hasattr(timedelta, 'total_seconds'):
return timedelta.total_seconds()
else:
return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0)
def distinct(queryset, base):
if settings.DATABASES[queryset.db]["ENGINE"] == "django.db.backends.oracle":
# distinct analogue for Oracle users
return base.filter(pk__in=set(queryset.values_list('pk', flat=True)))
return queryset.distinct()
# contrib.postgres only supported from 1.8 onwards.
try:
from django.contrib.postgres import fields as postgres_fields
except ImportError:
postgres_fields = None
# JSONField is only supported from 1.9 onwards
try:
from django.contrib.postgres.fields import JSONField
except ImportError:
JSONField = None
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
# django-crispy-forms is optional
try:
import crispy_forms
except ImportError:
crispy_forms = None
if django.VERSION >= (1, 6):
def clean_manytomany_helptext(text):
return text
else:
# Up to version 1.5 many to many fields automatically suffix
# the `help_text` attribute with hardcoded text.
def clean_manytomany_helptext(text):
if text.endswith(' Hold down "Control", or "Command" on a Mac, to select more than one.'):
text = text[:-69]
return text
# Django-guardian is optional. Import only if guardian is in INSTALLED_APPS
# Fixes (#1712). We keep the try/except for the test suite.
guardian = None
try:
if 'guardian' in settings.INSTALLED_APPS:
import guardian
import guardian.shortcuts # Fixes #1624
except ImportError:
pass
# MinValueValidator, MaxValueValidator et al. only accept `message` in 1.8+
if django.VERSION >= (1, 8):
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.validators import MinLengthValidator, MaxLengthValidator
else:
from django.core.validators import MinValueValidator as DjangoMinValueValidator
from django.core.validators import MaxValueValidator as DjangoMaxValueValidator
from django.core.validators import MinLengthValidator as DjangoMinLengthValidator
from django.core.validators import MaxLengthValidator as DjangoMaxLengthValidator
class MinValueValidator(DjangoMinValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinValueValidator, self).__init__(*args, **kwargs)
class MaxValueValidator(DjangoMaxValueValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxValueValidator, self).__init__(*args, **kwargs)
class MinLengthValidator(DjangoMinLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MinLengthValidator, self).__init__(*args, **kwargs)
class MaxLengthValidator(DjangoMaxLengthValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(MaxLengthValidator, self).__init__(*args, **kwargs)
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
View.http_method_names = View.http_method_names + ['patch']
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
# `separators` argument to `json.dumps()` differs between 2.x and 3.x
# See: http://bugs.python.org/issue22767
if six.PY3:
SHORT_SEPARATORS = (',', ':')
LONG_SEPARATORS = (', ', ': ')
INDENT_SEPARATORS = (',', ': ')
else:
SHORT_SEPARATORS = (b',', b':')
LONG_SEPARATORS = (b', ', b': ')
INDENT_SEPARATORS = (b',', b': ')
if django.VERSION >= (1, 8):
from django.db.models import DurationField
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
else:
DurationField = duration_string = parse_duration = None
def set_rollback():
if hasattr(transaction, 'set_rollback'):
if connection.settings_dict.get('ATOMIC_REQUESTS', False):
# If running in >=1.6 then mark a rollback as required,
# and allow it to be handled by Django.
if connection.in_atomic_block:
transaction.set_rollback(True)
elif transaction.is_managed():
# Otherwise handle it explicitly if in managed mode.
if transaction.is_dirty():
transaction.rollback()
transaction.leave_transaction_management()
else:
# transaction not managed
pass
|
|
from subprocess import check_call
import datetime
import itertools
import json
import logging
import os
import shutil
import re
import concurrent.futures
from sgfs import SGFS
from sgsession import Session, Entity
from shotgun_api3.shotgun import Fault as ShotgunFault
from . import utils
from . import versions
log = logging.getLogger(__name__)
_kwarg_to_field = {
'created_by': 'created_by',
'description': 'description',
'frames_path': 'sg_path_to_frames',
'movie_path': 'sg_path_to_movie',
# 'movie_url': 'sg_qt', # leaving this one out until we figure out URLs better.
'source_publish': 'sg_source_publish',
'source_publishes': 'sg_source_publishes', # deprecated
'trigger_event': 'sg_trigger_event_id',
}
class Publisher(object):
"""A publishing assistant.
This object encapsulates the logic for the required two-stage creation cycle
of a Shotgun ``PublishEvent``.
This object is used as a context manager such that it will clean up
the first stage of the commit if there is an exception::
>>> with sgpublish.Publisher(link=task, type="maya_scene", name=name,
... ) as publisher:
... publisher.add_file(scene_file)
The names of the parameters and attributes are largely the same as that of
the underlying ``PublishEvent`` itself, albeit with the ``"sg_"`` prefix
removed.
:param link: The Shotgun entity to attach to.
:type link: :class:`python:dict` or :class:`~sgsession.entity.Entity`
:param str type: A code for the type of publish. This is significant to the
user and publish handlers.
:param str name: A name for the stream of publishes.
:param directory: The directory to create for the publish. If ``None``, this will
be generated via the ``"{type}_publish"`` :class:`sgfs.Template
<sgfs.template.Template>` found for the given ``link``.
:type directory: str or None
:param str description: The publish's description; can be provided via an
attribute before :meth:`.commit`.
:param created_by: A Shotgun ``HumanUser`` for the publish to be attached to.
``None`` will result in a guess via :func:`.guess_shotgun_user`.
:type created_by: :class:`~sgsession.entity.Entity`, :class:`dict`, or None
:param sgfs: The SGFS to use. Will be pulled from the link's session if not
provided.
:type sgfs: :class:`~sgfs.sgfs.SGFS` or None
"""
def __init__(self, link=None, type=None, name=None, version=None, parent=None,
directory=None, sgfs=None, template=None, **kwargs
):
if not sgfs:
if isinstance(template, Entity):
sgfs = SGFS(session=template.session)
elif isinstance(link, Entity):
sgfs = SGFS(session=link.session)
else:
sgfs = SGFS()
self.sgfs = sgfs
if template:
template = sgfs.session.merge(template)
to_fetch = ['sg_link', 'sg_type', 'code', 'sg_version']
to_fetch.extend(_kwarg_to_field.itervalues())
template.fetch(to_fetch)
tpl_link, tpl_type, tpl_name, tpl_version = template.get(('sg_link', 'sg_type', 'code', 'sg_version'))
link = link or tpl_link
type = type or tpl_type
name = name or tpl_name
version = version or tpl_version
kwargs.setdefault('source_publish', template)
kwargs.setdefault('source_publishes', [template])
for key, field in _kwarg_to_field.iteritems():
kwargs.setdefault(key, template.get(field))
if not kwargs.get('thumbnail_path'):
# We certainly jump through a lot of hoops to do this...
# Perhaps this should be sgfs.get_entity_tags(entity)
publish_path = sgfs.path_for_entity(template)
if publish_path:
tags = sgfs.get_directory_entity_tags(publish_path)
tags = [tag for tag in tags if tag['entity'] == template]
if tags:
meta = tags[0].get('sgpublish', {})
thumbnail = meta.get('thumbnail')
if thumbnail:
kwargs['thumbnail_path'] = os.path.join(publish_path, thumbnail)
if not (link and type and name):
raise ValueError('requires link, type, and name')
self._type = str(type)
self._link = self.sgfs.session.merge(link)
self._name = str(name)
self._parent = parent
if re.search(r'[^\w-]', self._name):
raise ValueError('name cannot have spaces or special characters', self._name)
# Get information about the promotion for review.
self._review_version_entity = None
self._review_version_fields = kwargs.pop('review_version_fields', None)
# To only allow us to commit once.
self._committed = False
# Will be set into the tag.
self.metadata = {}
# Files to copy on commit; (src_path, dst_path)
self._files = []
# Set attributes from kwargs.
for name in (
'created_by',
'description',
'frames_path',
'movie_path',
'movie_url',
'path',
'source_publish',
'source_publishes',
'thumbnail_path',
'trigger_event',
'extra_fields',
):
setattr(self, name, kwargs.pop(name, None))
if kwargs:
raise TypeError('too many kwargs: %r' % sorted(kwargs))
# Required for normalizing.
self._directory = None
# Get everything into the right type before sending it to Shotgun.
self._normalize_attributes()
# Prep for async processes. We can do a lot of "frivolous" Shotgun
# queries at the same time since we must do at least one.
executor = concurrent.futures.ThreadPoolExecutor(8)
futures = []
# Figure out the version number (async).
if version is None:
futures.append(executor.submit(self._set_automatic_version))
else:
self._version = int(version)
# Grab all data on the link (assuming that is all that is used when
# creating publish templates).
futures.append(executor.submit(self.link.fetch_core))
# Create the review version stub (async).
if self._review_version_fields is not None:
futures.append(executor.submit(self._get_review_version))
# First stage of the publish: create an "empty" PublishEvent.
initial_data = {
'code': self.name,
'created_by': self.created_by,
'description': self.description,
'project': self.link.project(),
'sg_link': self.link,
'sg_path_to_frames': self.frames_path,
'sg_path_to_movie': self.movie_path,
'sg_qt': self.movie_url,
'sg_source_publish': self.source_publish or None, # singular
'sg_source_publishes': self.source_publishes or [], # multiple
'sg_trigger_event_id': self.trigger_event['id'] if self.trigger_event else None,
'sg_type': self.type,
'sg_version': 0, # Signifies that this is "empty".
}
initial_data.update(self.extra_fields)
try:
self.entity = self.sgfs.session.create('PublishEvent', initial_data)
except ShotgunFault:
if not self.link.exists():
raise RuntimeError('%s %d (%r) has been retired' % (link['type'], link['id'], link.get('name')))
else:
raise
# Lets have our async processes catch up.
for future in futures:
future.result()
# Manually forced directory.
if directory is not None:
self._directory_supplied = True
# Make it if it doesn't already exist, but don't care if it does.
self._directory = os.path.abspath(directory)
else:
self._directory_supplied = False
# Find a unique name using the template result as a base.
base_path = self.sgfs.path_from_template(link, '%s_publish' % type, dict(
publish=self, # For b/c.
publisher=self,
PublishEvent=self.entity,
self=self.entity, # To mimic Shotgun templates.
))
unique_iter = ('%s_%d' % (base_path, i) for i in itertools.count(1))
for path in itertools.chain([base_path], unique_iter):
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17: # File exists
raise
else:
self._directory = path
break
# Make the directory so that tools which want to manually copy files
# don't have to.
utils.makedirs(self._directory)
# If the directory is tagged with existing entities, then we cannot
# proceed. This allows one to retire a publish and then overwrite it.
tags = self.sgfs.get_directory_entity_tags(self._directory)
if any(tag['entity'].exists() for tag in tags):
raise ValueError('directory is already tagged: %r' % self._directory)
def _set_automatic_version(self):
existing_entities = self.sgfs.session.find(
'PublishEvent',
[
('sg_link', 'is', self.link),
('sg_type', 'is', self.type),
('code', 'is', self.name),
],
['sg_version', 'created_at'],
)
self._version = 1
for e in existing_entities:
# Only increment for non-failed commits.
if e['sg_version']:
self._version = e['sg_version'] + 1
self._parent = e
def _normalize_url(self, url):
if url is None:
return
if isinstance(url, dict):
return url
if isinstance(url, basestring):
return {'url': url}
return {'url': str(url)}
def _normalize_attributes(self):
self.created_by = self.created_by or self.sgfs.session.guess_user()
self.description = str(self.description or '') or None
self.movie_url = self._normalize_url(self.movie_url) or None
self.source_publishes = self.source_publishes if self.source_publishes is not None else []
if isinstance(self.trigger_event, int):
self.trigger_event = {'type': 'EventLogEntry', 'id': self.trigger_event}
else:
self.trigger_event = self.trigger_event or None
self.extra_fields = {} if self.extra_fields is None else self.extra_fields
# This is uploaded, so not relative.
self.thumbnail_path = str(self.thumbnail_path or '') or None
# Descriptive paths are relative to the directory.
if self._directory is not None:
self.frames_path = os.path.join(self._directory, self.frames_path) if self.frames_path else None
self.movie_path = os.path.join(self._directory, self.movie_path) if self.movie_path else None
self.path = os.path.join(self._directory, self.path) if self.path else None
@property
def type(self):
return self._type
@property
def link(self):
return self._link
@property
def name(self):
return self._name
@property
def id(self):
"""The ID of the PublishEvent."""
return self.entity['id']
@property
def version(self):
"""The version of the PublishEvent."""
return self._version
@property
def review_version_entity(self):
"""The stub of the review Version, or None."""
return self._review_version_entity
@property
def review_version_fields(self):
"""The stub of the review fields, or None."""
return self._review_version_fields
@property
def directory(self):
"""The path into which all files must be placed."""
return self._directory
def isabs(self, dst_name):
"""Is the given path absolute and within the publish directory?"""
return dst_name.startswith(self._directory)
def abspath(self, dst_name):
"""Get the abspath of the given name within the publish directory.
If it is already within the directory, then makes no change to the path.
"""
if self.isabs(dst_name):
return dst_name
else:
return os.path.join(self._directory, dst_name.lstrip('/'))
def add_file(self, src_path, dst_name=None, make_unique=False, method='copy', immediate=False):
"""Queue a file (or folder) to be copied into the publish.
:param str src_path: The path to copy into the publish.
:param dst_name: Where to copy it to.
:type dst_name: str or None.
``dst_name`` will default to the basename of the source path. ``dst_name``
will be treated as relative to the :attr:`.directory` if it is not already
contained within it.
"""
dst_name = dst_name or os.path.basename(src_path)
if make_unique:
dst_name = self.unique_name(dst_name)
elif self.file_exists(dst_name):
raise ValueError('the file already exists in the publish')
dst_path = self.abspath(dst_name)
if method not in ('copy', 'move'):
raise ValueError('bad add_file method %r' % method)
if immediate:
self._add_file(src_path, dst_path, method)
else:
self._files.append((src_path, dst_path, method))
return dst_path
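# Hypothetical usage (paths are made up); files are only queued here and are
# copied/moved when commit() runs:
#
#     publisher.add_file('/tmp/scene.mb')                    # -> <directory>/scene.mb
#     publisher.add_file('/tmp/scene.mb', make_unique=True)  # -> <directory>/scene_1.mb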
def _add_file(self, src_path, dst_path, method):
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
if method == 'copy':
shutil.copy(src_path, dst_path)
elif method == 'move':
shutil.move(src_path, dst_path)
else:
raise RuntimeError('bad add_file method %r' % method)
def add_files(self, files, relative_to=None, **kwargs):
for i, path in enumerate(files):
if relative_to:
# The publish will be structured relative to the given root.
rel_path = os.path.relpath(path, relative_to)
if utils.has_pardir(rel_path):
log.warning('%s is not within %s' % (path, relative_to))
rel_path = utils.strip_pardir(path)
dst_path = self.add_file(path, rel_path, **kwargs)
else:
dst_path = self.add_file(path, **kwargs)
# Set the publish's "path" to that of the first file.
if not i and self.path is None:
self.path = dst_path
def file_exists(self, dst_name):
"""If added via :meth:`.add_file`, would it clash with an existing file?"""
dst_path = self.abspath(dst_name)
return os.path.exists(dst_path) or any(x[1] == dst_path for x in self._files)
def unique_name(self, dst_name):
"""Append numbers to the end of the name if nessesary to make the name
unique for :meth:`.add_file`.
"""
if not self.file_exists(dst_name):
return dst_name
base, ext = os.path.splitext(dst_name)
for i in itertools.count(1):
unique_name = '%s_%d%s' % (base, i, ext)
if not self.file_exists(unique_name):
return unique_name
def commit(self):
# As soon as one publish attempt is made, we force a full retry.
if self._committed:
raise ValueError('publish already committed')
self._committed = True
# Cleanup all user-settable attributes that are sent to Shotgun.
self._normalize_attributes()
try:
updates = {
'description': self.description,
'sg_path': self.path,
'sg_path_to_frames': self.frames_path,
'sg_path_to_movie': self.movie_path,
'sg_qt': self.movie_url,
'sg_source_publishes': self.source_publishes or [],
'sg_trigger_event_id': self.trigger_event['id'] if self.trigger_event else None,
'sg_version': self._version,
'sg_metadata': json.dumps(self.metadata),
}
updates.update(self.extra_fields)
# Force the updated into the entity for the tag, since the Shotgun
# update may not complete by the time that we tag the directory
# or promote for review.
self.entity.update(updates)
executor = concurrent.futures.ThreadPoolExecutor(4)
futures = []
# Start the second stage of the publish.
futures.append(executor.submit(self.sgfs.session.update,
'PublishEvent',
self.entity['id'],
updates,
))
if self.thumbnail_path:
# Start the thumbnail upload in the background.
futures.append(executor.submit(self.sgfs.session.upload_thumbnail,
self.entity['type'],
self.entity['id'],
self.thumbnail_path,
))
# Schedule it for copy.
thumbnail_name = os.path.relpath(self.thumbnail_path, self.directory)
if thumbnail_name.startswith('.'):
thumbnail_name = 'thumbnail' + os.path.splitext(self.thumbnail_path)[1]
thumbnail_name = self.add_file(
self.thumbnail_path,
thumbnail_name,
make_unique=True
)
# Copy in the scheduled files.
for file_args in self._files:
self._add_file(*file_args)
            # Set permissions. I would like it to be owned by root, but we need
            # root to do that. We also leave the directory writable, but sticky.
check_call(['chmod', '-R', 'a=rX', self._directory])
check_call(['chmod', 'a+t,u+w', self._directory])
# Wait for the Shotgun updates.
for future in futures:
future.result()
# Tag the directory. Ideally we would like to do this before the
# futures are waited for, but we only want to tag the directory
# if everything was successful.
our_metadata = {}
if self._parent:
our_metadata['parent'] = self.sgfs.session.merge(self._parent).minimal
if self.thumbnail_path:
our_metadata['thumbnail'] = thumbnail_name.encode('utf8') if isinstance(thumbnail_name, unicode) else thumbnail_name
full_metadata = dict(self.metadata)
full_metadata['sgpublish'] = our_metadata
self.sgfs.tag_directory_with_entity(self._directory, self.entity, full_metadata)
            # Again, we would like to do this with the futures, but the current
# version of this depends on the directory being tagged.
if self._review_version_fields is not None:
self._promote_for_review()
except:
self.rollback()
raise
def __enter__(self):
return self
def rollback(self):
# Remove the entity's ID.
id_ = self.entity.pop('id', None) or 0
# Attempt to set the version to 0 on Shotgun.
if id_ and self.entity.get('sg_version'):
self.sgfs.session.update('PublishEvent', id_, {'sg_version': 0})
# Move the folder aside.
if not self._directory_supplied and os.path.exists(self._directory):
failed_directory = '%s.%d.failed' % (self._directory, id_)
check_call(['mv', self._directory, failed_directory])
self._directory = failed_directory
def __exit__(self, *exc_info):
if exc_info and exc_info[0] is not None:
self.rollback()
return
self.commit()
def _get_review_version(self):
"""Get a Version entity which will reference the PublishEvent once done.
MUST call :meth:`promote_for_review` to finalize this entity.
"""
if self._review_version_entity is None:
self._review_version_entity = self.sgfs.session.create('Version', {
'code': 'stub for publishing',
'created_by': self.created_by,
'project': self.link.project(),
})
return self._review_version_entity
def _promote_for_review(self):
if not self._committed:
raise RuntimeError('can only promote AFTER publishing commits')
kwargs = dict(self._review_version_fields or {})
if self._review_version_entity:
kwargs.setdefault('version_entity', self._review_version_entity)
return versions.promote_publish(self.entity, **kwargs)
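# A minimal, standalone sketch (not part of this module) of the commit-or-rollback
# context-manager pattern implemented by __enter__/__exit__ above. The DummyPublish
# class is hypothetical and only illustrates the control flow.
if __name__ == '__main__':

    class DummyPublish(object):

        def __init__(self):
            self.committed = False
            self.rolled_back = False

        def commit(self):
            self.committed = True

        def rollback(self):
            self.rolled_back = True

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            if exc_type is not None:
                # Any exception inside the ``with`` block aborts the publish.
                self.rollback()
                return  # do not swallow the exception
            self.commit()

    with DummyPublish() as publish:
        pass  # schedule files, set attributes, etc.
    assert publish.committed and not publish.rolled_back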
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1RuntimeClassList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1RuntimeClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1RuntimeClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1RuntimeClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1RuntimeClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1RuntimeClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1RuntimeClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1RuntimeClassList. # noqa: E501
Items is a list of schema objects. # noqa: E501
:return: The items of this V1RuntimeClassList. # noqa: E501
:rtype: list[V1RuntimeClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1RuntimeClassList.
Items is a list of schema objects. # noqa: E501
:param items: The items of this V1RuntimeClassList. # noqa: E501
:type: list[V1RuntimeClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1RuntimeClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1RuntimeClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1RuntimeClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1RuntimeClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1RuntimeClassList. # noqa: E501
:return: The metadata of this V1RuntimeClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1RuntimeClassList.
:param metadata: The metadata of this V1RuntimeClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RuntimeClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RuntimeClassList):
return True
return self.to_dict() != other.to_dict()
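# A brief usage sketch, assuming the generated kubernetes client package is
# installed and that V1RuntimeClass / V1ObjectMeta are the sibling generated
# models; the field values below are purely illustrative. It shows how to_dict()
# recursively serializes nested models and how __eq__ compares serialized forms.
if __name__ == '__main__':
    from kubernetes.client import V1ObjectMeta, V1RuntimeClass

    runc = V1RuntimeClass(handler='runc', metadata=V1ObjectMeta(name='runc'))
    runtime_classes = V1RuntimeClassList(api_version='node.k8s.io/v1',
                                         kind='RuntimeClassList',
                                         items=[runc])
    print(runtime_classes.to_dict())  # nested V1RuntimeClass serialized via its own to_dict()
    print(runtime_classes == V1RuntimeClassList(api_version='node.k8s.io/v1',
                                                kind='RuntimeClassList',
                                                items=[runc]))  # True: equality uses to_dict()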
|
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import platform
import subprocess
import sys
import tempfile
# -----------------------------------------------------------------------------
# Constants
PACKAGE_DIR = os.path.dirname(os.path.realpath(__file__))
WORKSPACE_DIR = os.path.dirname(PACKAGE_DIR)
SOURCES_DIR = os.path.join(PACKAGE_DIR, "Sources")
SWIFTSYNTAX_DIR = os.path.join(SOURCES_DIR, "SwiftSyntax")
SWIFTSYNTAXBUILDER_DIR = os.path.join(SOURCES_DIR, "SwiftSyntaxBuilder")
SWIFTSYNTAXPARSER_DIR = os.path.join(SOURCES_DIR, "SwiftSyntaxParser")
LLVM_DIR = os.path.join(WORKSPACE_DIR, "llvm-project", "llvm")
SWIFT_DIR = os.path.join(WORKSPACE_DIR, "swift")
INCR_TRANSFER_ROUNDTRIP_EXEC = os.path.join(
SWIFT_DIR, "utils", "incrparse", "incr_transfer_round_trip.py"
)
GYB_EXEC = os.path.join(SWIFT_DIR, "utils", "gyb")
LIT_EXEC = os.path.join(LLVM_DIR, "utils", "lit", "lit.py")
GROUP_INFO_PATH = os.path.join(PACKAGE_DIR, "utils", "group.json")
BASE_KIND_FILES = {
"Decl": "SyntaxDeclNodes.swift",
"Expr": "SyntaxExprNodes.swift",
"Pattern": "SyntaxPatternNodes.swift",
"Stmt": "SyntaxStmtNodes.swift",
"Syntax": "SyntaxNodes.swift",
"Type": "SyntaxTypeNodes.swift",
}
# -----------------------------------------------------------------------------
# Xcode Projects Generation
def xcode_gen(config):
print("** Generate SwiftSyntax as an Xcode project **")
os.chdir(PACKAGE_DIR)
swiftpm_call = ["swift", "package", "generate-xcodeproj"]
if config:
swiftpm_call.extend(["--xcconfig-overrides", config])
check_call(swiftpm_call)
# -----------------------------------------------------------------------------
# Helpers
def printerr(message):
print(message, file=sys.stderr)
def note(message):
print("--- %s: note: %s" % (os.path.basename(sys.argv[0]), message))
sys.stdout.flush()
def fatal_error(message):
printerr(message)
sys.exit(1)
def escapeCmdArg(arg):
if '"' in arg or " " in arg:
return '"%s"' % arg.replace('"', '\\"')
else:
return arg
def call(cmd, env=os.environ, stdout=None, stderr=subprocess.STDOUT, verbose=False):
if verbose:
print(" ".join([escapeCmdArg(arg) for arg in cmd]))
process = subprocess.Popen(cmd, env=env, stdout=stdout, stderr=stderr)
process.wait()
return process.returncode
def check_call(cmd, cwd=None, env=os.environ, verbose=False):
if verbose:
print(" ".join([escapeCmdArg(arg) for arg in cmd]))
return subprocess.check_call(cmd, cwd=cwd, env=env, stderr=subprocess.STDOUT)
def realpath(path):
if path is None:
return None
return os.path.realpath(path)
# -----------------------------------------------------------------------------
# Generating gyb Files
def check_gyb_exec(gyb_exec):
if not os.path.exists(gyb_exec):
fatal_error(
"""
Error: Could not find gyb.
Looking at '%s'.
Make sure you have the main swift repo checked out next to the swift-syntax
repository.
Refer to README.md for more information.
"""
% gyb_exec
)
def check_rsync():
with open(os.devnull, "w") as DEVNULL:
if call(["rsync", "--version"], stdout=DEVNULL) != 0:
fatal_error("Error: Could not find rsync.")
def generate_single_gyb_file(
gyb_exec,
gyb_file,
output_file_name,
destination,
temp_files_dir,
add_source_locations,
additional_gyb_flags,
verbose,
):
# Source locations are added by default by gyb, and cleared by passing
# `--line-directive=` (nothing following the `=`) to the generator. Our
# flag is the reverse; we don't want them by default, only if requested.
line_directive_flags = [] if add_source_locations else ["--line-directive="]
# Generate the new file
gyb_command = [
sys.executable,
gyb_exec,
gyb_file,
"-o",
os.path.join(temp_files_dir, output_file_name),
]
gyb_command += line_directive_flags
gyb_command += additional_gyb_flags
check_call(gyb_command, verbose=verbose)
# Copy the file if different from the file already present in
# gyb_generated
rsync_command = [
"rsync",
"--checksum",
os.path.join(temp_files_dir, output_file_name),
os.path.join(destination, output_file_name),
]
check_call(rsync_command, verbose=verbose)
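# A dependency-free sketch of what the rsync --checksum step above achieves:
# overwrite the destination only when the generated content actually differs,
# so untouched outputs keep their timestamps and do not trigger rebuilds.
# This helper is illustrative only and is not called anywhere in this script.
def copy_if_changed(src_path, dst_path):
    import filecmp
    import shutil

    if os.path.exists(dst_path) and filecmp.cmp(src_path, dst_path, shallow=False):
        return False  # identical contents; leave the existing file untouched
    shutil.copyfile(src_path, dst_path)
    return True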
# Generate the `.swift` files for all `.gyb` files in `sources_dir`. If
# `destination_dir` is not `None`, the resulting files will be written to
# `destination_dir`, otherwise they will be written to
# `sources_dir/gyb_generated`.
def generate_gyb_files_helper(
sources_dir,
destination_dir,
gyb_exec,
add_source_locations,
verbose,
):
temp_files_dir = tempfile.gettempdir()
make_dir_if_needed(temp_files_dir)
if destination_dir is None:
destination_dir = os.path.join(sources_dir, "gyb_generated")
make_dir_if_needed(destination_dir)
# Clear any *.swift files that are relics from the previous run.
clear_gyb_files_from_previous_run(
sources_dir, destination_dir, verbose)
# Generate the new .swift files in `temp_files_dir` and only copy them
    # to `destination_dir` if they are different from the
# files already residing there. This way we don't touch the generated .swift
# files if they haven't changed and don't trigger a rebuild.
for gyb_file in os.listdir(sources_dir):
if not gyb_file.endswith(".gyb"):
continue
gyb_file_path = os.path.join(sources_dir, gyb_file)
# Slice off the '.gyb' to get the name for the output file
output_file_name = gyb_file[:-4]
generate_single_gyb_file(
gyb_exec,
gyb_file_path,
output_file_name,
destination_dir,
temp_files_dir,
add_source_locations,
additional_gyb_flags=[],
verbose=verbose,
)
# Generate the syntax node `.swift` files from `SyntaxNodes.swift.gyb.template`.
# If `destination_dir` is not `None`, the resulting files will be written to
# `destination_dir/syntax_nodes`, otherwise they will be written to
# `sources_dir/gyb_generated/syntax_nodes`.
def generate_syntax_node_template_gyb_files(
destination_dir,
gyb_exec,
add_source_locations,
verbose
):
temp_files_dir = tempfile.gettempdir()
make_dir_if_needed(temp_files_dir)
if destination_dir is None:
destination_dir = os.path.join(SWIFTSYNTAX_DIR, "gyb_generated")
template_destination = os.path.join(destination_dir, "syntax_nodes")
make_dir_if_needed(template_destination)
for previous_gyb_gen_file in os.listdir(template_destination):
if previous_gyb_gen_file.endswith(".swift"):
if previous_gyb_gen_file not in BASE_KIND_FILES.values():
check_call(
["rm", previous_gyb_gen_file],
cwd=template_destination,
verbose=verbose,
)
for base_kind in BASE_KIND_FILES:
output_file_name = BASE_KIND_FILES[base_kind]
gyb_file = os.path.join(
SWIFTSYNTAX_DIR, "SyntaxNodes.swift.gyb.template"
)
generate_single_gyb_file(
gyb_exec,
gyb_file,
output_file_name,
template_destination,
temp_files_dir,
add_source_locations,
additional_gyb_flags=["-DEMIT_KIND=%s" % base_kind],
verbose=verbose,
)
def generate_gyb_files(
gyb_exec, verbose, add_source_locations,
swiftsyntax_destination=None, swiftsyntaxbuilder_destination=None,
swiftsyntaxparser_destination=None,
):
print("** Generating gyb Files **")
check_gyb_exec(gyb_exec)
check_rsync()
generate_gyb_files_helper(
SWIFTSYNTAX_DIR,
swiftsyntax_destination,
gyb_exec,
add_source_locations,
verbose
)
generate_gyb_files_helper(
SWIFTSYNTAXBUILDER_DIR,
swiftsyntaxbuilder_destination,
gyb_exec,
add_source_locations,
verbose
)
generate_gyb_files_helper(
SWIFTSYNTAXPARSER_DIR,
swiftsyntaxparser_destination,
gyb_exec,
add_source_locations,
verbose
)
generate_syntax_node_template_gyb_files(
swiftsyntax_destination,
gyb_exec,
add_source_locations,
verbose
)
print("Done Generating gyb Files")
def make_dir_if_needed(path):
if not os.path.exists(path):
os.makedirs(path)
# Remove any files in the `gyb_generated` directory that no longer have a
# corresponding `.gyb` file in the `Sources` directory.
def clear_gyb_files_from_previous_run(sources_dir, destination_dir, verbose):
for previous_gyb_gen_file in os.listdir(destination_dir):
if previous_gyb_gen_file.endswith(".swift"):
gyb_file = os.path.join(
sources_dir, previous_gyb_gen_file + ".gyb"
)
if not os.path.exists(gyb_file):
check_call(
["rm", previous_gyb_gen_file],
cwd=destination_dir,
verbose=verbose
)
# -----------------------------------------------------------------------------
# Building SwiftSyntax
def get_swiftpm_invocation(toolchain, action, build_dir, multiroot_data_file, release):
swift_exec = os.path.join(toolchain, "bin", "swift")
swiftpm_call = [swift_exec, action]
swiftpm_call.extend(["--package-path", PACKAGE_DIR])
if platform.system() != "Darwin":
swiftpm_call.extend(["--enable-test-discovery"])
if release:
swiftpm_call.extend(["--configuration", "release"])
if build_dir:
swiftpm_call.extend(["--build-path", build_dir])
if multiroot_data_file:
swiftpm_call.extend(["--multiroot-data-file", multiroot_data_file])
return swiftpm_call
class Builder(object):
def __init__(
self,
toolchain,
build_dir,
multiroot_data_file,
release,
verbose,
disable_sandbox=False,
):
self.swiftpm_call = get_swiftpm_invocation(
toolchain=toolchain,
action="build",
build_dir=build_dir,
multiroot_data_file=multiroot_data_file,
release=release,
)
if disable_sandbox:
self.swiftpm_call.append("--disable-sandbox")
if verbose:
self.swiftpm_call.extend(["--verbose"])
self.verbose = verbose
def build(self, product_name):
print("** Building " + product_name + " **")
command = list(self.swiftpm_call)
command.extend(["--product", product_name])
env = dict(os.environ)
env["SWIFT_BUILD_SCRIPT_ENVIRONMENT"] = "1"
# Tell other projects in the unified build to use local dependencies
env["SWIFTCI_USE_LOCAL_DEPS"] = "1"
check_call(command, env=env, verbose=self.verbose)
# -----------------------------------------------------------------------------
# Testing
def verify_generated_files(gyb_exec, verbose):
user_swiftsyntax_generated_dir = os.path.join(
SWIFTSYNTAX_DIR, "gyb_generated"
)
user_swiftsyntaxbuilder_generated_dir = os.path.join(
SWIFTSYNTAXBUILDER_DIR, "gyb_generated"
)
user_swiftsyntaxparser_generated_dir = os.path.join(
SWIFTSYNTAXPARSER_DIR, "gyb_generated"
)
self_swiftsyntax_generated_dir = tempfile.mkdtemp()
self_swiftsyntaxbuilder_generated_dir = tempfile.mkdtemp()
self_swiftsyntaxparser_generated_dir = tempfile.mkdtemp()
generate_gyb_files(
gyb_exec,
verbose=verbose,
add_source_locations=False,
swiftsyntax_destination=self_swiftsyntax_generated_dir,
swiftsyntaxbuilder_destination=self_swiftsyntaxbuilder_generated_dir,
swiftsyntaxparser_destination=self_swiftsyntaxparser_generated_dir,
)
check_generated_files_match(self_swiftsyntax_generated_dir,
user_swiftsyntax_generated_dir)
check_generated_files_match(self_swiftsyntaxbuilder_generated_dir,
user_swiftsyntaxbuilder_generated_dir)
check_generated_files_match(self_swiftsyntaxparser_generated_dir,
user_swiftsyntaxparser_generated_dir)
def check_generated_files_match(self_generated_dir, user_generated_dir):
command = [
"diff",
"--recursive",
"--exclude",
".*", # Exclude dot files like .DS_Store
"--context=0",
self_generated_dir,
user_generated_dir,
]
check_call(command)
def verify_c_syntax_nodes_match():
print("** Validating that the C data types match **")
swift_syntax_c_definitions = os.path.join(
SOURCES_DIR, "_CSwiftSyntax", "include", "c-syntax-nodes.h")
swiftc_c_definitions = os.path.join(
SWIFT_DIR, "include", "swift-c", "SyntaxParser",
"SwiftSyntaxCDataTypes.h")
check_call([
"diff",
swift_syntax_c_definitions,
swiftc_c_definitions,
])
def run_tests(
toolchain, build_dir, multiroot_data_file, release, filecheck_exec,
skip_lit_tests, verbose
):
print("** Running SwiftSyntax Tests **")
if skip_lit_tests:
lit_success = True
else:
lit_success = run_lit_tests(
toolchain=toolchain,
build_dir=build_dir,
release=release,
filecheck_exec=filecheck_exec,
verbose=verbose,
)
if not lit_success:
return False
xctest_success = run_xctests(
toolchain=toolchain,
build_dir=build_dir,
multiroot_data_file=multiroot_data_file,
release=release,
verbose=verbose,
)
if not xctest_success:
return False
return True
# -----------------------------------------------------------------------------
# Lit Tests
def check_lit_exec():
if not os.path.exists(LIT_EXEC):
fatal_error(
"""
Error: Could not find lit.py.
Looking at '%s'.
Make sure you have the llvm repo checked out next to the swift-syntax repo.
Refer to README.md for more information.
"""
% LIT_EXEC
)
def check_incr_transfer_roundtrip_exec():
if not os.path.exists(INCR_TRANSFER_ROUNDTRIP_EXEC):
fatal_error(
"""
Error: Could not find incr_transfer_round_trip.py.
Make sure you have the main swift repo checked out next to the swift-syntax
repo.
Refer to README.md for more information.
"""
)
def find_lit_test_helper_exec(toolchain, build_dir, release):
swiftpm_call = get_swiftpm_invocation(
toolchain=toolchain,
action="build",
build_dir=build_dir,
multiroot_data_file=None,
release=release,
)
swiftpm_call.extend(["--product", "lit-test-helper"])
swiftpm_call.extend(["--show-bin-path"])
bin_dir = subprocess.check_output(swiftpm_call)
    # check_output() returns bytes; decode so the join also works under Python 3.
    return os.path.join(bin_dir.strip().decode("utf-8"), "lit-test-helper")
def run_lit_tests(toolchain, build_dir, release, filecheck_exec, verbose):
print("** Running lit-based tests **")
check_lit_exec()
check_incr_transfer_roundtrip_exec()
lit_test_helper_exec = find_lit_test_helper_exec(
toolchain=toolchain, build_dir=build_dir, release=release
)
lit_call = ["python3", LIT_EXEC]
lit_call.append(os.path.join(PACKAGE_DIR, "lit_tests"))
if filecheck_exec:
lit_call.extend(["--param", "FILECHECK=" + filecheck_exec])
if lit_test_helper_exec:
lit_call.extend(["--param", "LIT_TEST_HELPER=" + lit_test_helper_exec])
lit_call.extend(
["--param", "INCR_TRANSFER_ROUND_TRIP.PY=" + INCR_TRANSFER_ROUNDTRIP_EXEC]
)
# Print all failures
lit_call.extend(["--verbose"])
# Don't show all commands if verbose is not enabled
if not verbose:
lit_call.extend(["--succinct"])
return call(lit_call, verbose=verbose) == 0
# -----------------------------------------------------------------------------
# XCTest Tests
def run_xctests(toolchain, build_dir, multiroot_data_file, release, verbose):
print("** Running XCTests **")
swiftpm_call = get_swiftpm_invocation(
toolchain=toolchain,
action="test",
build_dir=build_dir,
multiroot_data_file=multiroot_data_file,
release=release,
)
if verbose:
swiftpm_call.extend(["--verbose"])
swiftpm_call.extend(["--test-product", "SwiftSyntaxPackageTests"])
env = dict(os.environ)
env["SWIFT_BUILD_SCRIPT_ENVIRONMENT"] = "1"
# Tell other projects in the unified build to use local dependencies
env["SWIFTCI_USE_LOCAL_DEPS"] = "1"
return call(swiftpm_call, env=env, verbose=verbose) == 0
# -----------------------------------------------------------------------------
# Argument Parsing
_DESCRIPTION = """
Build and test script for SwiftSyntax.
Build SwiftSyntax by generating all necessary files from the corresponding
.swift.gyb files first. For this, SwiftSyntax needs to be checked out alongside
the main swift repo (http://github.com/apple/swift/) in the following structure
- (containing directory)
- swift
- swift-syntax
It is not necessary to build the compiler project.
The build script can also drive the test suite included in the SwiftSyntax
repo. This requires a custom build of the compiler project since it accesses
test utilities that are not shipped as part of the toolchains. See the Testing
section for arguments that need to be specified for this.
"""
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=_DESCRIPTION
)
# -------------------------------------------------------------------------
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging."
)
# -------------------------------------------------------------------------
xcode_project_group = parser.add_argument_group("Xcode Project")
xcode_project_group.add_argument(
"--generate-xcodeproj",
action="store_true",
help="Generate an Xcode project for SwiftSyntax.",
)
xcode_project_group.add_argument(
"--xcconfig-path",
help="The path to an xcconfig file for generating Xcode projct.",
)
# -------------------------------------------------------------------------
build_group = parser.add_argument_group("Build")
build_group.add_argument(
"-r", "--release", action="store_true", help="Build in release mode."
)
build_group.add_argument(
"--build-dir",
default=None,
help="The directory in which build products shall be put. If omitted "
'a directory named ".build" will be put in the swift-syntax '
"directory.",
)
build_group.add_argument(
"--add-source-locations",
action="store_true",
help="Insert ###sourceLocation comments in generated code for "
"line-directive.",
)
build_group.add_argument(
"--degyb-only",
action="store_true",
help="The script only generates swift files from gyb and skips the "
"rest of the build",
)
build_group.add_argument(
"--disable-sandbox",
action="store_true",
help="Disable sandboxes when building with SwiftPM",
)
build_group.add_argument(
"--multiroot-data-file",
help="Path to an Xcode workspace to create a unified build of "
"SwiftSyntax with other projects.",
)
build_group.add_argument(
"--toolchain",
required=True,
help="The path to the toolchain that shall be used to build " "SwiftSyntax.",
)
# -------------------------------------------------------------------------
test_group = parser.add_argument_group("Test")
test_group.add_argument("-t", "--test", action="store_true", help="Run tests")
test_group.add_argument("--skip-lit-tests", action="store_true",
help="Don't run lit-based tests"
)
test_group.add_argument(
"--filecheck-exec",
default=None,
help="Path to the FileCheck executable that was built as part of the "
"LLVM repository. If not specified, it will be looked up from "
"PATH.",
)
test_group.add_argument(
"--gyb-exec",
default=GYB_EXEC,
help="Path to the gyb tool (default: %(default)s).",
)
test_group.add_argument(
"--verify-generated-files",
action="store_true",
help="Instead of generating files using gyb, verify that the files "
"which already exist match the ones that would be generated by "
"this script.",
)
return parser.parse_args()
# -----------------------------------------------------------------------------
def main():
args = parse_args()
try:
if not args.verify_generated_files:
generate_gyb_files(
args.gyb_exec,
verbose=args.verbose,
add_source_locations=args.add_source_locations,
)
except subprocess.CalledProcessError as e:
printerr("FAIL: Generating .gyb files failed")
printerr("Executing: %s" % " ".join(e.cmd))
printerr(e.output)
sys.exit(1)
if args.verify_generated_files:
try:
success = verify_generated_files(args.gyb_exec, verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr(
"FAIL: Gyb-generated files committed to repository do "
"not match generated ones. Please re-generate the "
"gyb-files and recommit them."
)
sys.exit(1)
# Skip the rest of the build if we should perform degyb only
if args.degyb_only:
sys.exit(0)
verify_c_syntax_nodes_match()
if args.generate_xcodeproj:
xcode_gen(config=args.xcconfig_path)
sys.exit(0)
try:
builder = Builder(
toolchain=args.toolchain,
build_dir=realpath(args.build_dir),
multiroot_data_file=args.multiroot_data_file,
release=args.release,
verbose=args.verbose,
disable_sandbox=args.disable_sandbox,
)
# Until rdar://53881101 is implemented, we cannot request a build of multiple
# targets simultaneously. For now, just build one product after the other.
builder.build("SwiftSyntax")
builder.build("SwiftSyntaxParser")
# Only build lit-test-helper if we are planning to run tests
if args.test:
builder.build("lit-test-helper")
except subprocess.CalledProcessError as e:
printerr("FAIL: Building SwiftSyntax failed")
printerr("Executing: %s" % " ".join(e.cmd))
printerr(e.output)
sys.exit(1)
if args.test:
try:
success = run_tests(
toolchain=args.toolchain,
build_dir=realpath(args.build_dir),
multiroot_data_file=args.multiroot_data_file,
release=args.release,
filecheck_exec=realpath(args.filecheck_exec),
skip_lit_tests=args.skip_lit_tests,
verbose=args.verbose,
)
if not success:
# An error message has already been printed by the failing test
# suite
sys.exit(1)
else:
print("** All tests passed **")
except subprocess.CalledProcessError as e:
printerr("FAIL: Running tests failed")
printerr("Executing: %s" % " ".join(e.cmd))
printerr(e.output)
sys.exit(1)
if __name__ == "__main__":
main()
|
|
"""
Support for Alexa skill service end point.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/alexa/
"""
import asyncio
import enum
import logging
from homeassistant.components import http
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import intent
from homeassistant.util.decorator import Registry
from .const import DOMAIN, SYN_RESOLUTION_MATCH
_LOGGER = logging.getLogger(__name__)
HANDLERS = Registry()
INTENTS_API_ENDPOINT = '/api/alexa'
class SpeechType(enum.Enum):
"""The Alexa speech types."""
plaintext = 'PlainText'
ssml = 'SSML'
SPEECH_MAPPINGS = {
'plain': SpeechType.plaintext,
'ssml': SpeechType.ssml,
}
class CardType(enum.Enum):
"""The Alexa card types."""
simple = 'Simple'
link_account = 'LinkAccount'
@callback
def async_setup(hass):
"""Activate Alexa component."""
hass.http.register_view(AlexaIntentsView)
class UnknownRequest(HomeAssistantError):
"""When an unknown Alexa request is passed in."""
class AlexaIntentsView(http.HomeAssistantView):
"""Handle Alexa requests."""
url = INTENTS_API_ENDPOINT
name = 'api:alexa'
@asyncio.coroutine
def post(self, request):
"""Handle Alexa."""
hass = request.app['hass']
message = yield from request.json()
_LOGGER.debug("Received Alexa request: %s", message)
try:
response = yield from async_handle_message(hass, message)
return b'' if response is None else self.json(response)
except UnknownRequest as err:
_LOGGER.warning(str(err))
return self.json(intent_error_response(
hass, message, str(err)))
except intent.UnknownIntent as err:
_LOGGER.warning(str(err))
return self.json(intent_error_response(
hass, message,
"This intent is not yet configured within Home Assistant."))
except intent.InvalidSlotInfo as err:
_LOGGER.error("Received invalid slot data from Alexa: %s", err)
return self.json(intent_error_response(
hass, message,
"Invalid slot information received for this intent."))
except intent.IntentError as err:
_LOGGER.exception(str(err))
return self.json(intent_error_response(
hass, message, "Error handling intent."))
def intent_error_response(hass, message, error):
"""Return an Alexa response that will speak the error message."""
alexa_intent_info = message.get('request').get('intent')
alexa_response = AlexaResponse(hass, alexa_intent_info)
alexa_response.add_speech(SpeechType.plaintext, error)
return alexa_response.as_dict()
@asyncio.coroutine
def async_handle_message(hass, message):
"""Handle an Alexa intent.
Raises:
- UnknownRequest
- intent.UnknownIntent
- intent.InvalidSlotInfo
- intent.IntentError
"""
req = message.get('request')
req_type = req['type']
handler = HANDLERS.get(req_type)
if not handler:
raise UnknownRequest('Received unknown request {}'.format(req_type))
return (yield from handler(hass, message))
@HANDLERS.register('SessionEndedRequest')
@asyncio.coroutine
def async_handle_session_end(hass, message):
"""Handle a session end request."""
return None
@HANDLERS.register('IntentRequest')
@HANDLERS.register('LaunchRequest')
@asyncio.coroutine
def async_handle_intent(hass, message):
"""Handle an intent request.
Raises:
- intent.UnknownIntent
- intent.InvalidSlotInfo
- intent.IntentError
"""
req = message.get('request')
alexa_intent_info = req.get('intent')
alexa_response = AlexaResponse(hass, alexa_intent_info)
if req['type'] == 'LaunchRequest':
intent_name = message.get('session', {}) \
.get('application', {}) \
.get('applicationId')
else:
intent_name = alexa_intent_info['name']
intent_response = yield from intent.async_handle(
hass, DOMAIN, intent_name,
{key: {'value': value} for key, value
in alexa_response.variables.items()})
for intent_speech, alexa_speech in SPEECH_MAPPINGS.items():
if intent_speech in intent_response.speech:
alexa_response.add_speech(
alexa_speech,
intent_response.speech[intent_speech]['speech'])
break
if 'simple' in intent_response.card:
alexa_response.add_card(
CardType.simple, intent_response.card['simple']['title'],
intent_response.card['simple']['content'])
return alexa_response.as_dict()
def resolve_slot_synonyms(key, request):
"""Check slot request for synonym resolutions."""
# Default to the spoken slot value if more than one or none are found. For
# reference to the request object structure, see the Alexa docs:
# https://tinyurl.com/ybvm7jhs
resolved_value = request['value']
if ('resolutions' in request and
'resolutionsPerAuthority' in request['resolutions'] and
len(request['resolutions']['resolutionsPerAuthority']) >= 1):
# Extract all of the possible values from each authority with a
# successful match
possible_values = []
for entry in request['resolutions']['resolutionsPerAuthority']:
if entry['status']['code'] != SYN_RESOLUTION_MATCH:
continue
possible_values.extend([item['value']['name']
for item
in entry['values']])
# If there is only one match use the resolved value, otherwise the
# resolution cannot be determined, so use the spoken slot value
if len(possible_values) == 1:
resolved_value = possible_values[0]
else:
_LOGGER.debug(
'Found multiple synonym resolutions for slot value: {%s: %s}',
key,
request['value']
)
return resolved_value
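# A self-contained example of the slot payload shape resolve_slot_synonyms()
# expects (the slot name and values below are illustrative, not from a real
# Alexa request). With exactly one successful resolution the resolved name is
# returned; with zero or several matches the spoken value is kept.
_EXAMPLE_SLOT_REQUEST = {
    'value': 'lounge',
    'resolutions': {
        'resolutionsPerAuthority': [{
            'status': {'code': SYN_RESOLUTION_MATCH},
            'values': [{'value': {'name': 'living room'}}],
        }],
    },
}
# resolve_slot_synonyms('Room', _EXAMPLE_SLOT_REQUEST) == 'living room'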
class AlexaResponse:
"""Help generating the response for Alexa."""
def __init__(self, hass, intent_info):
"""Initialize the response."""
self.hass = hass
self.speech = None
self.card = None
self.reprompt = None
self.session_attributes = {}
self.should_end_session = True
self.variables = {}
# Intent is None if request was a LaunchRequest or SessionEndedRequest
if intent_info is not None:
for key, value in intent_info.get('slots', {}).items():
# Only include slots with values
if 'value' not in value:
continue
_key = key.replace('.', '_')
self.variables[_key] = resolve_slot_synonyms(key, value)
def add_card(self, card_type, title, content):
"""Add a card to the response."""
assert self.card is None
card = {
"type": card_type.value
}
if card_type == CardType.link_account:
self.card = card
return
card["title"] = title
card["content"] = content
self.card = card
def add_speech(self, speech_type, text):
"""Add speech to the response."""
assert self.speech is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
self.speech = {
'type': speech_type.value,
key: text
}
def add_reprompt(self, speech_type, text):
"""Add reprompt if user does not answer."""
assert self.reprompt is None
key = 'ssml' if speech_type == SpeechType.ssml else 'text'
self.reprompt = {
'type': speech_type.value,
key: text.async_render(self.variables)
}
def as_dict(self):
"""Return response in an Alexa valid dict."""
response = {
'shouldEndSession': self.should_end_session
}
if self.card is not None:
response['card'] = self.card
if self.speech is not None:
response['outputSpeech'] = self.speech
if self.reprompt is not None:
response['reprompt'] = {
'outputSpeech': self.reprompt
}
return {
'version': '1.0',
'sessionAttributes': self.session_attributes,
'response': response,
}
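# A quick standalone sketch of the payload produced above. AlexaResponse only
# stores ``hass`` and ``intent_info`` in __init__, so None is passed for both
# here purely for illustration.
if __name__ == '__main__':
    demo = AlexaResponse(None, None)
    demo.add_speech(SpeechType.plaintext, 'Turning on the lights.')
    print(demo.as_dict())
    # {'version': '1.0', 'sessionAttributes': {},
    #  'response': {'shouldEndSession': True,
    #               'outputSpeech': {'type': 'PlainText',
    #                                'text': 'Turning on the lights.'}}}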
|
|
import numpy as np
import nibabel as nb
class Masker(object):
""" Handles vectorization/masking/unmasking of images. """
def __init__(self, volume, layers=None):
""" Initialize a new Masker.
Args:
volume: A volume indicating the global space within which all
subsequent layers must reside. Any voxel in the mask with a
                non-zero value is considered valid for analyses. Can be either
an image filename or a NiBabel image.
layers: Optional masking layers to add; see docstring for add().
"""
if isinstance(volume, basestring):
volume = nb.load(volume)
self.volume = volume
data = self.volume.get_data()
self.dims = data.shape
self.vox_dims = self.get_header().get_zooms()
self.full = np.float64(data.ravel())
self.global_mask = np.where(self.full)
self.reset()
if layers is not None:
self.add(layers)
def reset(self):
""" Reset/remove all layers, keeping only the initial volume. """
self.layers = {}
self.stack = []
self.set_mask()
self.n_vox_in_vol = len(np.where(self.current_mask)[0])
    def add(self, layers, above=None, below=None):
        """ Add one or more layers to the stack of masking layers.
        Args:
            layers: A string, NiBabel image, list, or dict. If anything other
                than a dict is passed, assigns sequential layer names based on
                the current position in stack; if a dict, uses key as the name
                and value as the mask image.
            above: Optional lower threshold; voxels in the added layer(s) with
                values below this are zeroed out.
            below: Optional upper threshold; voxels in the added layer(s) with
                values above this are zeroed out.
        """
def add_named_layer(name, image):
image = self.get_image(image, output='vector')
if above is not None:
image[image < above] = 0.
if below is not None:
image[image > below] = 0.
self.layers[name] = image
self.stack.append(name)
if isinstance(layers, dict):
for (name, image) in layers.items():
add_named_layer(name, image)
else:
if not isinstance(layers, list):
layers = [layers]
for image in layers:
name = 'layer_%d' % len(self.stack)
add_named_layer(name, image)
self.set_mask()
def remove(self, layers):
""" Remove one or more layers from the stack of masking layers.
Args:
layers: An int, string or list of strings and/or ints. Ints are
interpreted as indices in the stack to remove; strings are
interpreted as names of layers to remove. Negative ints will
also work--i.e., remove(-1) will drop the last layer added.
"""
if not isinstance(layers, list):
layers = [layers]
for l in layers:
if isinstance(l, basestring):
if l not in self.layers:
raise ValueError("There's no image/layer named '%s' in "
"the masking stack!" % l)
self.stack.remove(l)
else:
l = self.stack.pop(l)
del self.layers[l]
self.set_mask()
def get_image(self, image, output='vector'):
""" A flexible method for transforming between different
representations of image data.
Args:
image: The input image. Can be a string (filename of image),
NiBabel image, N-dimensional array (must have same shape as
self.volume), or vectorized image data (must have same length
as current conjunction mask).
output: The format of the returned image representation. Must be
one of:
'vector': A 1D vectorized array
'array': An N-dimensional array, with shape = self.volume.shape
'image': A NiBabel image
Returns: An object containing image data; see output options above.
"""
if isinstance(image, basestring):
image = nb.load(image)
if type(image).__module__.startswith('nibabel'):
if output == 'image':
return image
image = image.get_data()
if not type(image).__module__.startswith('numpy'):
raise ValueError("Input image must be a string, a NiBabel image, "
"or a numpy array.")
if image.shape[:3] == self.volume.shape:
if output == 'image':
return nb.nifti1.Nifti1Image(image, None, self.get_header())
elif output == 'array':
return image
else:
image = image.ravel()
if output == 'vector':
return image.ravel()
image = np.reshape(image, self.volume.shape)
if output == 'array':
return image
return nb.nifti1.Nifti1Image(image, None, self.get_header())
def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):
""" Vectorize an image and mask out all invalid voxels.
Args:
            image: The image to vectorize and mask. Input can be any object
handled by get_image().
layers: Which mask layers to use (specified as int, string, or
list of ints and strings). When None, applies the conjunction
of all layers.
nan_to_num: boolean indicating whether to convert NaNs to 0.
in_global_mask: Whether to return the resulting masked vector in
the globally masked space (i.e., n_voxels =
len(self.global_mask)). If False (default), returns in the full
image space (i.e., n_voxels = len(self.volume)).
Returns:
A 1D NumPy array of in-mask voxels.
"""
self.set_mask(layers)
image = self.get_image(image, output='vector')
if in_global_mask:
masked_data = image[self.global_mask]
masked_data[~self.get_mask(in_global_mask=True)] = 0
else:
masked_data = image[self.current_mask]
if nan_to_num:
masked_data = np.nan_to_num(masked_data)
return masked_data
def unmask(self, data, layers=None, output='array'):
""" Reconstruct a masked vector into the original 3D volume.
Args:
data: The 1D vector to reconstruct. (Can also be a 2D vector where
the second dimension is time, but then output will always
be set to 'array'--i.e., a 4D image will be returned.)
layers: Which mask layers to use (specified as int, string, or list
of ints and strings). When None, applies the conjunction of all
layers. Note that the layers specified here must exactly match
the layers used in the mask() operation, otherwise the shape of
the mask will be incorrect and bad things will happen.
output: What kind of object to return. See options in get_image().
By default, returns an N-dimensional array of reshaped data.
"""
self.set_mask(layers)
if data.ndim == 2:
n_volumes = data.shape[1]
# Assume 1st dimension is voxels, 2nd is time
# but we generate x,y,z,t volume
image = np.zeros(self.full.shape + (n_volumes,))
image[self.current_mask, :] = data
image = np.reshape(image, self.volume.shape + (n_volumes,))
else:
# img = self.full.copy()
image = np.zeros(self.full.shape)
image[self.current_mask] = data
return self.get_image(image, output)
    def get_mask(self, layers=None, output='vector', in_global_mask=True):
        """ Return the conjunction of all specified masking layers (the
        original volume is always included in the conjunction).
        Args:
            layers: Which layers to include. See documentation for add() for
                format.
            output: Format of the returned mask; see get_image() for options.
                Forced to 'vector' when in_global_mask is True.
            in_global_mask: Whether to return only the voxels that fall within
                the global mask (i.e., self.volume) rather than the full image
                space.
        """
if in_global_mask:
output = 'vector'
if layers is None:
layers = self.layers.keys()
elif not isinstance(layers, list):
layers = [layers]
layers = map(lambda x: x if isinstance(x, basestring)
else self.stack[x], layers)
layers = [self.layers[l] for l in layers if l in self.layers]
# Always include the original volume
layers.append(self.full)
layers = np.vstack(layers).T.astype(bool)
mask = layers.all(axis=1)
mask = self.get_image(mask, output)
return mask[self.global_mask] if in_global_mask else mask
def set_mask(self, layers=None):
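        """ Recompute self.current_mask as the conjunction of the given layers
        (all layers when None) and update the in-mask voxel count. """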
self.current_mask = self.get_mask(layers, in_global_mask=False)
self.n_vox_in_mask = len(np.where(self.current_mask)[0])
def get_header(self):
""" A wrapper for the NiBabel method. """
return self.volume.get_header()
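# A short, self-contained sketch (synthetic 4x4x4 data, arbitrary values) of the
# typical mask -> unmask round trip. Like the module above, it relies on the
# legacy Python 2 / nibabel API (basestring, get_data, get_header).
if __name__ == '__main__':
    affine = np.eye(4)
    volume = nb.Nifti1Image(np.ones((4, 4, 4)), affine)
    layer = nb.Nifti1Image(np.random.rand(4, 4, 4), affine)
    masker = Masker(volume)
    masker.add({'threshold': layer}, above=0.5)  # zero out voxels where layer < 0.5
    vec = masker.mask(layer)                     # 1D vector of in-mask voxels
    img = masker.unmask(vec, output='image')     # back to a 3D NiBabel image
    print(vec.shape)
    print(img.shape)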
|
|
import logging
from dateutil.relativedelta import relativedelta
from dataactbroker.helpers.generation_helper import a_file_query, d_file_query, copy_file_generation_to_job
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.function_bag import (mark_job_status, filename_fyp_sub_format, filename_fyp_format,
get_timestamp)
from dataactcore.models.jobModels import Job
from dataactcore.models.lookups import DETACHED_FILENAMES, SUBMISSION_FILENAMES
from dataactcore.utils import fileA, fileD1, fileD2, fileE_F
from dataactcore.utils.responseException import ResponseException
from dataactvalidator.filestreaming.csv_selection import write_stream_query
logger = logging.getLogger(__name__)
class FileGenerationManager:
""" Responsible for managing the generation of all files.
Attributes:
sess: Current database session
is_local: A boolean flag indicating whether the application is being run locally or not
file_generation: FileGeneration object representing a D file generation task
job: Job object for an E or F file generation task
file_type: File type letter name
"""
def __init__(self, sess, is_local, file_generation=None, job=None):
""" Initialize the FileGeneration Manager.
Args:
sess: Current database session
is_local: A boolean flag indicating whether the application is being run locally or not
file_generation: FileGeneration object representing a D file generation task
job: Job object for an E or F file generation task
"""
self.sess = sess
self.is_local = is_local
self.file_generation = file_generation
self.job = job
self.file_type = job.file_type.letter_name if job else file_generation.file_type
self.element_numbers = file_generation.element_numbers if file_generation else False
def generate_file(self, agency_code=None):
""" Generates a file based on the FileGeneration object and updates any Jobs referencing it """
fillin_vals = {'timestamp': get_timestamp()}
if self.file_generation:
fillin_vals.update({
'start': self.file_generation.start_date.strftime('%Y%m%d'),
'end': self.file_generation.end_date.strftime('%Y%m%d'),
'agency_type': self.file_generation.agency_type,
'ext': '.{}'.format(self.file_generation.file_format),
})
if self.job and self.job.submission:
# Submission Files
fillin_vals.update({
'submission_id': self.job.submission_id,
'FYP': filename_fyp_sub_format(self.job.submission),
})
file_name = SUBMISSION_FILENAMES[self.file_type].format(**fillin_vals)
else:
# Detached Files
if self.job and self.job.file_type.letter_name == 'A':
period_date = self.job.end_date + relativedelta(months=3)
fillin_vals['FYP'] = filename_fyp_format(period_date.year, period_date.month, False)
file_name = DETACHED_FILENAMES[self.file_type].format(**fillin_vals)
if self.is_local:
file_path = "".join([CONFIG_BROKER['broker_files'], file_name])
else:
file_path = "".join(["None/", file_name])
# Generate the file and upload to S3
log_data = {'message': 'Finished file {} generation'.format(self.file_type), 'message_type': 'ValidatorInfo',
'file_type': self.file_type, 'file_path': file_path}
if self.file_generation:
self.generate_d_file(file_path)
log_data.update({
'agency_code': self.file_generation.agency_code, 'agency_type': self.file_generation.agency_type,
'start_date': self.file_generation.start_date, 'end_date': self.file_generation.end_date,
'file_generation_id': self.file_generation.file_generation_id
})
elif self.job.file_type.letter_name in ['A', 'E', 'F']:
log_data['job_id'] = self.job.job_id
mark_job_status(self.job.job_id, 'running')
if self.job.file_type.letter_name == 'A':
if not agency_code:
raise ResponseException('Agency code not provided for an A file generation')
self.generate_a_file(agency_code, file_path)
else:
# Call self.generate_%s_file() where %s is e or f based on the Job's file_type
file_type_lower = self.job.file_type.letter_name.lower()
getattr(self, 'generate_%s_file' % file_type_lower)()
mark_job_status(self.job.job_id, 'finished')
else:
e = 'No FileGeneration object for D file generation.' if self.file_type in ['D1', 'D2'] else \
'Cannot generate file for {} file type.'.format(self.file_type if self.file_type else 'empty')
raise ResponseException(e)
logger.info(log_data)
def generate_d_file(self, file_path):
""" Write file D1 or D2 to an appropriate CSV. """
log_data = {
'message': 'Starting file {} generation'.format(self.file_type), 'message_type': 'ValidatorInfo',
'agency_code': self.file_generation.agency_code, 'agency_type': self.file_generation.agency_type,
'start_date': self.file_generation.start_date, 'end_date': self.file_generation.end_date,
'file_generation_id': self.file_generation.file_generation_id, 'file_type': self.file_type,
'file_format': self.file_generation.file_format, 'file_path': file_path,
'element_numbers': self.element_numbers
}
logger.info(log_data)
original_filename = file_path.split('/')[-1]
local_file = "".join([CONFIG_BROKER['d_file_storage_path'], original_filename])
header_index = 0
# Prepare file data
if self.file_type == 'D1':
file_utils = fileD1
if self.file_generation.element_numbers:
header_index = 1
elif self.file_type == 'D2':
file_utils = fileD2
else:
raise ResponseException('Failed to generate_d_file with file_type:{} (must be D1 or D2).'.format(
self.file_type))
headers = [val[header_index] for key, val in file_utils.mapping.items()]
log_data['message'] = 'Writing {} file {}: {}'.format(self.file_type, self.file_generation.file_format.upper(),
original_filename)
logger.info(log_data)
query_utils = {
"sess": self.sess, "file_utils": file_utils, "agency_code": self.file_generation.agency_code,
"agency_type": self.file_generation.agency_type, "start": self.file_generation.start_date,
"end": self.file_generation.end_date}
logger.debug({'query_utils': query_utils})
# Generate the file locally, then place in S3
write_stream_query(self.sess, d_file_query(query_utils), local_file, file_path, self.is_local, header=headers,
file_format=self.file_generation.file_format)
log_data['message'] = 'Finished writing {} file {}: {}'.format(self.file_type,
self.file_generation.file_format.upper(),
original_filename)
logger.info(log_data)
self.file_generation.file_path = file_path
self.sess.commit()
for job in self.sess.query(Job).filter_by(file_generation_id=self.file_generation.file_generation_id).all():
copy_file_generation_to_job(job, self.file_generation, self.is_local)
def generate_e_file(self):
""" Write file E to an appropriate CSV. """
log_data = {'message': 'Starting file E generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
'submission_id': self.job.submission_id, 'file_type': 'executive_compensation'}
logger.info(log_data)
file_e_sql = fileE_F.generate_file_e_sql(self.job.submission_id)
log_data['message'] = 'Writing E file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
# Generate the file and put in S3
write_stream_query(self.sess, file_e_sql, self.job.original_filename, self.job.filename, self.is_local,
generate_headers=True, generate_string=False)
log_data['message'] = 'Finished writing E file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
def generate_f_file(self):
""" Write rows from fileF.generate_f_rows to an appropriate CSV. """
log_data = {'message': 'Starting file F generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
'submission_id': self.job.submission_id, 'file_type': 'sub_award'}
logger.info(log_data)
file_f_sql = fileE_F.generate_file_f_sql(self.job.submission_id)
# writing locally first without uploading
log_data['message'] = 'Writing F file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
# Generate the file and put in S3
write_stream_query(self.sess, file_f_sql, self.job.original_filename, self.job.filename, self.is_local,
generate_headers=True, generate_string=False)
log_data['message'] = 'Finished writing F file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
def generate_a_file(self, agency_code, file_path):
""" Write file A to an appropriate CSV. """
self.job.filename = file_path
self.job.original_filename = file_path.split('/')[-1]
self.sess.commit()
log_data = {'message': 'Starting file A generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
'agency_code': agency_code, 'file_type': self.job.file_type.letter_name,
'start_date': self.job.start_date, 'end_date': self.job.end_date,
'filename': self.job.original_filename}
logger.info(log_data)
local_file = "".join([CONFIG_BROKER['d_file_storage_path'], self.job.original_filename])
headers = [val[0] for key, val in fileA.mapping.items()]
# add 3 months to account for fiscal year
period_date = self.job.end_date + relativedelta(months=3)
log_data['message'] = 'Writing A file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
query_utils = {"agency_code": agency_code, "period": period_date.month, "year": period_date.year,
"sess": self.sess}
logger.debug({'query_utils': query_utils})
# Generate the file and put in S3
write_stream_query(self.sess, a_file_query(query_utils), local_file, self.job.filename, self.is_local,
header=headers)
log_data['message'] = 'Finished writing A file CSV: {}'.format(self.job.original_filename)
logger.info(log_data)
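# For orientation, file names are assembled by formatting a template from
# DETACHED_FILENAMES / SUBMISSION_FILENAMES with the fill-in values built in
# generate_file(). The template below is hypothetical and only illustrates the
# mechanism:
#   'File-{file_type}_{start}_{end}{ext}'.format(
#       file_type='D1', start='20240101', end='20240331', ext='.csv')
#   -> 'File-D1_20240101_20240331.csv'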
|
|
#!/usr/bin/env python
#
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate test functions for use with mock_server_t.
Defines functions like future_cursor_next in future-functions.h and
future-functions.c, which defer a libmongoc operation to a background thread
via functions like background_cursor_next. Also defines functions like
future_value_set_bson_ptr and future_value_get_bson_ptr which support the
future / background functions, and functions like future_get_bson_ptr which
wait for a future to resolve, then return its value.
These future functions are used in conjunction with mock_server_t to
conveniently test libmongoc wire protocol operations.
Written for Python 2.6+, requires Jinja 2 for templating.
"""
from collections import namedtuple
from os.path import basename, dirname, join as joinpath, normpath
from jinja2 import Environment, FileSystemLoader # Please "pip install jinja2".
this_dir = dirname(__file__)
template_dir = joinpath(this_dir, 'future_function_templates')
mock_server_dir = normpath(joinpath(this_dir, '../tests/mock_server'))
# Add additional types here. Use typedefs for derived types so they can
# be named with one symbol.
typedef = namedtuple("typedef", ["name", "typedef"])
# These are typedef'ed if necessary in future-value.h, and added to the union
# of possible future_value_t.value types. future_value_t getters and setters
# are generated for all types, as well as future_t getters.
typedef_list = [
# Fundamental.
typedef("bool", None),
typedef("char_ptr", "char *"),
typedef("char_ptr_ptr", "char **"),
typedef("int", None),
typedef("int64_t", None),
typedef("size_t", None),
typedef("ssize_t", None),
typedef("uint32_t", None),
# Const fundamental.
typedef("const_char_ptr", "const char *"),
# libbson.
typedef("bson_error_ptr", "bson_error_t *"),
typedef("bson_ptr", "bson_t *"),
# Const libbson.
typedef("const_bson_ptr", "const bson_t *"),
typedef("const_bson_ptr_ptr", "const bson_t **"),
# libmongoc.
typedef("mongoc_bulk_operation_ptr", "mongoc_bulk_operation_t *"),
typedef("mongoc_client_ptr", "mongoc_client_t *"),
typedef("mongoc_collection_ptr", "mongoc_collection_t *"),
typedef("mongoc_cursor_ptr", "mongoc_cursor_t *"),
typedef("mongoc_database_ptr", "mongoc_database_t *"),
typedef("mongoc_gridfs_file_ptr", "mongoc_gridfs_file_t *"),
typedef("mongoc_gridfs_ptr", "mongoc_gridfs_t *"),
typedef("mongoc_insert_flags_t", None),
typedef("mongoc_iovec_ptr", "mongoc_iovec_t *"),
typedef("mongoc_query_flags_t", None),
typedef("const_mongoc_index_opt_t", "const mongoc_index_opt_t *"),
typedef("mongoc_server_description_ptr", "mongoc_server_description_t *"),
typedef("mongoc_ss_optype_t", None),
typedef("mongoc_topology_ptr", "mongoc_topology_t *"),
typedef("mongoc_write_concern_ptr", "mongoc_write_concern_t *"),
# Const libmongoc.
typedef("const_mongoc_find_and_modify_opts_ptr", "const mongoc_find_and_modify_opts_t *"),
typedef("const_mongoc_read_prefs_ptr", "const mongoc_read_prefs_t *"),
typedef("const_mongoc_write_concern_ptr", "const mongoc_write_concern_t *"),
]
type_list = [T.name for T in typedef_list]
type_list_with_void = type_list + ['void']
param = namedtuple("param", ["type_name", "name"])
future_function = namedtuple("future_function", ["ret_type", "name", "params"])
# Add additional functions to be tested here. For a name like "cursor_next", we
# generate two functions: future_cursor_next to prepare the future_t and launch
# a background thread, and background_cursor_next to run on the thread and
# resolve the future.
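# For instance, a hypothetical new entry for an operation returning bool and
# taking a client plus an error out-parameter would be declared like this
# (illustration only; it is not part of the real list below):
#   future_function("bool",
#                   "mongoc_client_some_operation",
#                   [param("mongoc_client_ptr", "client"),
#                    param("bson_error_ptr", "error")])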
future_functions = [
future_function("uint32_t",
"mongoc_bulk_operation_execute",
[param("mongoc_bulk_operation_ptr", "bulk"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_client_command_simple",
[param("mongoc_client_ptr", "client"),
param("const_char_ptr", "db_name"),
param("const_bson_ptr", "command"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_client_read_command_with_opts",
[param("mongoc_client_ptr", "client"),
param("const_char_ptr", "db_name"),
param("const_bson_ptr", "command"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("const_bson_ptr", "opts"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_client_write_command_with_opts",
[param("mongoc_client_ptr", "client"),
param("const_char_ptr", "db_name"),
param("const_bson_ptr", "command"),
param("const_bson_ptr", "opts"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_client_read_write_command_with_opts",
[param("mongoc_client_ptr", "client"),
param("const_char_ptr", "db_name"),
param("const_bson_ptr", "command"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("const_bson_ptr", "opts"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("void",
"mongoc_client_kill_cursor",
[param("mongoc_client_ptr", "client"),
param("int64_t", "cursor_id")]),
future_function("mongoc_cursor_ptr",
"mongoc_collection_aggregate",
[param("mongoc_collection_ptr", "collection"),
param("mongoc_query_flags_t", "flags"),
param("const_bson_ptr", "pipeline"),
param("const_bson_ptr", "options"),
param("const_mongoc_read_prefs_ptr", "read_prefs")]),
future_function("int64_t",
"mongoc_collection_count",
[param("mongoc_collection_ptr", "collection"),
param("mongoc_query_flags_t", "flags"),
param("const_bson_ptr", "query"),
param("int64_t", "skip"),
param("int64_t", "limit"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("bson_error_ptr", "error")]),
future_function("int64_t",
"mongoc_collection_count_with_opts",
[param("mongoc_collection_ptr", "collection"),
param("mongoc_query_flags_t", "flags"),
param("const_bson_ptr", "query"),
param("int64_t", "skip"),
param("int64_t", "limit"),
param("const_bson_ptr", "opts"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_create_index_with_opts",
[param("mongoc_collection_ptr", "collection"),
param("const_bson_ptr", "keys"),
param("const_mongoc_index_opt_t", "opt"),
param("bson_ptr", "opts"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_find_and_modify_with_opts",
[param("mongoc_collection_ptr", "collection"),
param("const_bson_ptr", "query"),
param("const_mongoc_find_and_modify_opts_ptr", "opts"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_find_and_modify",
[param("mongoc_collection_ptr", "collection"),
param("const_bson_ptr", "query"),
param("const_bson_ptr", "sort"),
param("const_bson_ptr", "update"),
param("const_bson_ptr", "fields"),
param("bool", "_remove"),
param("bool", "upsert"),
param("bool", "_new"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("mongoc_cursor_ptr",
"mongoc_collection_find_indexes",
[param("mongoc_collection_ptr", "collection"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_stats",
[param("mongoc_collection_ptr", "collection"),
param("const_bson_ptr", "options"),
param("bson_ptr", "stats"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_insert",
[param("mongoc_collection_ptr", "collection"),
param("mongoc_insert_flags_t", "flags"),
param("const_bson_ptr", "document"),
param("const_mongoc_write_concern_ptr", "write_concern"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_collection_insert_bulk",
[param("mongoc_collection_ptr", "collection"),
param("mongoc_insert_flags_t", "flags"),
param("const_bson_ptr_ptr", "documents"),
param("uint32_t", "n_documents"),
param("const_mongoc_write_concern_ptr", "write_concern"),
param("bson_error_ptr", "error")]),
future_function("void",
"mongoc_cursor_destroy",
[param("mongoc_cursor_ptr", "cursor")]),
future_function("bool",
"mongoc_cursor_next",
[param("mongoc_cursor_ptr", "cursor"),
param("const_bson_ptr_ptr", "doc")]),
future_function("char_ptr_ptr",
"mongoc_client_get_database_names",
[param("mongoc_client_ptr", "client"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_database_command_simple",
[param("mongoc_database_ptr", "database"),
param("bson_ptr", "command"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("bson_ptr", "reply"),
param("bson_error_ptr", "error")]),
future_function("char_ptr_ptr",
"mongoc_database_get_collection_names",
[param("mongoc_database_ptr", "database"),
param("bson_error_ptr", "error")]),
future_function("ssize_t",
"mongoc_gridfs_file_readv",
[param("mongoc_gridfs_file_ptr", "file"),
param("mongoc_iovec_ptr", "iov"),
param("size_t", "iovcnt"),
param("size_t", "min_bytes"),
param("uint32_t", "timeout_msec")]),
future_function("mongoc_gridfs_file_ptr",
"mongoc_gridfs_find_one",
[param("mongoc_gridfs_ptr", "gridfs"),
param("const_bson_ptr", "query"),
param("bson_error_ptr", "error")]),
future_function("bool",
"mongoc_gridfs_file_remove",
[param("mongoc_gridfs_file_ptr", "file"),
param("bson_error_ptr", "error")]),
future_function("int",
"mongoc_gridfs_file_seek",
[param("mongoc_gridfs_file_ptr", "file"),
param("int64_t", "delta"),
param("int", "whence")]),
future_function("ssize_t",
"mongoc_gridfs_file_writev",
[param("mongoc_gridfs_file_ptr", "file"),
param("mongoc_iovec_ptr", "iov"),
param("size_t", "iovcnt"),
param("uint32_t", "timeout_msec")]),
future_function("mongoc_server_description_ptr",
"mongoc_topology_select",
[param("mongoc_topology_ptr", "topology"),
param("mongoc_ss_optype_t", "optype"),
param("const_mongoc_read_prefs_ptr", "read_prefs"),
param("bson_error_ptr", "error")]),
future_function("mongoc_gridfs_ptr",
"mongoc_client_get_gridfs",
[param("mongoc_client_ptr", "client"),
param("const_char_ptr", "db"),
param("const_char_ptr", "prefix"),
param("bson_error_ptr", "error")]),
]
for fn in future_functions:
if fn.ret_type not in type_list_with_void:
raise Exception('bad type "%s"\n\nin %s' % (fn.ret_type, fn))
for p in fn.params:
if p.type_name not in type_list:
raise Exception('bad type "%s"\n\nin %s' % (p.type_name, fn))
header_comment = """/**************************************************
*
* Generated by build/%s.
*
* DO NOT EDIT THIS FILE.
*
*************************************************/""" % basename(__file__)
def future_function_name(fn):
if fn.name.startswith('mongoc'):
# E.g. future_cursor_next().
return 'future' + fn.name[len('mongoc'):]
else:
# E.g. future__mongoc_client_kill_cursor().
return 'future_' + fn.name
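# Illustrative examples of the rule above (not generated output):
#   "mongoc_cursor_next"                              -> "future_cursor_next"
#   "_mongoc_client_kill_cursor" (hypothetical name)  -> "future__mongoc_client_kill_cursor"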
env = Environment(loader=FileSystemLoader(template_dir))
env.filters['future_function_name'] = future_function_name
files = ["future.h",
"future.c",
"future-value.h",
"future-value.c",
"future-functions.h",
"future-functions.c"]
for file_name in files:
print(file_name)
with open(joinpath(mock_server_dir, file_name), 'w+') as f:
t = env.get_template(file_name + ".template")
f.write(t.render(globals()))
|
|
import sys
import struct
def usage():
    print("%s [filename]" % (sys.argv[0]))
if len(sys.argv) < 2:
    usage()
    sys.exit(0)
class NotMP4FormatException(Exception):
pass
class CHUNK(object):
def __init__(self, pos):
self.pos = pos
self.samples_count = 0
self.samples_desc_idx = 0
self.size = 0
def to_annexb(self, filename, sps, pps):
wf = open(filename, "wb");
wf.write(struct.pack('>i', 1))
wf.write(sps[0])
wf.write(struct.pack('>i', 1))
wf.write(pps[0])
f = open(sys.argv[1], "rb");
pos = 0
f.seek(self.pos)
while True:
tsize = struct.unpack('>i', f.read(4))[0]
if tsize <= 0:
break
pos += 4
pos += tsize
if pos > self.size:
break
data = f.read(tsize)
wf.write(struct.pack('>i', 1))
wf.write(data)
def __str__(self):
return "CHUNK(pos: %s, size: %s, samples: %s)"%(self.pos, self.size, self.samples_count)
def __repr__(self):
return self.__str__()
class TRACK(object):
def __init__(self, track_idx, track_atom, mdat_atom):
self.track_idx = track_idx
self.track_atom = track_atom
self.mdat_atom = mdat_atom
self.stbl_atom = None
self.ppss = None
self.spss = None
self.chunks = []
self.iframes = None
self.merge()
def __str__(self):
return "TRACK(%s)"%(self.track_idx)
def __repr__(self):
return self.__str__()
def merge(self):
self.stbl_atom = self.track_atom.find_child_atom("mdia/minf/stbl")
stco_atom = self.stbl_atom.find_child_atom("stco")
chunks = []
f = open(sys.argv[1], "rb");
f.seek(stco_atom.pos+12)
stco_size = struct.unpack('>i', f.read(4))[0]
for i in range(stco_size):
p = struct.unpack('>i', f.read(4))[0]
if i != 0:
before_chunk = chunks[-1]
before_chunk.size = p - before_chunk.pos
chunks.append(CHUNK(p))
self.chunks = chunks
stsc_atom = self.stbl_atom.find_child_atom("stsc")
f.seek(stsc_atom.pos+12)
stsc_size = struct.unpack('>i', f.read(4))[0]
samples = []
end_chunk = stco_size
for i in range(stsc_size):
start_chunk = struct.unpack('>i', f.read(4))[0]
sample_count = struct.unpack('>i', f.read(4))[0]
desc_idx = struct.unpack('>i', f.read(4))[0]
if i != 0:
samples[i][1] = start_chunk - 1
samples.append([start_chunk, end_chunk+1, sample_count, desc_idx])
idx = 0
sample_info = samples[idx]
for i in range(1, stco_size+1):
chunk = chunks[i-1]
if i > sample_info[1]:
idx += 1
sample_info = samples[idx]
chunk.samples_count = sample_info[2]
chunk.samples_desc_idx = sample_info[3]
stss_atom = self.stbl_atom.find_child_atom("stss")
if stss_atom is None:
return
f.seek(stss_atom.pos+12)
stss_size = struct.unpack('>i', f.read(4))[0]
iframes = []
for i in range(stss_size):
iframes.append(struct.unpack('>i', f.read(4))[0])
self.iframes = iframes
stsd_atom = self.stbl_atom.find_child_atom("stsd")
if stsd_atom is None:
return
self.ppss = stsd_atom.properties[0]["avc"]["pps"]
self.spss = stsd_atom.properties[0]["avc"]["sps"]
class ATOM(object):
def __init__(self, size, name, pos):
self.size = size
self.name = name
self.pos = pos
self.children = []
self.properties = None
def find_child_atom_internal(self, atoms, part_arr):
name = part_arr[0]
for atom in atoms:
if atom.name == name:
if len(part_arr) == 1:
return atom
return self.find_child_atom_internal(atom.children, part_arr[1:])
return None
def find_child_atom(self, name):
part_arr = name.split("/")
return self.find_child_atom_internal(self.children, part_arr)
def __str__(self):
return "%s(%s)"%(self.name, self.size)
def __repr__(self):
return self.__str__()
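# Example (illustrative): once the atom tree has been parsed, nested atoms can be
# looked up by a slash-separated path, exactly as TRACK.merge() does above:
#   stbl_atom = track_atom.find_child_atom("mdia/minf/stbl")
#   stco_atom = stbl_atom.find_child_atom("stco")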
class MP4(object):
def __init__(self, filename):
self.filename = filename
self.f = open(filename, "rb");
self.children = []
self.track_size = 0
self.moov_atom = None
self.mdat_atom = None
self.tracks = []
def is_parent_atom(self, name):
return name not in ['mdat', 'tkhd', 'vmhd']
def create_empty_atom(self):
return ATOM(0, "", 0)
def get_moov_atom(self):
for atom in self.children:
if atom.name == "moov":
return atom
raise NotMP4FormatException()
def get_mdat_atom(self):
for atom in self.children:
if atom.name == "mdat":
return atom
raise NotMP4FormatException()
def get_track_size(self):
return self.track_size
def get_track_size_internal(self, atom):
count = 0
for atom in atom.children:
if atom.name == "trak":
count += 1
return count;
def parse(self, start_pos = 0):
#mp4 container follow BIG ENDIAN
next_pos = start_pos
try:
while True:
atom = self.parse_internal(next_pos)
self.children.append(atom)
next_pos += atom.size
except struct.error:
pass
except:
raise NotMP4FormatException()
self.moov_atom = self.get_moov_atom()
self.mdat_atom = self.get_mdat_atom()
self.track_size = self.get_track_size_internal(self.moov_atom)
self.tracks = self.merge_tracks()
return True
def merge_tracks(self):
tracks = []
count = 0
for atom in self.moov_atom.children:
if atom.name == "trak":
tracks.append(TRACK(count, atom, self.mdat_atom))
count += 1
return tracks
def traverse(self, udf = None):
self.traverse_internal(self.children, 0, udf)
def traverse_internal(self, atoms, depth, udf = None):
buf = ""
for i in range(depth):
buf += " "
for atom in atoms:
print "%s%s"%(buf, atom)
if udf is not None:
udf(atom)
self.traverse_internal(atom.children, depth+1, udf)
def get_atom(self, pos):
self.f.seek(pos)
size = struct.unpack('>i', self.f.read(4))[0]
name = self.f.read(4)
return ATOM(size, name, pos)
def parse_avcC(self, avc, name, size):
avcC = {}
spss = []
ppss = []
version = struct.unpack('>b', self.f.read(1))[0]
avc_profile_idc = struct.unpack('>b', self.f.read(1))[0]
profile_compatibility = struct.unpack('>b', self.f.read(1))[0]
avc_level_idc = struct.unpack('>b', self.f.read(1))[0]
length_size_minus_one = (struct.unpack('>b', self.f.read(1))[0] & 0x03) + 1 # (lengthSizeMinusOne field) + 1 = NAL length field size in bytes
num_of_sps = (struct.unpack('>b', self.f.read(1))[0]) & 0x1F
for i in range(num_of_sps):
length_sps = struct.unpack('>h', self.f.read(2))[0]
sps = self.f.read(length_sps)
spss.append(sps)
num_of_pps = struct.unpack('>b', self.f.read(1))[0]
for i in range(num_of_pps):
length_pps = struct.unpack('>h', self.f.read(2))[0]
pps = self.f.read(length_pps)
ppss.append(pps)
avcC["length_size_minus_one"] = lengh_size_minus_one
avcC["sps"] = spss
avcC["pps"] = ppss
return avcC
def parse_avc_internal(self, atom):
avc = {}
size = struct.unpack('>i', self.f.read(4))[0]
name = self.f.read(4)
if name != "avc1":
return None
avc["name"] = name
self.f.read(24)
avc["w"] = struct.unpack('>h', self.f.read(2))[0]
avc["h"] = struct.unpack('>h', self.f.read(2))[0]
avc["hres"] = struct.unpack('>i', self.f.read(4))[0]
avc["vres"] = struct.unpack('>i', self.f.read(4))[0]
self.f.read(4)
frame_count = struct.unpack('>h', self.f.read(2))[0]
if frame_count != 1:
return None
self.f.read(32)
depth = struct.unpack('>h', self.f.read(2))[0]
if depth != 0x18:
return None
pd = struct.unpack('>h', self.f.read(2))[0]
if pd != -1:
return None
while True:
tsize = struct.unpack('>i', self.f.read(4))[0]
tname = self.f.read(4)
if tname == "avcC":
avc["avc"] = self.parse_avcC(avc, tname, tsize)
break
else:
self.f.read(tsize-8)
return avc
def parse_avc(self, atom):
self.f.seek(atom.pos+12)
entry_count = struct.unpack('>i', self.f.read(4))[0]
entries = []
for i in range(entry_count):
entry = self.parse_avc_internal(atom)
if entry is not None:
entries.append(entry)
return entries
def parse_internal(self, pos, total_size = 0):
atom = self.get_atom(pos)
if total_size > 0 and atom.size > total_size:
return self.create_empty_atom()
if self.is_parent_atom(atom.name) == False:
return atom
if atom.name == "stsd":
child = self.parse_avc(atom)
atom.properties = child
return atom
next_pos = atom.pos + 8
temp_size = atom.size
while (next_pos+8) < (atom.pos + atom.size):
child = self.parse_internal(next_pos, atom.size)
if (child.size >= atom.size) or child.size <= 0:
break
atom.children.append(child)
next_pos += child.size
return atom
def buffer_to_lines(buf):
arr = []
size = len(buf)
mod = size % 16
line = ""
for i in range(size):
if i != 0 and i % 16 == 0:
arr.append(line)
line = ""
else:
line += " "
val = ord(buf[i])
v = "{0:02x}".format(val)
line += v
if mod != 0:
for i in range(16-mod):
line += " 00"
if len(line) != 0:
arr.append(line)
return arr
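# Example usage (illustrative; assumes Python 2 byte strings, as in the rest of this script):
#   with open(sys.argv[1], "rb") as f:
#       for line in buffer_to_lines(f.read(64)):
#           print(line)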
if __name__ == "__main__":
mp4 = MP4(sys.argv[1])
mp4.parse()
mp4.traverse()
track = mp4.tracks[0]
# print track.chunks
track.chunks[0].to_annexb("/Users/charsyam/chunk0-0", track.spss, track.ppss)
# track.chunks[1].to_annexb("d:\\chunk1", track.spss, track.ppss, size)
|
|
# MANSEDS Lunar Rover -- Arm Controller
# Author: Ethan Ramsay
# Import dependencies
import argparse
import RPi.GPIO as GPIO
import Adafruit_PCA9685
# import ikpy
import numpy as np
# from ikpy import plot_utils
import logging
import time
from math import sqrt, pow, atan2, cos, sin
# Logging config
logging.basicConfig(filename='arm.log', level=logging.WARNING)
# System variables
### Variables utilising Adafruit PWM hat
## Channels
channel_arm = [0, 1, 2, 3, 4] # Arm servo PWM channels
channel_grip = [5, 6] # Gripper servo PWM channels
## Pulse Length Limits
# Arm servo pl limits
pl_limits_arm = [[150, 750], [150, 750], [150, 750], [150, 750], [150, 750], [150, 750]]
pl_limits_grip = [[160, 600], [160, 600]] # Gripper servo pulse length limits
full_grip_pl = 300
full_release_pl = 580
### Variables using Pi GPIO
## GPIO pins
# pwm_arm = [0, 0, 0, 0, 0] # Arm servo PWM pins
# pwm_grip = [0, 0] # Gripper servo PWM pins
# dc_limits_arm = [[0, 13], [0, 13], [0, 13], [0, 13], [0, 13], [0, 13]] # Arm servo pl limits
# dc_limits_grip = [[0, 13], [0, 13]] # Gripper servo pulse length limits
## Duty Cycle Limits
# full_grip_dc = 7 # Gripper dc at fully closed position
# full_release_dc = 13 # Gripper dc at fully open position
### Arm dimensions
l1 = 150 # Link one is 150 mm long
l2 = 260 # Link two is 260 mm long
l3 = 145 # Link three is 145 mm long
max_radius = l1 + l2
### Preset values
stationary_base_pl = 410
deposit_angles = [102, 140, 140, 180, 140, 5, 45]
# Base rotation timing constants used by rotate_arm(); placeholder values (assumed), calibrate on hardware
ccw_full_rot_time = 4.0 # time [s] for a full 360 deg counter-clockwise base rotation at the PWM values used below
compensation_factor = 1.0 # clockwise vs counter-clockwise rotation speed compensation multiplier
### External filenames
base_angle_data_filename = "base_angle.dat" # External file storing base angle value
# Create kinematic chain from URDF file
# lunar_chain = ikpy.chain.Chain.from_urdf_file("arm.urdf")
# GPIO setup
GPIO.setmode(GPIO.BCM)
def GPIO_arm(pwm_arm):
for pin in pwm_arm:
GPIO.setup(pin, GPIO.OUT)
GPIO.PWM(pin, 60)
def GPIO_grip(pwm_grip):
for pin in pwm_grip:
GPIO.setup(pin, GPIO.OUT)
GPIO.PWM(pin, 60)
# Setup Adafruit PWM Hat
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)
logging.debug("Adafruit PWM freq set to 60")
# Stop continuous servo from starting
pwm.set_pwm(0, 0, stationary_base_pl)
# Positioning functions
def calc_servo_angles(target_vector):
logging.warning("Desired gripper position vector: %s", target_vector)
r = target_vector[0]
z = target_vector[1] + 145
target_radius = sqrt(pow(r,2)+pow(z,2))
logging.warning("r=%s, z=%s, target_radius=%s", r, z, target_radius)
if target_radius > max_radius:
raise ValueError('Desired position exceeds reach!')
# Inverse kinematics solver for 2 link arm
c2 = (pow(r, 2) + pow(z, 2) - pow(l1, 2) - pow(l2, 2)) / (2 * l1 * l2)
s2 = sqrt(1 - pow(c2, 2))
th2_rad = atan2(s2, c2)
th2 = th2_rad * 180 / 3.142
k1 = l1 + l2*c2
k2 = l2 * s2
th1_rad = atan2(z, r) - atan2(k2, k1)
th1 = th1_rad * 180 / 3.142
# for link 3 always downwards
th3 = 270 - th1 - th2
servo_angles = [th1, th2, th3]
logging.warning("Calculated servo angles: %s", servo_angles)
return servo_angles
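# Sanity check (illustrative): the returned angles should reproduce the target via the
# forward kinematics of the two main links (angles in radians, lengths in mm):
#   r_check = l1*cos(th1_rad) + l2*cos(th1_rad + th2_rad)          # == target_vector[0]
#   z_check = l1*sin(th1_rad) + l2*sin(th1_rad + th2_rad) - 145    # == target_vector[1]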
def calc_dc(dc_min, dc_max, angle):
dc_range = dc_max - dc_min
inter = dc_range * angle / 180
dc = dc_min + inter
logging.debug("Calculated required duty cycle for desired servo angle: %s", dc)
return dc
def calc_pl(pl_min, pl_max, angle):
if angle > 180:
raise ValueError("Desired angle exceeds servo range of 180 deg")
pl_range = pl_max - pl_min
inter = pl_range * angle / 180
pl = pl_min + inter
logging.warning("Calculated required pulse length for desired servo angle: %s", pl)
pl = int(pl)
return pl
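# Worked example (illustrative), using the 150-750 pulse-length limits defined above:
#   calc_pl(150, 750, 0)   -> 150
#   calc_pl(150, 750, 90)  -> 450   (150 + 600*90/180)
#   calc_pl(150, 750, 180) -> 750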
# Control functions
def extend():
val = 1
extended_pl = [0, 0, 0, 0, 0]
extended_pl[0] = 410
extended_pl[1] = calc_pl(pl_limits_arm[1][0], pl_limits_arm[1][1], 90)
extended_pl[2] = extended_pl[1]
extended_pl[3] = calc_pl(pl_limits_arm[3][0], pl_limits_arm[3][1], 90)
extended_pl[4] = calc_pl(pl_limits_arm[4][0], pl_limits_arm[4][1], 40)
# set base rotation to 0 deg
rotate_arm(0, 0, 0)
while True:
pwm.set_pwm(0, 0, extended_pl[0])
pwm.set_pwm(1, 0, extended_pl[1])
pwm.set_pwm(2, 0, extended_pl[2])
pwm.set_pwm(3, 0, extended_pl[3])
pwm.set_pwm(4, 0, extended_pl[4])
if val > 0:
logging.debug("Arm extended")
val -= 1
def stow():
val = 1
while True:
pwm.set_pwm(0, 0, pl_limits_arm[0][0])
pwm.set_pwm(1, 0, 300) # pl_limits_arm[1][0])
pwm.set_pwm(2, 0, 300) # pl_limits_arm[2][0])
pwm.set_pwm(3, 0, 270) # pl_limits_arm[3][0])
pwm.set_pwm(4, 0, pl_limits_arm[4][0])
pwm.set_pwm(5, 0, 240) # pl_limits_grip[0][0])
pwm.set_pwm(6, 0, pl_limits_grip[1][0])
if val == 1:
logging.debug("Arm stowed")
val -= 1
def deposit_pos():
val = 1
deposit_pl = [0, 0, 0, 0, 0, 0, 0]
deposit_pl[0] = 410
deposit_pl[1] = calc_pl(pl_limits_arm[1][0], pl_limits_arm[1][1], 130)
deposit_pl[2] = deposit_pl[1]
deposit_pl[3] = calc_pl(pl_limits_arm[3][0], pl_limits_arm[3][1], 110)
deposit_pl[4] = calc_pl(pl_limits_arm[4][0], pl_limits_arm[4][1], 90)
deposit_pl[5] = calc_pl(pl_limits_grip[0][0], pl_limits_grip[0][1], 5)
deposit_pl[6] = calc_pl(pl_limits_grip[1][0], pl_limits_grip[1][1], 45)
# set base rotation to 0
rotate_arm(0, 0, 0)
while True:
pwm.set_pwm(0, 0, deposit_pl[0])
pwm.set_pwm(1, 0, deposit_pl[1])
pwm.set_pwm(2, 0, deposit_pl[2])
pwm.set_pwm(3, 0, deposit_pl[3])
pwm.set_pwm(4, 0, deposit_pl[4])
pwm.set_pwm(5, 0, deposit_pl[5])
time.sleep(2)
pwm.set_pwm(6, 0, deposit_pl[6])
if val == 1:
logging.debug("Gripper positioned above ice box")
val -= 1
def position_gripper(target_vector):
a = calc_servo_angles(target_vector)
pl = [0, 0, 0, 0, 0]
pl[0] = stationary_base_pl
pl[1] = calc_pl(pl_limits_arm[1][0], pl_limits_arm[1][1], a[0])
pl[2] = pl[1]
pl[3] = calc_pl(pl_limits_arm[3][0], pl_limits_arm[3][1], (180 - a[1]))
pl[4] = calc_pl(pl_limits_arm[4][0], pl_limits_arm[4][1], a[2])
val = 1
while True:
pwm.set_pwm(0, 0, pl[0]) # base channel keeps the stationary base pulse length
pwm.set_pwm(1, 0, pl[1])
pwm.set_pwm(2, 0, pl[2])
pwm.set_pwm(3, 0, pl[3])
pwm.set_pwm(4, 0, pl[4])
if val == 1:
logging.warning("Arm position command called for target vector: {}".format(target_vector))
logging.warning("Calculated pulse lengths to achieve target vector: {}".format(pl))
val -= 1
def rotate_arm(desired_angle, channel, hold_time):
if desired_angle > 40 or desired_angle < -40:
    raise ValueError("Desired angle exceeds current configuration range: min = -40 deg; max = 40 deg")
current_angle = 0
with open(base_angle_data_filename, 'r') as f:
current_angle_str = f.read()
print("current_angle_str: '" + current_angle_str + "'")
current_angle = int(current_angle_str)
print(current_angle)
perc_full_rot = 100 * abs((desired_angle - current_angle)) / 360
print(perc_full_rot)
if desired_angle < current_angle:
rot_time = ccw_full_rot_time * perc_full_rot / 100
print(rot_time)
pwm.set_pwm(channel, 0, 440)
time.sleep(rot_time)
pwm.set_pwm(channel, 0, 410)
with open(base_angle_data_filename, 'w') as f:
f.write(str(desired_angle))
if hold_time != 0:
time.sleep(hold_time)
elif desired_angle > current_angle:
rot_time = ccw_full_rot_time * compensation_factor * perc_full_rot / 100
pwm.set_pwm(channel, 0, 220)
time.sleep(rot_time)
pwm.set_pwm(channel, 0, 410)
with open(base_angle_data_filename, 'w') as f:
f.write(str(desired_angle))
if hold_time != 0:
time.sleep(hold_time)
else:
pwm.set_pwm(channel, 0, 410)
if hold_time != 0:
time.sleep(hold_time)
# current angle must be equal to desired angle
def grip():
val = 1
while True:
pwm.set_pwm(6, 0, full_grip_pl)
if val == 1:
logging.debug("Gripper clamped")
val -= 1
def drop():
val = 1
while True:
pwm.set_pwm(6, 0, full_release_pl)
if val == 1:
logging.debug("Gripper released")
val -= 1
def worm():
while True:
for i in range(0, 400, 1):
if i < 100:
pl_1 = 400 - i
if i > 20 and i < 200:
pl_1 = 300
pl_3 = 60 + i
if i > 200:
pl_1 = 250
pl_3 = 400 -i
if i > 240:
pl_1 = 250
pl_3 = 400 - 1
pl_4 = i
if i > 320:
pl_1 = 160 + i
pl_3 = 160 + 400 - i
pl_4 = 160 + 400 - i
pwm.set_pwm(1, 0, pl_1)
pwm.set_pwm(2, 0, pl_1)
# Main
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser()
g = parser.add_mutually_exclusive_group(required=True)
ge = g.add_mutually_exclusive_group()
ge.add_argument("-e", "--extend", help="Extend arm", action="store_true")
ge.add_argument("-s", "--stow", help="Stow arm", action="store_true")
ge.add_argument("-w", "--wave", help="Do the worm", action="store_true")
gp = g.add_mutually_exclusive_group()
gp.add_argument("-r", "--rotate", help="Rotate arm at base (Angle)")
gp.add_argument("-p", "--position", nargs='+', type=int, help="Gripper Position Vector [radius, height]")
gp.add_argument("-i", "--icebox", help="Position gripper above ice box to deposit sample", action="store_true")
gg = g.add_mutually_exclusive_group()
gg.add_argument("-g", "--grip", help="Grip", action="store_true")
gg.add_argument("-d", "--drop", help="Release grip", action="store_true")
args = parser.parse_args()
e = args.extend
s = args.stow
w = args.wave
if args.rotate:
    r = int(args.rotate)
else:
    r = False
p = args.position
i = args.icebox
g = args.grip
d = args.drop
logging.debug("Arguments parsed: e=%s, s=%s, r=%s, p=%s, i=%s, g=%s, d=%s, w=%s", + \
e, s, r, p, i, g, d, w)
if (e or s):
# GPIO_arm()
if e:
extend()
elif s:
stow()
elif ((p or r) or i):
if p:
# GPIO_arm()
r_des = p[0]
z_des = p[1]
logging.debug("r_des=%s, z_des=%s", r_des, z_des)
position_gripper(p)
elif r:
rotate_arm(r, 0, 100000)
elif i:
deposit_pos()
elif (g or d):
# GPIO_grip()
if g:
grip()
elif d:
drop()
elif w:
worm()
# GPIO cleanup
GPIO.cleanup()
|
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
latency = 3
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
latency = 15
def define_options(parser):
return
def create_system(options, system, piobus, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MESI_CMP_directory':
panic("This script requires the MESI_CMP_directory protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
l2_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
l2_bits = int(math.log(options.num_l2caches, 2))
block_size_bits = int(math.log(options.cacheline_size, 2))
cntrl_count = 0
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
#
l1i_cache = L1Cache(size = options.l1i_size,
assoc = options.l1i_assoc,
start_index_bit = block_size_bits,
is_icache = True)
l1d_cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits,
is_icache = False)
prefetcher = RubyPrefetcher.Prefetcher()
l1_cntrl = L1Cache_Controller(version = i,
cntrl_id = cntrl_count,
L1Icache = l1i_cache,
L1Dcache = l1d_cache,
l2_select_num_bits = l2_bits,
send_evictions = (
options.cpu_type == "detailed"),
prefetcher = prefetcher,
ruby_system = ruby_system,
transitions_per_cycle=options.ports,
enable_prefetch = False)
cpu_seq = RubySequencer(version = i,
icache = l1i_cache,
dcache = l1d_cache,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
if piobus != None:
cpu_seq.pio_port = piobus.slave
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
cntrl_count += 1
l2_index_start = block_size_bits + l2_bits
for i in xrange(options.num_l2caches):
#
# First create the Ruby objects associated with this cpu
#
l2_cache = L2Cache(size = options.l2_size,
assoc = options.l2_assoc,
start_index_bit = l2_index_start)
l2_cntrl = L2Cache_Controller(version = i,
cntrl_id = cntrl_count,
L2cache = l2_cache,
transitions_per_cycle=options.ports,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
l2_cntrl_nodes.append(l2_cntrl)
cntrl_count += 1
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
#
# Create the Ruby objects associated with the directory controller
#
mem_cntrl = RubyMemoryControl(
clk_domain = ruby_system.memctrl_clk_domain,
version = i,
ruby_system = ruby_system)
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
cntrl_id = cntrl_count,
directory = \
RubyDirectoryMemory(version = i,
size = dir_size,
use_map =
options.use_map),
memBuffer = mem_cntrl,
l2_select_num_bits = l2_bits,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
cntrl_count += 1
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
cntrl_id = cntrl_count,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
cntrl_count += 1
all_cntrls = l1_cntrl_nodes + \
l2_cntrl_nodes + \
dir_cntrl_nodes + \
dma_cntrl_nodes
topology = create_topology(all_cntrls, options)
return (cpu_sequencers, dir_cntrl_nodes, topology)
|
|
## GOX-kerosene sim
#@ Author Juha Nieminen
#import sys
#sys.path.insert(0, '/Users/juhanieminen/Documents/adamrocket')
import RocketComponents as rc
from physical_constants import poise, inches, Runiv, gallons, lbm, \
gearth, atm, psi, lbf
from numpy import pi, linspace, cos, radians, sqrt, exp, log, array, full, ceil
from scipy import optimize as opt
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import Flows1D as flows
#DESIGN VARIABLES____________________________________________________________________________________
# nominal parameters
Preg_N2 = 1300*psi # regulated N2 outlet pressure [Pa]
mdot_fuel_nom = 0.2 # This is only for cooling jacket pressure drop purposes [kg/s]
Pdrop_jacket_nom= 1*psi # Cooling jacket pressure drop at mdot_nominal [Pa]
OF_nom = 2.25 # Oxidizer-to-fuel ratio. This has only effect on initial guesses during solving
# Pressurant tank dimensions
Vprestank = 0.053 # N2 pressurant tank volume [m3]
# Propellant tank dimensions
Vfueltank = 4*gallons # fuel tank volume [m3]
Voxtank = 4*0.053 # ox tank volume [m3]
# Tubing
d_presfuel_tube = 1.0*inches # pressurant tank -> fuel tank tube diameter [m]
L_presfuel_tube = 0.5 # pressurant tank -> fuel tank tube length [m]
d_oxtube = 0.87*inches # ox tank -> manifold tube diameter [m]
L_oxtube = 2.4 # ox tank -> manifold tube length [m]
d_fueltube = 0.87*inches # fuel tank -> manifold tube diameter [m]
L_fueltube = 3.0 # fuel tank -> manifold tube length [m]
roughness = 0.005 # epsilon/diameter, dimensionless
# Valves
Cv_ox_check = 4.7 # oxidizer check valve flow coefficient, dimensionless
Pcrack_ox_check = 10*psi # oxidizer check valve opening pressure [Pa]
Cv_pres_check = 1.8 # nitrogen check valve flow coefficient, dimensionless
Pcrack_pres_check = 0.33*psi # nitrogen check valve opening pressure [Pa]
Cv_pres_valve = 8.8 # nitrogen solenoid valve flow coefficient, dimensionless
Cv_ox_valve = 8.8 # oxidizer solenoid valve flow coefficient, dimensionless
Cv_fuel_valve = 8.8 # fuel solenoid valve flow coefficient, dimensionless
# Injector
cd_oxInjector = 0.767 # orifice discharge coefficient
diameter_oxInjectorHoles = 2.54e-3 #number xx drill # ox orifice diameter [m]
#length_oxHole = 0.005 # ox orifice length [m]
numOxInjectorHoles = 24 # number of ox orifices in the injector
area_oxInjector = numOxInjectorHoles*pi*diameter_oxInjectorHoles**2/4 # total ox flow area [m2]
cd_fuelInjector = 0.767 # orifice discharge coefficient
diameter_fuelInjectorHoles = 0.508e-3 #number xx drill # fuel orifice diameter [m]
numFuelHoles = 59 # number of fuel orifices in the injector
area_fuelInjector = numFuelHoles*pi*diameter_fuelInjectorHoles**2/4 # total fuel flow area [m2]
# Define initial/nominal conditions in the chamber (obtained from CEA code assuming OFratio = 2.25)
TfireInit = 293 # initial flame temperature [K]
Pfire = 1*atm # initial chamber pressure [Pa]
gammaFireInit = 1.148 # dimensionless
ga = gammaFireInit
mbarFireInit = 21.87 # combustion products' initial molecular mass [kg/kmol]
RfireInit = Runiv/mbarFireInit # combustion products' initial specific gas constant [J/kgK]
Pambient = atm # ambient pressure [Pa]
# Nozzle and chamber
d_nozzleThroat = 1.0*inches # throat diameter [m]
A_nozzleThroat = pi*d_nozzleThroat**2/4 # throat area [m2]
area_ratio = 7.46 # nozzle exit-to-throat area ratio
A_nozzleExit = area_ratio*A_nozzleThroat # nozzle exit area [m2]
d_nozzleExit = sqrt(4*A_nozzleExit/pi) # nozzle exit diameter [m]
Dchamber = 0.08 # chamber diameter [m]
Achamber = pi*Dchamber**2/4 # chamber cross sectional area [m2]
Lchamber = 0.14 # chamber length [m]
Vchamber = Achamber*Lchamber # chamber volume [m3]
Lstar = Vchamber/A_nozzleThroat # chamber characteristic length [m]
Mc_nom = flows.getIsentropicMs(A_nozzleThroat, Achamber, gammaFireInit)[0] # nominal chamber Mach number
print("throat diameter is", '%.1f'%(d_nozzleThroat*1000), 'mm')
print("exit diameter is", '%.1f'%(d_nozzleExit*1000), 'mm')
print("chamber volume is", '%.5f'%Vchamber, "m3")
print("chamber Lstar is", '%.2f'%Lstar, "m")
print("chamber Mach_nom is", '%.2f'%Mc_nom)
# INITIAL CONDITIONS____________________________________________________________________________________________
#Define initial conditions in the tanks
TfuelPresStart = 293 # Fuel pressurant (=nitrogen) temp [K]
FFfueltankStart = 0.9 # Fuel tank fill fraction (Vfuel/Vtank)
PfuelPrestankStart = 2640*psi - Preg_N2*Vfueltank*(1-FFfueltankStart)/Vprestank # Fuel pressurant tank pressure once fueltank has been pressurized [Pa]
ToxStart = 293 # Oxidizer (GOX) temp [K]
PoxtankStart = 1600*psi # Oxidizer tank pressure [Pa]
TfuelStart = 293 # Fuel temp [K]
PfueltankStart = Preg_N2 -1*psi # Fuel tank pressure [Pa] (set slightly below Preg_N2 to help convergence on the first timestep)
# initialize propellants
nitrogen = rc.NitrogenFluid()
GOX = rc.GOXFluid()
kerosene = rc.Kerosene()
#initialize nozzle and chamber
nozzle = rc.ConvergingDivergingNozzle(A_nozzleExit, A_nozzleThroat)
mdot_init_noz = nozzle.getmdot(gammaFireInit, GOX.R, Pfire, TfireInit, atm)
chamber = rc.GOXKeroCombustionChamber(nozzle, Vchamber, TfireInit, ga, mbarFireInit, Pfire, atm, mdot_init_noz)
#initialize injector orifices
ox_orifice = rc.GasOrifice(area_oxInjector, cd_oxInjector, GOX.gamma, GOX.R)
fuel_orifice = rc.LiquidOrifice(area_fuelInjector, cd_fuelInjector )
#initialize pressurant tanks
fuelprestank = rc.IdealgasTank(nitrogen, Vprestank, TfuelPresStart, PfuelPrestankStart)
#initialize propellant tanks
oxtank = rc.IdealgasTank(GOX, Voxtank, ToxStart, PoxtankStart)
fueltank = rc.LiquidPropellantTank(nitrogen, kerosene, Vfueltank, TfuelStart, TfuelPresStart,\
PfueltankStart, FFfueltankStart, Preg_N2)
#initialize pressure regulators
N2_regu = rc.PressureRegulator(Preg_N2, nitrogen)
#initialize solenoids
fuelSole = rc.IncompressibleFlowSolenoid( Cv_fuel_valve)
oxSole = rc.CompressibleFlowSolenoid( Cv_ox_valve, GOX)
presSole = rc.CompressibleFlowSolenoid( Cv_pres_valve, nitrogen)
#initialize check valves
ox_check = rc.CompressibleFlowCheckValve( Cv_ox_check, Pcrack_ox_check, GOX)
pres_check = rc.CompressibleFlowCheckValve( Cv_pres_check, Pcrack_pres_check, nitrogen)
#initialize tubing
ox_tube = rc.RoughStraightCylindricalTube(d_oxtube, L_oxtube, roughness, True)
fuel_tube = rc.RoughStraightCylindricalTube(d_fueltube, L_fueltube, roughness, True)
presfuel_tube = rc.RoughStraightCylindricalTube(d_presfuel_tube, L_presfuel_tube, roughness, True)
#initialize cooling jacket
jacket = rc.CoolingJacket(mdot_fuel_nom, Pdrop_jacket_nom)
#initialize arrays for various data time histories
T_chamber = [chamber.T] # combustion chamber temperature [K]
Pchamber = [chamber.get_P_inlet()] # combustion chamber pressure [Pa]
Pexit = [nozzle.getPe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit pressure [Pa]
Mexit = [nozzle.getMe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit Mach number
cmass = [chamber.m] # resident propellant mass in combustion chamber [kg]
mdot_nozzle = [nozzle.getmdot(gammaFireInit, RfireInit, chamber.get_P_inlet(), chamber.T, chamber.Pa)] # mass flow out of the nozzle [kg/s]
Poxtank = [oxtank.getPtank()] # ox tank pressure [Pa]
Toxtank = [oxtank.getTtank()] # ox tank temperature [K]
mox = [oxtank.getM()] # oxidizer mass in tank [kg]
Pfueltank = [fueltank.getPtank()] # fuel tank pressure [Pa]
Tfueltank = [fueltank.getTpres()] # pressurant temperature in fuel tank[K]
mPresFueltank = [fueltank.getMpres()] # pressurant mass in fuel tank [kg]
mfuel = [fueltank.getMprop()] # fuel mass in tank [kg]
FFfueltank = [fueltank.getFF()] # fuel tank fill fraction defined as Vfuel/(Vfueltank)
TfuelPres = [fuelprestank.getTtank()] # temperature in fuel pressurant tank [K]
PfuelPres = [fuelprestank.getPtank()] # pressure in fuel pressurant tank [Pa]
mfuelPres = [fuelprestank.getM()] # pressurant mass in fuel pressurant tank [kg]
time = [0] # time array [s]
mdot_ox = [0] # ox mass flow out of the tank [kg/s]
P1ox = [0] # ox tank pressure [Pa]
P2ox = [0] # ox check valve outlet pressure [Pa]
P3ox = [0] # ox flow solenoid outlet pressure [Pa]
P4ox = [0] # ox injector inlet pressure [Pa]
T1ox = [0] # ox tank temp [K]
T2ox = [0] # ox check valve outlet temp [K]
T3ox = [0] # ox flow solenoid outlet temp [K]
T4ox = [0] # ox injector inlet temp [K]
mdot_fuel = [0] # fuel mass flow out of the tank [kg/s]
rooFuel = fueltank.propellant.density # fuel density, assumed constant [kg/m3]
P1fuel = [0] # fuel tank pressure [Pa]
P2fuel = [0] # fuel solenoid outlet pressure [Pa]
P3fuel = [0] # fuel cooling jacket inlet pressure [Pa]
P4fuel = [0] # fuel injector inlet pressure [Pa]
mdot_fuel_pres = [0] # fuel pressurant mass flow rate [kg/s]
P3pres = [0] # pressurant pressure at check valve outlet [Pa]
P4pres = [0] # pressurant pressure at solenoid valve outlet [Pa]
mTotal = [0] # propellant mass in the system [kg]
mprs = [mfuelPres[0]+mPresFueltank[0]] # pressurant mass in the system [kg]
OFratio = [0] # oxidizer to fuel mass flow ratio
Isp = [0] # specific impulse [s]
Thrust = [nozzle.getThrust(chamber.get_P_inlet(), Pambient, gammaFireInit) ] # rocket thrust [N]
#SIMULATE_______________________________________________________________________________________________________
# using orifices as follows: ejecting GOX from manifold to chamber, fuel liq-to-liq from manifold to chamber
print("")
print("STARTING SIM...")
print("")
print("mOxStart is", '%.2f'%mox[0], "kg")
print("mKerostart is", mfuel[0], "kg")
print("mN2start in N2 tank is", '%.2f'%mfuelPres[0], "kg")
print("mN2start in fuel tank is", '%.2f'%(fueltank.getMpres()), "kg")
# The first step is to solve oxidizer and fuel mass flow rates from the tank to combustion chamber.
# definitions:
# P1ox = GOX tank pressure
# P2ox = check valve outlet pressure
# P3ox = ox valve outlet pressure
# P4ox = injector inlet, pressure
# (P1ox-P2ox) = ox check valve pressure drop, eq 1
# (P2ox-P3ox) = ox flow solenoid pressure drop, eq 2
# (P3ox-P4ox) = ox tubing pressure drop, eq 3
# (P4ox-Pchamber) = ox injector pressure drop, eq 4
# P1pres = Nitrogen tank pressure
# P2pres = Regulation pressure
# P3pres = Check valve outlet pressure
# P4pres = Nitrogen solenoid outlet
# P5pres = Nitrogen tubing outlet = fuel tank pressure
# (P2pres-P3pres) = Nitrogen check valve pressure drop
# (P3pres-P4pres) = Nitrogen solenoid valve pressure drop
# (P4pres-P5pres) = Nitrogen tubing pressure drop
# P1fuel = fuel tank pressure
# P2fuel = fuel valve outlet pressure
# P3fuel = cooling jacket inlet pressure
# P4fuel = injector inlet pressure
# (P1fuel-P2fuel) = fuel valve pressure drop, eq1
# (P2fuel-P3fuel) = fuel tubing pressure drop, eq2
# (P3fuel-P4fuel) = cooling jacket pressure drop, eq3
# (P4fuel-Pchamber) = injector pressure drop, eq4
# In the case of oxidizer, P1 and Pchamber are known, so one must solve for P2, P3, and P4.
# The fourth unknown is the mass flow rate. The four equations are the check valve, solenoid,
# tubing, and injector pressure drops. These equations are defined in the oxfunks method below,
# and the underlying physics are in RocketComponents.py under the respective classes.
# With pressurant, P2 (regulation pressure) and P5 (fuel tank pressure) are known, so one must
# solve for P3 and P4. The third unknown is the pressurant mass flow rate. The equations to be
# solved are the pressure drops over the check valve, the solenoid valve, and the tubing.
# With fuel, P1 and Pchamber are known, so one must solve for P2, P3, and P4. The fourth unknown
# is the mass flow rate.
# fsolve requires sensible initial guesses for all unknowns. They are established by guessing the mass flow rate, because all other pressures trickle down from that.
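# Minimal sketch of the pattern used below for each circuit (illustrative only; the real
# residual functions are oxfunks/fuelfunks/presfunks defined inside the time loop):
#   def residuals(U):                                 # U = [P2, P3, P4, mdot]
#       return [mdot - component1_mdot(P1, P2),       # e.g. check valve
#               P2 - P3 - component2_dP(mdot, P2),    # e.g. solenoid valve
#               P3 - P4 - component3_dP(mdot),        # e.g. tubing
#               mdot - injector_mdot(P4, Pchamber)]   # injector
#   P2, P3, P4, mdot = opt.fsolve(residuals, initial_guesses)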
timestep_small = 1e-5 # seconds, used during initial transient
timestep_nom = 1e-4 # seconds, used after 0.01 seconds of simulation time
t_transient = 0.01 # seconds, estimated time of initial transient
t_simulation = 3 # seconds
if t_simulation <= t_transient:
simsteps = int(ceil(t_simulation/timestep_small))
else:
simsteps = int(ceil( t_transient/timestep_small + (t_simulation-t_transient)/timestep_nom ))
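# With the settings above: simsteps = ceil(0.01/1e-5 + (3 - 0.01)/1e-4) = ceil(1000 + 29900) = 30900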
print("Sim time is", t_simulation, "s, number of simsteps is", simsteps)
i=0
for i in range(0, simsteps):
if time[i] < t_transient:
timestep = timestep_small # use shorter timestep during initial transient
else: timestep = timestep_nom # proceed with nominal timestep
#while True:
print("i=", i)
P1ox = Poxtank[i]
P1fuel = Pfueltank[i]
Pchamb = Pchamber[i]
mu_ox = GOX.getViscosity(P1ox, Toxtank[i])
roo_ox = GOX.getDensity(P1ox, Toxtank[i])
Tox = Toxtank[i]
Tpres = TfuelPres[i]
mu_fuel = kerosene.mu
mu_N2_fuel = nitrogen.getViscosity(Preg_N2, TfuelPres[i])
roo_N2_fuel = nitrogen.getDensity(Preg_N2, TfuelPres[i])
if i==0: # First guesses. Based on choked flow at ox injector (multiplied by 0.7 to adjust for better convergence)
mdot_injector_choked = ox_orifice.getMdot(P1ox, Pfire, Tox)
'''
mdot_checkvalve_choked = ox_check.getMdot(P1ox, Pfire, GOX.roo_std, roo_ox, Tox)
if mdot_injector_choked >= mdot_checkvalve_choked: #check valve is choking
print("check valve is initially choking")
mdot_ox_guess = mdot_checkvalve_choked
print("mdot_ox_guess is", mdot_ox_guess)
P4ox_guess = ox_orifice.getUpstreamPressure(Pchamb, Tox, mdot_ox_guess)
P3ox_guess = P4ox_guess + ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P3ox_guess + oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
else:
'''
mdot_ox_guess = mdot_injector_choked *0.7
P2ox_guess = P1ox - ox_check.getPressureDrop(mdot_ox_guess, P1ox, GOX.roo_std, roo_ox, Tox)
P3ox_guess = P2ox_guess - oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P5ox_guess is", P5ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_ox_guess/OF_nom
P2fuel_guess = P1fuel - fuelSole.getPressureDrop(mdot_fuel_guess, rooFuel)
P3fuel_guess = P2fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
P4fuel_guess = P3fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
mdot_pres_guess = mdot_fuel_guess*roo_N2_fuel/rooFuel #volumetric flowrates of fuel and pressurant are the same
P3pres_guess = Preg_N2 - pres_check.getPressureDrop(mdot_pres_guess, Preg_N2, nitrogen.roo_std, roo_N2_fuel, Tpres)
P4pres_guess = P3pres_guess - presSole.getPressureDrop(mdot_pres_guess, P3pres_guess, roo_N2_fuel)
P5pres_guess = P4pres_guess - presfuel_tube.getPressureDrop(mdot_pres_guess, mu_N2_fuel, roo_N2_fuel)
#print("mdot_pres_guess is is", mdot_pres_guess, "kg/s")
#print("P3pres_guess is is", P3pres_guess/psi, "psi")
#print("P4pres_guess is is", P4pres_guess/psi, "psi")
#print("P5pres_guess is is", P5pres_guess/psi, "psi")
#print("mdot_fuel_guess is", mdot_fuel_guess)
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
else : # guesses for further steps. Use values from previous timestep
mdot_ox_guess = mdot_ox[i-1] #ox_orifice.getMdot(Preg_ox, Pchamb, Tox)
#P3ox_guess = P2ox - oxSole.getPressureDrop(mdot_ox_guess, P2ox,roo_ox)
#P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P2ox[i-1]
P3ox_guess = P3ox[i-1]
P4ox_guess = P4ox[i-1]
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox_guess is", P2ox_guess/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_fuel[i-1] #mdot_ox_guess/OF_nom*1
P2fuel_guess = P2fuel[i-1]
P3fuel_guess = P3fuel[i-1]
P4fuel_guess = P4fuel[i-1]
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_pres_guess = mdot_fuel_pres[i-1]
P3pres_guess = P3pres[i-1]
P4pres_guess = P4pres[i-1]
initial_ox_guesses = [P2ox_guess, P3ox_guess, P4ox_guess, mdot_ox_guess]
initial_fuel_guesses= [P2fuel_guess, P3fuel_guess, P4fuel_guess, mdot_fuel_guess]
initial_pres_guesses= [P3pres_guess, P4pres_guess, mdot_pres_guess]
def oxfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("nyt TAALLA")
#print("P3 as U0 is", P3/psi, "psi")
#print("P4 as U1 is", P4/psi, "psi")
#print("P5 as U2 is", P5/psi, "psi")
#print("mdot as U3 is", mdot, "kg/s")
#print("mdot is", mdot, "kg/s")
#print("P4ox is", P4/psi, "psi")
#print("Pchamb is", Pchamb/psi, "psi")
#out = [ P2ox - P3 - ox_check.getPressureDrop(mdot, P2ox, GOX.roo_std, roo_ox, Tox) ]
out = [ mdot - ox_check.getMdot(P1ox, P2, GOX.roo_std, roo_ox, Tox) ]
out.append( P2 - P3 - oxSole.getPressureDrop( mdot, P2, roo_ox) )
out.append( P3 - P4 - ox_tube.getPressureDrop(mdot, mu_ox, roo_ox) )
out.append( mdot - ox_orifice.getMdot(P4, Pchamb, Tox) )
#print("oxoutti", out)
return out
ox_solution = opt.fsolve(oxfunks, initial_ox_guesses) # iterates until finds a solution or goes bust
#print("ox solution is", ox_solution)
mdot_ox_new = ox_solution[3]
#print("mdot_ox_nyyy is", mdot_ox_new, "kg/s")
def fuelfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("U is", U)
#print("fuelmdot is", mdot)
out = [ mdot - fuelSole.getMdot(P1fuel, P2, rooFuel, kerosene.P_crit, kerosene.P_vapor) ]
out.append( P2 - P3 - fuel_tube.getPressureDrop(mdot, mu_fuel, rooFuel) )
out.append( P3 - P4 - jacket.getPressureDrop(mdot) )
out.append( P4 - Pchamb - fuel_orifice.getPressureDrop(mdot, rooFuel) )
#print("fueloutti", out)
return out
fuel_solution = opt.fsolve(fuelfunks, initial_fuel_guesses)
#print("fuel solution is", fuel_solution)
mdot_fuel_new = fuel_solution[3]
# Now that fuel mass flow rate out has been solved, intermediate state (=no N2 inflow yet) of the fuel tank can be established:
fueltank.update(TfuelPres[i], 0, mdot_fuel_new, timestep)
Pfuel_intermediate = fueltank.getPtank()
Pfuel_eff = (Pfuel_intermediate + P1fuel)/2 # average of pressures before and after ejection of fuel from tank; incoming nitrogen will see this 'effective' pressure in the tank
# Next, nitrogen flow into the void created by ejected fuel is calculated
def presfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
mdot = U[2]
out = [mdot - pres_check.getMdot(Preg_N2, P3, nitrogen.roo_std, roo_N2_fuel, Tpres) ]
#out.append( P3 - P4 - presSole.getPressureDrop(mdot, P3, roo_N2_fuel) )
out.append( mdot - presSole.getMdot(P3, P4, roo_N2_fuel) )
out.append( P4 - Pfuel_eff - presfuel_tube.getPressureDrop(mdot, mu_N2_fuel, roo_N2_fuel) )
#out.append( mdot - presfuel_tube.getMdot(P4, Pfuel_eff, mu_N2_fuel, roo_N2_fuel) )
#print("presoutti", out)
return out
pres_solution = opt.fsolve(presfunks, initial_pres_guesses)
#print("pres solution is", pres_solution)
mdot_pres_new = pres_solution[2]
#print("mdot_pres_new is", mdot_pres_new, "kg/s")
# Determine final conditions in prop tanks now that N2 inflow has been determined
oxtank.update(mdot_ox_new, timestep)
fueltank.update(TfuelPres[i], mdot_pres_new, 0, timestep)
# ...and fuel pressurant tank
fuelprestank.update(mdot_pres_new, timestep)
# Check if OFratio is within limits. If not, stop simulation (outside the range covered by the CEA combustion data)
if (mdot_ox_new/mdot_fuel_new) < 0.5 or (mdot_ox_new/mdot_fuel_new) > 8.0:
print("OF ratio out of range, terminate (",(mdot_ox_new/mdot_fuel_new),")")
print("mdot_ox_new is", mdot_ox_new, "kg/s")
print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
break
# Update chamber parameters:
chamber.update(mdot_ox_new, mdot_fuel_new, Pambient, timestep) # mdot_ox_in, mdot_fuel_in, Pambient, timestep
#print("mdot_ox_new is", mdot_ox_new, "kg/s")
#print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
#print("kammiopaine on", chamber.get_P_inlet()/psi, "psi" )
# Check if ox or fuel tank will empty during this timestep. If so, stop simulation.
if oxtank.getPtank() < chamber.get_P_inlet()*1.2:
print("Ox tank reached chamber pressure x1.2 (=empty) after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fueltank.getMprop() < 0:
print("Fuel tank empty after", i, " iterations, ie", itime[-1], "seconds")
print("remaining GOX", mox[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fuelprestank.getPtank() < Preg_N2:
print("Out of fuel pressurant after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining GOX", mox[i], "kg")
break
#update mass flow time histories. These are values during the CURRENT time step.
if i==0:
P2ox = [ox_solution[0]]
P3ox = [ox_solution[1]]
P4ox = [ox_solution[2]]
mdot_ox = [ox_solution[3]]
P2fuel = [fuel_solution[0]]
P3fuel = [fuel_solution[1]]
P4fuel = [fuel_solution[2]]
mdot_fuel = [fuel_solution[3]]
P3pres = [pres_solution[0]]
P4pres = [pres_solution[1]]
mdot_fuel_pres = [pres_solution[2]]
OFratio = [ mdot_ox[0]/mdot_fuel[0] ]
else:
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
#print("mdot_pres_new solution is", pres_solution[2], "kg/s")
mdot_fuel_pres.append( pres_solution[2])
#print("i is= ", i)
OFratio.append( mdot_ox[i]/mdot_fuel[i])
#update the rest of the time histories. System will have these values during the NEXT time step.
Poxtank.append( oxtank.getPtank())
Toxtank.append( oxtank.getTtank())
mox.append( oxtank.getM())
Pfueltank.append( fueltank.getPtank())
Tfueltank.append( fueltank.getTpres())
mPresFueltank.append( fueltank.getMpres())
mfuel.append( fueltank.getMprop())
FFfueltank.append( fueltank.getFF())
TfuelPres.append( fuelprestank.getTtank())
PfuelPres.append( fuelprestank.getPtank())
mfuelPres.append( fuelprestank.getM())
#mdot_fuel_pres.append( mdot_pres_new)
Pchamber.append( chamber.get_P_inlet() )
Pexit.append( nozzle.getPe(Pchamber[i+1], chamber.gamma, Pambient) )
Mexit.append( nozzle.getMe(Pchamber[i+1], chamber.gamma, Pambient) )
cmass.append( chamber.m)
mdot_nozzle.append( nozzle.getmdot(chamber.gamma, Runiv/chamber.mbar, chamber.get_P_inlet(),\
chamber.T, chamber.Pa) )
Thrust.append( nozzle.getThrust(chamber.get_P_inlet(), Pambient, chamber.gamma) )
T_chamber.append( chamber.T)
Isp.append( Thrust[i+1]/(mdot_ox[i] + mdot_fuel[i])/9.81 )
mTotal.append(mox[i+1] + mfuel[i+1] + cmass[i+1] + mdot_nozzle[i]*timestep )
mprs.append( mPresFueltank[i+1] + mfuelPres[i+1] )
time.append( time[i]+timestep )
#dP_ox_check = (Poxtank[-1] - P2ox[-1])
#print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi")
i+=1
# Print some values
bindex = 1001 # sample index just after the initial transient (~0.01 s at the 1e-5 s timestep); used for the "initial" printouts below
print("")
print("mdot_nozzle initial is", '%.3f'%mdot_nozzle[bindex], "kg/s")
print("initial thrust is", '%.1f'%Thrust[bindex], "N")
print("initial Isp is", '%.1f'%Isp[bindex], "s")
print("initial T_chamber is",'%.1f'%T_chamber[bindex], "K")
print("initial P_chamber is", '%.1f'%(Pchamber[bindex]/psi), "psi")
print("initial P_exit is", '%.3f'%(Pexit[bindex]/atm), "atm")
print("initial thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[bindex], atm, chamber.get_gamma(OFratio[bindex], Pchamber[bindex])) )
print("initial mdot_N2 is", '%.3f'%mdot_fuel_pres[bindex], "kg/s")
print("initial N2 flow rate is", '%.3f'%(mdot_fuel_pres[bindex]/roo_N2_fuel*1000/3.78*60), "GPM")
print("initial mdot_ox is", '%.3f'%mdot_ox[bindex], "kg/s")
print("initial mdot_fuel is", '%.3f'%mdot_fuel[bindex], "kg/s")
print("initial O/F ratio is", '%.3f'%OFratio[bindex])
print("initial ox tube velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("initial fuel tube velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("initial ox injection velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("initial fuel injection velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("initial ox injector P_drop", '%.1f'%((P4ox[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("initial fuel injector P_drop", '%.1f'%((P4fuel[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("")
print("")
print("mdot_nozzle steady state (end of sim) is", '%.3f'%mdot_nozzle[-1], "kg/s")
print("SS thrust is", '%.1f'%Thrust[-1], "N")
print("SS Isp is", '%.1f'%Isp[-1], "s")
print("SS T_chamber is",'%.1f'%T_chamber[-1], "K")
print("SS P_chamber is", '%.1f'%(Pchamber[-1]/psi), "psi")
print("SS P_exit is", '%.3f'%(Pexit[-1]/atm), "atm")
print("SS thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[-1], atm, chamber.get_gamma(OFratio[-1], Pchamber[-1])) )
print("SS mdot_N2 is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS N2 flow rate is", '%.3f'%(mdot_fuel_pres[-1]/roo_N2_fuel*1000/3.78*60), "GPM")
print("SS mdot_ox is", '%.3f'%mdot_ox[-1], "kg/s")
print("SS mdot_fuel is", '%.3f'%mdot_fuel[-1], "kg/s")
print("SS O/F ratio is", '%.3f'%OFratio[-1])
print("SS ox tube velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("SS fuel tube velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("SS ox injection velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("SS fuel injection velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("SS ox injector P_drop", '%.1f'%((P4ox[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("SS fuel injector P_drop", '%.1f'%((P4fuel[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("")
# See what check valves are doing
dP_ox_check = (Poxtank[-1] - P2ox[-1])
dP_N2_check = (Preg_N2 - P3pres[-1])
if dP_ox_check < ox_check.Pcrack:
print("Warning: Pressure drop over ox check valve (",'%.1f'%(dP_ox_check/psi),"psi) is less than its cracking pressure (",ox_check.Pcrack/psi,"psi) and will remain shut")
else:
print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi, enough to keep it flowing")
if dP_N2_check < pres_check.Pcrack:
print("Warning: Pressure drop over N2 check valve(",'%.1f'%(dP_N2_check/psi),"psi) is less than its cracking pressure (",pres_check.Pcrack/psi,"psi) and will remain shut")
else:
print("N2 check valve pressure drop is", '%.1f'%(dP_N2_check/psi), "psi, enough to keep it flowing")
# following time histories are one element shorter than the rest, so the last calculated value will be duplicated to match the length of other time histories.
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
mdot_fuel_pres.append( pres_solution[2])
OFratio.append( mdot_ox[i]/mdot_fuel[i])
# plot time histories
plt.ion()
plt.figure(1)
plt.plot(time, array(Poxtank)/psi, label='ox tank')
plt.figure(1)
plt.plot(time,array(P2ox)/psi, label='Pcheck_out')
plt.figure(1)
plt.plot(time,array(P3ox)/psi, label='Psolenoid_out')
plt.figure(1)
plt.plot(time,array(P4ox)/psi, label='Pinj_in')
plt.figure(1)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(1)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Ox pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psia')
plt.show()
Preg_N2_array = full((1, len(time)), Preg_N2/psi)
plt.figure(2)
plt.plot(time, array(PfuelPres)/psi, label='fuelpres tank')
plt.figure(2)
plt.plot(time, Preg_N2_array.T, label="P_regulation")
plt.figure(2)
plt.plot(time,array(P3pres)/psi, label='N2 check valve out')
plt.figure(2)
plt.plot(time,array(P4pres)/psi, label='N2 solenoid valve out')
plt.figure(2)
plt.plot(time,array(Pfueltank)/psi, label='fuel tank')
plt.figure(2)
plt.plot(time,array(P2fuel)/psi, label='Pvalve_out')
plt.figure(2)
plt.plot(time,array(P3fuel)/psi, label='Pjacket_in')
plt.figure(2)
plt.plot(time,array(P4fuel)/psi, label='Pinj_in')
plt.figure(2)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(2)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Fuel pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psia')
plt.show()
plt.figure(3)
plt.plot(time,Toxtank, label='Ox tank')
plt.figure(3)
plt.plot(time,Tfueltank, label='Fuel tank')
plt.figure(3)
plt.plot(time,TfuelPres, label='fuel pressurant tank')
plt.title('Tank temperatures')
plt.legend( loc='lower left')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(4)
plt.plot(time,mdot_ox, label='mdot_ox')
plt.figure(4)
plt.plot(time,mdot_fuel, label='mdot_fuel')
plt.figure(4)
plt.plot(time,mdot_nozzle, label='mdot_nozzle')
plt.figure(4)
plt.plot(time,mdot_fuel_pres, label='mdot_fuel_pres')
plt.title('Mass flows')
plt.xlabel('Time [s]')
plt.ylabel('kg/s')
plt.legend( loc='upper right')
plt.show()
plt.figure(5)
plt.plot(time,FFfueltank, label='fuel tank')
plt.title('Fill fraction in fuel tank (Vfuel/Vtank)')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.legend( loc='upper right')
plt.show()
plt.figure(6)
plt.plot(time, OFratio)
plt.title('O/F ratio')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.show()
plt.figure(7)
plt.plot(time,mox, label='GOX')
plt.figure(7)
plt.plot(time,mfuel, label='fuel')
plt.figure(7)
plt.plot(time,mfuelPres, label='fuel pressurant')
plt.figure(7)
plt.plot(time,mPresFueltank, label='pressurant in fuel tank')
plt.figure(7)
plt.plot(time,mprs, label='total pressurant')
plt.title('Fluid masses')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.legend( loc='upper right')
plt.show()
plt.figure(8)
plt.plot(time, cmass)
plt.title('Resident mass in chamber')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.show()
plt.figure(9)
plt.plot(time, Thrust)
plt.title('Thrust')
plt.xlabel('Time [s]')
plt.ylabel('N')
plt.show()
plt.figure(10)
plt.plot(time, Isp)
plt.title('Isp')
plt.xlabel('Time [s]')
plt.ylabel('s')
plt.show()
plt.figure(11)
plt.plot(time, T_chamber)
plt.title('T chamber')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(12)
plt.plot(time, Mexit)
plt.title('Exit Mach number')
plt.xlabel('Time [s]')
plt.ylabel('-')
plt.show()
plt.figure(13)
y1 = PfuelPres[-1]/psi
y2 = Preg_N2/psi
y3 = P3pres[-1]/psi
y4 = P4pres[-1]/psi
y5 = Pfueltank[-1]/psi
y6 = P2fuel[-1]/psi
y7 = P3fuel[-1]/psi
y8 = P4fuel[-1]/psi
y9 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Check valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Pressurant solenoid")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Pressurant tubing")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Fuel tank")
plt.plot( [6, 7], [y5, y6], linewidth=2, label="Fuel solenoid")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="Piping")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="Cooling jacket")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="Fuel injector")
plt.plot( [10, 11], [y9, y9], linewidth=2, label="Chamber")
plt.title('Fuel line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.figure(14)
y1 = Poxtank[-1]/psi
y2 = P2ox[-1]/psi
y3 = P3ox[-1]/psi
y4 = P4ox[-1]/psi
y5 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Ox tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Check valve")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Ox solenoid")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Tubing")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Ox injector")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Chamber")
plt.title('Ox line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
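# --- Hedged sketch (not part of the original script) -------------------------
# The check-valve report above compares a computed pressure drop against the
# valve's cracking pressure. A minimal standalone version of that check is
# sketched below; the Pa-per-psi constant and the argument names are
# assumptions made for illustration only.
def check_valve_status(dP_pa, Pcrack_pa, name, psi_pa=6894.757):
    """Return a status string for a check valve given its pressure drop [Pa]."""
    if dP_pa < Pcrack_pa:
        return ("Warning: %s pressure drop (%.1f psi) is below its cracking "
                "pressure (%.1f psi); the valve will remain shut"
                % (name, dP_pa / psi_pa, Pcrack_pa / psi_pa))
    return ("%s pressure drop is %.1f psi, enough to keep it flowing"
            % (name, dP_pa / psi_pa))

# Example with hypothetical numbers: a 5 psi drop over a valve that cracks at 2 psi.
# print(check_valve_status(5 * 6894.757, 2 * 6894.757, "Ox check valve"))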
|
|
import sys
import wx
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLUT import *
from jumeg.tsv.plot2d.jumeg_tsv_plot2d_ogl import JuMEG_TSV_PLOT2D_OGL
# from jumeg.tsv.test.axis01 import JuMEG_TSV_AXIS
attribList = (glcanvas.WX_GL_RGBA, # RGBA
glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
glcanvas.WX_GL_DEPTH_SIZE,24) # 24 bit
#---------------------------------------------------------------------------------------
class JuMEG_TSV_PLOT2D_CanvasBase(glcanvas.GLCanvas):
def __init__(self, parent):
style = wx.DEFAULT_FRAME_STYLE # | wx.NO_FULL_REPAINT_ON_RESIZE
glcanvas.GLCanvas.__init__(self, parent, -1,attribList=attribList,style=style)
self.context = glcanvas.GLContext(self)
self.plot2d = None
self.count = 0
        self.resize_cnt = 0
self.LeftDown = False
self.is_initGL = True
self.is_on_draw = False
self.is_on_paint = False
self.is_on_size = False
# initial mouse position
self.lastx = self.x = 30
self.lasty = self.y = 30
self.size = None
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
# self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseLeftDown)
# self.Bind(wx.EVT_LEFT_UP, self.OnMouseLeftUp)
#self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_CHAR, self.OnKeyDown)
#self.Refresh()
def OnEraseBackground(self, event):
pass # Do nothing, to avoid flashing on MSW.
def OnSize(self, event):
"""Process the resize event."""
if self.is_on_draw:
return
self.is_on_size = True
if self.LeftDown :
# print"==========>SKIP REFRESH UPDATE"
self.is_on_size = False
# event.Skip()
return
if self.GetContext():
# Make sure the frame is shown before calling SetCurrent.
self.Show()
self.SetCurrent()
self.Refresh()
event.Skip()
self.is_on_size = False
def OnPaint(self, evt):
if self.is_on_draw:
return
if self.LeftDown :
# print"-----> ONDRAW Mouse DOWN"
#evt.Skip()
self.is_on_paint = False
return
self.is_on_paint = True
# dc = wx.PaintDC(self)
dc = wx.ClientDC(self) # no border
self.OnDraw( size_mm=dc.GetSizeMM() )
# evt.Skip()
self.is_on_paint = False
# def OnMouseLeftDown(self, evt):
# self.LeftDown=True
#self.CaptureMouse()
#self.x, self.y = self.lastx, self.lasty = evt.GetPosition()
# evt.Skip()
# def OnMouseLeftUp(self, evt):
# self.LeftDown=False
# evt.Skip()
#self.Refresh(True)
#print"MUP"
#self.ReleaseMouse()
# def OnMouseMotion(self, evt):
# if evt.Dragging() and evt.LeftIsDown():
# self.lastx, self.lasty = self.x, self.y
# self.x, self.y = evt.GetPosition()
# print "on mouse drag LD"
# self.Refresh(False)
def OnKeyDown(self, e):
key = e.GetKeyCode()
# print"GLCanvas EVT OnKeyDown: " + str(key)
#---escape to quit
if key == wx.WXK_ESCAPE:
self.click_on_exit(e)
#----------------------------------------------------------------------------------------
class JuMEG_TSV_PLOT2D_WX(JuMEG_TSV_PLOT2D_CanvasBase):
def __init__(self, parent):
JuMEG_TSV_PLOT2D_CanvasBase.__init__(self,parent)
self.InitGL()
#vbox = wx.BoxSizer(wx.VERTICAL)
#self.plot_axis = JuMEG_TSV_AXIS(self)
#vbox.Add(self, 1, wx.EXPAND,0)
#vbox.Add(self.plot_axis, 0, wx.EXPAND,0)
#self.SetAutoLayout(True)
#self.SetSizer(vbox)
def OnKeyDown(self, evt):
action = None
if not self.is_initGL :
            evt.Skip()
        #--- escape to quit
key = evt.GetKeyCode()
#--- scroll time fast by window
if (wx.GetKeyState(wx.WXK_CONTROL) == True):
if key == (wx.WXK_LEFT):
#print"FAST REW"
action = "FAST_REWIND"
elif key == (wx.WXK_RIGHT):
action = "FAST_FORWARD"
elif key == (wx.WXK_HOME):
action ="START"
elif key == (wx.WXK_END):
action = "END"
#----
elif key == (wx.WXK_F11):
action = "TIME_DISPLAY_ALL"
elif key ==(wx.WXK_F12):
action = "CHANNELS_DISPLAY_ALL"
#--- scroll time by scroll step
elif key == wx.WXK_LEFT:
#print"LEFT"
action = "REWIND"
elif key == wx.WXK_RIGHT:
#print "RIGHT"
action = "FORWARD"
#--- scroll channels
elif key == wx.WXK_UP:
action = "UP"
elif key == wx.WXK_DOWN:
action = "DOWN"
elif key == wx.WXK_PAGEUP:
action = "PAGEUP"
elif key == wx.WXK_PAGEDOWN:
action = "PAGEDOWN"
elif key == wx.WXK_HOME:
action = "TOP"
elif key == wx.WXK_END:
action = "BOTTOM"
#---
if action:
self.plot2d.opt.action(action)
self.update()
else:
evt.Skip()
def InitGL(self):
glutInit(sys.argv)
self.SetCurrent()
#glutInit(sys.argv)
self.plot2d = JuMEG_TSV_PLOT2D_OGL()
self.plot2d.size_in_pixel = self.GetClientSize()
self.plot2d.init_glwindow( )
#glutInit(sys.argv)
self.is_initGL = True
def OnDraw(self,size_mm=None):
if self.is_on_draw:
return
self.is_on_draw = True
if self.is_initGL:
self.SetCurrent()
else:
self.InitGL()
self.plot2d.size_in_pixel = self.GetClientSize()
self.plot2d.size_in_mm = size_mm
self.plot2d.display()
self.SwapBuffers()
self.is_on_draw = False
def update(self,raw=None): #,do_scroll_channels=True,do_scroll_time=True):
if self.is_initGL :
if raw :
self.plot2d.init_raw_data(raw=raw)
elif self.plot2d.data_is_init:
self.plot2d.update_data() #do_scroll_channels=True,do_scroll_time=True,)
#self.plot_axis.range_max = self.plot2d.timepoints[-1]
#self.plot_axis.range_min = self.plot2d.timepoints[0]
if self.plot2d.opt.do_scroll:
self.Refresh()
# self.plot_axis.range_max = self.plot2d.timepoints[-1]
#self.plot_axis.range_min = self.plot2d.timepoints[0]
#self.plot_axis.Refresh()
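# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal demo embedding the canvas above in a bare wx.Frame. In the real
# JuMEG TSV application the canvas is created by the surrounding GUI and fed
# raw data via update(raw=...); the frame title and size here are arbitrary,
# and a working OpenGL/GLUT environment is assumed.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title="JuMEG TSV plot2d demo", size=(800, 600))
    canvas = JuMEG_TSV_PLOT2D_WX(frame)  # creates its own GL context on init
    frame.Show()
    app.MainLoop()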
|
|
"""
SEP: 0002
Title: Federation protocol
Author: stellar.org
Status: Final
Created: 2017-10-30
Updated: 2019-10-10
Version 1.1.0
"""
from typing import Dict, Optional
from .. import AiohttpClient
from ..client.base_async_client import BaseAsyncClient
from ..client.base_sync_client import BaseSyncClient
from ..client.requests_client import RequestsClient
from ..client.response import Response
from ..exceptions import ValueError
from ..type_checked import type_checked
from .exceptions import (
BadFederationResponseError,
FederationServerNotFoundError,
InvalidFederationAddress,
)
from .stellar_toml import fetch_stellar_toml, fetch_stellar_toml_async
SEPARATOR = "*"
FEDERATION_SERVER_KEY = "FEDERATION_SERVER"
__all__ = [
"FederationRecord",
"resolve_stellar_address",
"resolve_stellar_address_async",
"resolve_account_id",
"resolve_account_id_async",
]
@type_checked
class FederationRecord:
def __init__(
self,
account_id: str,
stellar_address: str,
memo_type: Optional[str],
memo: Optional[str],
) -> None:
"""The :class:`FederationRecord`, which represents record in federation server.
:param account_id: Stellar public key / account ID
:param stellar_address: Stellar address
:param memo_type: Type of memo to attach to transaction, one of ``text``, ``id`` or ``hash``
:param memo: value of memo to attach to transaction, for ``hash`` this should be base64-encoded.
            This field should always be of type ``string`` (even when ``memo_type`` is ``id``) to support parsing
            the value in languages that don't support big numbers.
"""
self.account_id: str = account_id
self.stellar_address: str = stellar_address
self.memo_type: Optional[str] = memo_type
self.memo: Optional[str] = memo
def __str__(self):
return (
f"<FederationRecord [account_id={self.account_id}, stellar_address={self.stellar_address}, "
f"memo_type={self.memo_type}, memo={self.memo}]>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.account_id == other.account_id
and self.stellar_address == other.stellar_address
and self.memo_type == other.memo_type
and self.memo == other.memo
)
@type_checked
def resolve_stellar_address(
stellar_address: str,
client: BaseSyncClient = None,
federation_url: str = None,
use_http: bool = False,
) -> FederationRecord:
"""Get the federation record if the user was found for a given Stellar address.
:param stellar_address: address Stellar address (ex. ``"bob*stellar.org"``).
:param client: Http Client used to send the request.
:param federation_url: The federation server URL (ex. ``"https://stellar.org/federation"``),
if you don't set this value, we will try to get it from `stellar_address`.
:param use_http: Specifies whether the request should go over plain HTTP vs HTTPS.
        Note it is recommended that you **always** use HTTPS.
:return: Federation record.
"""
if not client:
client = RequestsClient()
parts = _split_stellar_address(stellar_address)
domain = parts["domain"]
if federation_url is None:
federation_url = fetch_stellar_toml(domain, use_http=use_http).get( # type: ignore[union-attr]
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = client.get(federation_url, {"type": "name", "q": stellar_address})
return _handle_raw_response(raw_resp, stellar_address=stellar_address)
@type_checked
async def resolve_stellar_address_async(
stellar_address: str,
client: BaseAsyncClient = None,
federation_url: str = None,
use_http: bool = False,
) -> FederationRecord:
"""Get the federation record if the user was found for a given Stellar address.
:param stellar_address: address Stellar address (ex. ``"bob*stellar.org"``).
:param client: Http Client used to send the request.
:param federation_url: The federation server URL (ex. ``"https://stellar.org/federation"``),
if you don't set this value, we will try to get it from `stellar_address`.
:param use_http: Specifies whether the request should go over plain HTTP vs HTTPS.
        Note it is recommended that you **always** use HTTPS.
:return: Federation record.
"""
if not client:
client = AiohttpClient()
parts = _split_stellar_address(stellar_address)
domain = parts["domain"]
if federation_url is None:
federation_url = (
await fetch_stellar_toml_async(domain, client=client, use_http=use_http)
).get(FEDERATION_SERVER_KEY)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = await client.get(federation_url, {"type": "name", "q": stellar_address})
return _handle_raw_response(raw_resp, stellar_address=stellar_address)
@type_checked
def resolve_account_id(
account_id: str,
domain: str = None,
federation_url: str = None,
client: BaseSyncClient = None,
use_http: bool = False,
) -> FederationRecord:
"""Given an account ID, get their federation record if the user was found
:param account_id: Account ID (ex. ``"GBYNR2QJXLBCBTRN44MRORCMI4YO7FZPFBCNOKTOBCAAFC7KC3LNPRYS"``)
:param domain: Get `federation_url` from the domain, you don't need to set this value if `federation_url` is set.
:param federation_url: The federation server URL (ex. ``"https://stellar.org/federation"``).
:param client: Http Client used to send the request.
:param use_http: Specifies whether the request should go over plain HTTP vs HTTPS.
        Note it is recommended that you **always** use HTTPS.
:return: Federation record.
"""
if domain is None and federation_url is None:
raise ValueError("You should provide either `domain` or `federation_url`.")
if not client:
client = RequestsClient()
if domain is not None:
federation_url = fetch_stellar_toml(domain, client, use_http).get( # type: ignore[union-attr]
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
assert federation_url is not None
raw_resp = client.get(federation_url, {"type": "id", "q": account_id})
return _handle_raw_response(raw_resp, account_id=account_id)
@type_checked
async def resolve_account_id_async(
account_id: str,
domain: str = None,
federation_url: str = None,
client: BaseAsyncClient = None,
use_http: bool = False,
) -> FederationRecord:
"""Given an account ID, get their federation record if the user was found
:param account_id: Account ID (ex. ``"GBYNR2QJXLBCBTRN44MRORCMI4YO7FZPFBCNOKTOBCAAFC7KC3LNPRYS"``)
:param domain: Get `federation_url` from the domain, you don't need to set this value if `federation_url` is set.
:param federation_url: The federation server URL (ex. ``"https://stellar.org/federation"``).
:param client: Http Client used to send the request.
:param use_http: Specifies whether the request should go over plain HTTP vs HTTPS.
        Note it is recommended that you **always** use HTTPS.
:return: Federation record.
"""
if domain is None and federation_url is None:
raise ValueError("You should provide either `domain` or `federation_url`.")
if not client:
client = AiohttpClient()
if domain is not None:
federation_url = (await fetch_stellar_toml_async(domain, client, use_http)).get(
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
assert federation_url is not None
raw_resp = await client.get(federation_url, {"type": "id", "q": account_id})
return _handle_raw_response(raw_resp, account_id=account_id)
@type_checked
def _handle_raw_response(
raw_resp: Response, stellar_address=None, account_id=None
) -> FederationRecord:
if not 200 <= raw_resp.status_code < 300:
raise BadFederationResponseError(raw_resp)
data = raw_resp.json()
account_id = account_id or data.get("account_id")
stellar_address = stellar_address or data.get("stellar_address")
memo_type = data.get("memo_type")
memo = data.get("memo")
return FederationRecord(
account_id=account_id,
stellar_address=stellar_address,
memo_type=memo_type,
memo=memo,
)
@type_checked
def _split_stellar_address(address: str) -> Dict[str, str]:
parts = address.split(SEPARATOR)
if len(parts) != 2:
raise InvalidFederationAddress(
"Address should be a valid address, such as `bob*stellar.org`"
)
name, domain = parts
return {"name": name, "domain": domain}
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.i18n import _LI
from designate import rpc
LOG = logging.getLogger(__name__)
CENTRAL_API = None
class CentralAPI(object):
"""
Client side of the central RPC API.
API version history:
1.0 - Initial version
1.1 - Add new finder methods
1.2 - Add get_tenant and get_tenants
1.3 - Add get_absolute_limits
2.0 - Renamed most get_resources to find_resources
2.1 - Add quota methods
3.0 - RecordSet Changes
3.1 - Add floating ip ptr methods
3.2 - TLD Api changes
3.3 - Add methods for blacklisted domains
4.0 - Create methods now accept designate objects
4.1 - Add methods for server pools
4.2 - Add methods for pool manager integration
4.3 - Added Zone Transfer Methods
5.0 - Remove dead server code
5.1 - Add xfr_domain
5.2 - Add Zone Import methods
5.3 - Add Zone Export method
5.4 - Add asynchronous Zone Export methods
5.5 - Add deleted zone purging task
"""
RPC_API_VERSION = '5.5'
def __init__(self, topic=None):
topic = topic if topic else cfg.CONF.central_topic
target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='5.5')
@classmethod
def get_instance(cls):
"""
        The rpc.get_client() call made during API object initialization
        will raise an assertion error if designate.rpc.TRANSPORT has not
        been set up by rpc.init() beforehand.
        This avoids that by creating the rpcapi lazily, on first demand.
"""
global CENTRAL_API
if not CENTRAL_API:
CENTRAL_API = cls()
return CENTRAL_API
# Misc Methods
def get_absolute_limits(self, context):
LOG.info(_LI("get_absolute_limits: "
"Calling central's get_absolute_limits."))
return self.client.call(context, 'get_absolute_limits')
# Quota Methods
def get_quotas(self, context, tenant_id):
LOG.info(_LI("get_quotas: Calling central's get_quotas."))
return self.client.call(context, 'get_quotas', tenant_id=tenant_id)
def get_quota(self, context, tenant_id, resource):
LOG.info(_LI("get_quota: Calling central's get_quota."))
return self.client.call(context, 'get_quota', tenant_id=tenant_id,
resource=resource)
def set_quota(self, context, tenant_id, resource, hard_limit):
LOG.info(_LI("set_quota: Calling central's set_quota."))
return self.client.call(context, 'set_quota', tenant_id=tenant_id,
resource=resource, hard_limit=hard_limit)
def reset_quotas(self, context, tenant_id):
LOG.info(_LI("reset_quotas: Calling central's reset_quotas."))
return self.client.call(context, 'reset_quotas', tenant_id=tenant_id)
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
LOG.info(_LI("create_tsigkey: Calling central's create_tsigkey."))
return self.client.call(context, 'create_tsigkey', tsigkey=tsigkey)
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_tsigkeys: Calling central's find_tsigkeys."))
return self.client.call(context, 'find_tsigkeys', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tsigkey(self, context, tsigkey_id):
LOG.info(_LI("get_tsigkey: Calling central's get_tsigkey."))
return self.client.call(context, 'get_tsigkey', tsigkey_id=tsigkey_id)
def update_tsigkey(self, context, tsigkey):
LOG.info(_LI("update_tsigkey: Calling central's update_tsigkey."))
return self.client.call(context, 'update_tsigkey', tsigkey=tsigkey)
def delete_tsigkey(self, context, tsigkey_id):
LOG.info(_LI("delete_tsigkey: Calling central's delete_tsigkey."))
return self.client.call(context, 'delete_tsigkey',
tsigkey_id=tsigkey_id)
# Tenant Methods
def find_tenants(self, context):
LOG.info(_LI("find_tenants: Calling central's find_tenants."))
return self.client.call(context, 'find_tenants')
def get_tenant(self, context, tenant_id):
LOG.info(_LI("get_tenant: Calling central's get_tenant."))
return self.client.call(context, 'get_tenant', tenant_id=tenant_id)
def count_tenants(self, context):
LOG.info(_LI("count_tenants: Calling central's count_tenants."))
return self.client.call(context, 'count_tenants')
# Domain Methods
def create_domain(self, context, domain):
LOG.info(_LI("create_domain: Calling central's create_domain."))
return self.client.call(context, 'create_domain', domain=domain)
def get_domain(self, context, domain_id):
LOG.info(_LI("get_domain: Calling central's get_domain."))
return self.client.call(context, 'get_domain', domain_id=domain_id)
def get_domain_servers(self, context, domain_id):
LOG.info(_LI("get_domain_servers: "
"Calling central's get_domain_servers."))
return self.client.call(context, 'get_domain_servers',
domain_id=domain_id)
def find_domains(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_domains: Calling central's find_domains."))
return self.client.call(context, 'find_domains', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain(self, context, criterion=None):
LOG.info(_LI("find_domain: Calling central's find_domain."))
return self.client.call(context, 'find_domain', criterion=criterion)
def update_domain(self, context, domain, increment_serial=True):
LOG.info(_LI("update_domain: Calling central's update_domain."))
return self.client.call(context, 'update_domain', domain=domain,
increment_serial=increment_serial)
def delete_domain(self, context, domain_id):
LOG.info(_LI("delete_domain: Calling central's delete_domain."))
return self.client.call(context, 'delete_domain', domain_id=domain_id)
def purge_domains(self, context, criterion=None, limit=None):
LOG.info(_LI(
"purge_domains: Calling central's purge_domains."
))
cctxt = self.client.prepare(version='5.5')
return cctxt.call(context, 'purge_domains',
criterion=criterion, limit=limit)
def count_domains(self, context, criterion=None):
LOG.info(_LI("count_domains: Calling central's count_domains."))
return self.client.call(context, 'count_domains', criterion=criterion)
def touch_domain(self, context, domain_id):
LOG.info(_LI("touch_domain: Calling central's touch_domain."))
return self.client.call(context, 'touch_domain', domain_id=domain_id)
# TLD Methods
def create_tld(self, context, tld):
LOG.info(_LI("create_tld: Calling central's create_tld."))
return self.client.call(context, 'create_tld', tld=tld)
def find_tlds(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_tlds: Calling central's find_tlds."))
return self.client.call(context, 'find_tlds', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tld(self, context, tld_id):
LOG.info(_LI("get_tld: Calling central's get_tld."))
return self.client.call(context, 'get_tld', tld_id=tld_id)
def update_tld(self, context, tld):
LOG.info(_LI("update_tld: Calling central's update_tld."))
return self.client.call(context, 'update_tld', tld=tld)
def delete_tld(self, context, tld_id):
LOG.info(_LI("delete_tld: Calling central's delete_tld."))
return self.client.call(context, 'delete_tld', tld_id=tld_id)
# RecordSet Methods
def create_recordset(self, context, domain_id, recordset):
LOG.info(_LI("create_recordset: Calling central's create_recordset."))
return self.client.call(context, 'create_recordset',
domain_id=domain_id, recordset=recordset)
def get_recordset(self, context, domain_id, recordset_id):
LOG.info(_LI("get_recordset: Calling central's get_recordset."))
return self.client.call(context, 'get_recordset', domain_id=domain_id,
recordset_id=recordset_id)
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_recordsets: Calling central's find_recordsets."))
return self.client.call(context, 'find_recordsets',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_recordset(self, context, criterion=None):
LOG.info(_LI("find_recordset: Calling central's find_recordset."))
return self.client.call(context, 'find_recordset', criterion=criterion)
def export_zone(self, context, zone_id):
LOG.info(_LI("export_zone: Calling central's export_zone."))
return self.client.call(context, 'export_zone', zone_id=zone_id)
def update_recordset(self, context, recordset, increment_serial=True):
LOG.info(_LI("update_recordset: Calling central's update_recordset."))
return self.client.call(context, 'update_recordset',
recordset=recordset,
increment_serial=increment_serial)
def delete_recordset(self, context, domain_id, recordset_id,
increment_serial=True):
LOG.info(_LI("delete_recordset: Calling central's delete_recordset."))
return self.client.call(context, 'delete_recordset',
domain_id=domain_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
def count_recordsets(self, context, criterion=None):
LOG.info(_LI("count_recordsets: Calling central's count_recordsets."))
return self.client.call(context, 'count_recordsets',
criterion=criterion)
# Record Methods
def create_record(self, context, domain_id, recordset_id, record,
increment_serial=True):
LOG.info(_LI("create_record: Calling central's create_record."))
return self.client.call(context, 'create_record',
domain_id=domain_id,
recordset_id=recordset_id,
record=record,
increment_serial=increment_serial)
def get_record(self, context, domain_id, recordset_id, record_id):
LOG.info(_LI("get_record: Calling central's get_record."))
return self.client.call(context, 'get_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
def find_records(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_records: Calling central's find_records."))
return self.client.call(context, 'find_records', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_record(self, context, criterion=None):
LOG.info(_LI("find_record: Calling central's find_record."))
return self.client.call(context, 'find_record', criterion=criterion)
def update_record(self, context, record, increment_serial=True):
LOG.info(_LI("update_record: Calling central's update_record."))
return self.client.call(context, 'update_record',
record=record,
increment_serial=increment_serial)
def delete_record(self, context, domain_id, recordset_id, record_id,
increment_serial=True):
LOG.info(_LI("delete_record: Calling central's delete_record."))
return self.client.call(context, 'delete_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id,
increment_serial=increment_serial)
def count_records(self, context, criterion=None):
LOG.info(_LI("count_records: Calling central's count_records."))
return self.client.call(context, 'count_records', criterion=criterion)
# Misc. Report combining counts for tenants, domains and records
def count_report(self, context, criterion=None):
LOG.info(_LI("count_report: Calling central's count_report."))
return self.client.call(context, 'count_report', criterion=criterion)
# Sync Methods
def sync_domains(self, context):
LOG.info(_LI("sync_domains: Calling central's sync_domains."))
return self.client.call(context, 'sync_domains')
def sync_domain(self, context, domain_id):
LOG.info(_LI("sync_domain: Calling central's sync_domains."))
return self.client.call(context, 'sync_domain', domain_id=domain_id)
def sync_record(self, context, domain_id, recordset_id, record_id):
LOG.info(_LI("sync_record: Calling central's sync_record."))
return self.client.call(context, 'sync_record',
domain_id=domain_id,
recordset_id=recordset_id,
record_id=record_id)
def list_floatingips(self, context):
LOG.info(_LI("list_floatingips: Calling central's list_floatingips."))
return self.client.call(context, 'list_floatingips')
def get_floatingip(self, context, region, floatingip_id):
LOG.info(_LI("get_floatingip: Calling central's get_floatingip."))
return self.client.call(context, 'get_floatingip', region=region,
floatingip_id=floatingip_id)
def update_floatingip(self, context, region, floatingip_id, values):
LOG.info(_LI("update_floatingip: "
"Calling central's update_floatingip."))
return self.client.call(context, 'update_floatingip', region=region,
floatingip_id=floatingip_id, values=values)
# Blacklisted Domain Methods
def create_blacklist(self, context, blacklist):
LOG.info(_LI("create_blacklist: Calling central's create_blacklist"))
return self.client.call(context, 'create_blacklist',
blacklist=blacklist)
def get_blacklist(self, context, blacklist_id):
LOG.info(_LI("get_blacklist: Calling central's get_blacklist."))
return self.client.call(context, 'get_blacklist',
blacklist_id=blacklist_id)
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_blacklists: Calling central's find_blacklists."))
return self.client.call(
context, 'find_blacklists', criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_blacklist(self, context, criterion):
LOG.info(_LI("find_blacklist: Calling central's find_blacklist."))
return self.client.call(context, 'find_blacklist', criterion=criterion)
def update_blacklist(self, context, blacklist):
LOG.info(_LI("update_blacklist: Calling central's update_blacklist."))
return self.client.call(context, 'update_blacklist',
blacklist=blacklist)
def delete_blacklist(self, context, blacklist_id):
LOG.info(_LI("delete_blacklist: Calling central's delete blacklist."))
return self.client.call(context, 'delete_blacklist',
blacklist_id=blacklist_id)
# Pool Server Methods
def create_pool(self, context, pool):
LOG.info(_LI("create_pool: Calling central's create_pool."))
return self.client.call(context, 'create_pool', pool=pool)
def find_pools(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_pools: Calling central's find_pools."))
return self.client.call(context, 'find_pools', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_pool(self, context, criterion=None):
LOG.info(_LI("find_pool: Calling central's find_pool."))
return self.client.call(context, 'find_pool', criterion=criterion)
def get_pool(self, context, pool_id):
LOG.info(_LI("get_pool: Calling central's get_pool."))
return self.client.call(context, 'get_pool', pool_id=pool_id)
def update_pool(self, context, pool):
LOG.info(_LI("update_pool: Calling central's update_pool."))
return self.client.call(context, 'update_pool', pool=pool)
def delete_pool(self, context, pool_id):
LOG.info(_LI("delete_pool: Calling central's delete_pool."))
return self.client.call(context, 'delete_pool', pool_id=pool_id)
# Pool Manager Integration Methods
def update_status(self, context, domain_id, status, serial):
LOG.info(_LI("update_status: Calling central's update_status "
"for %(domain_id)s : %(status)s : %(serial)s") %
{'domain_id': domain_id, 'status': status, 'serial': serial})
self.client.cast(context, 'update_status', domain_id=domain_id,
status=status, serial=serial)
# Zone Ownership Transfers
def create_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("create_zone_transfer_request: \
Calling central's create_zone_transfer_request."))
return self.client.call(
context, 'create_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def get_zone_transfer_request(self, context, zone_transfer_request_id):
LOG.info(_LI("get_zone_transfer_request: \
Calling central's get_zone_transfer_request."))
return self.client.call(
context,
'get_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def find_zone_transfer_requests(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_transfer_requests: \
Calling central's find_zone_transfer_requests."))
return self.client.call(
context, 'find_zone_transfer_requests', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("find_zone_transfer_request: \
Calling central's find_zone_transfer_request."))
return self.client.call(
context, 'find_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def update_zone_transfer_request(self, context, zone_transfer_request):
LOG.info(_LI("update_zone_transfer_request: \
Calling central's update_zone_transfer_request."))
return self.client.call(
context, 'update_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def delete_zone_transfer_request(self, context, zone_transfer_request_id):
LOG.info(_LI("delete_zone_transfer_request: \
Calling central's delete_zone_transfer_request."))
return self.client.call(
context,
'delete_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def create_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("create_zone_transfer_accept: \
Calling central's create_zone_transfer_accept."))
return self.client.call(
context, 'create_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def get_zone_transfer_accept(self, context, zone_transfer_accept_id):
LOG.info(_LI("get_zone_transfer_accept: \
Calling central's get_zone_transfer_accept."))
return self.client.call(
context,
'get_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def find_zone_transfer_accepts(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_transfer_accepts: \
Calling central's find_zone_transfer_accepts."))
return self.client.call(
context, 'find_zone_transfer_accepts', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("find_zone_transfer_accept: \
Calling central's find_zone_transfer_accept."))
return self.client.call(
context, 'find_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def update_zone_transfer_accept(self, context, zone_transfer_accept):
LOG.info(_LI("update_zone_transfer_accept: \
Calling central's update_zone_transfer_accept."))
return self.client.call(
context, 'update_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def delete_zone_transfer_accept(self, context, zone_transfer_accept_id):
LOG.info(_LI("delete_zone_transfer_accept: \
Calling central's delete_zone_transfer_accept."))
return self.client.call(
context,
'delete_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def xfr_domain(self, context, domain_id):
LOG.info(_LI("xfr_domain: Calling central's xfr_domain"))
cctxt = self.client.prepare(version='5.3')
return cctxt.call(context, 'xfr_domain', domain_id=domain_id)
# Zone Import Methods
def create_zone_import(self, context, request_body):
LOG.info(_LI("create_zone_import: Calling central's "
"create_zone_import."))
return self.client.call(context, 'create_zone_import',
request_body=request_body)
def find_zone_imports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_imports: Calling central's "
"find_zone_imports."))
return self.client.call(context, 'find_zone_imports',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_zone_import(self, context, zone_import_id):
LOG.info(_LI("get_zone_import: Calling central's get_zone_import."))
return self.client.call(context, 'get_zone_import',
zone_import_id=zone_import_id)
def update_zone_import(self, context, zone_import):
LOG.info(_LI("update_zone_import: Calling central's "
"update_zone_import."))
return self.client.call(context, 'update_zone_import',
zone_import=zone_import)
def delete_zone_import(self, context, zone_import_id):
LOG.info(_LI("delete_zone_import: Calling central's "
"delete_zone_import."))
return self.client.call(context, 'delete_zone_import',
zone_import_id=zone_import_id)
# Zone Export Methods
def create_zone_export(self, context, zone_id):
LOG.info(_LI("create_zone_export: Calling central's "
"create_zone_export."))
return self.client.call(context, 'create_zone_export',
zone_id=zone_id)
def find_zone_exports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
LOG.info(_LI("find_zone_exports: Calling central's "
"find_zone_exports."))
return self.client.call(context, 'find_zone_exports',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_zone_export(self, context, zone_export_id):
LOG.info(_LI("get_zone_export: Calling central's get_zone_export."))
return self.client.call(context, 'get_zone_export',
zone_export_id=zone_export_id)
def update_zone_export(self, context, zone_export):
LOG.info(_LI("update_zone_export: Calling central's "
"update_zone_export."))
return self.client.call(context, 'update_zone_export',
zone_export=zone_export)
def delete_zone_export(self, context, zone_export_id):
LOG.info(_LI("delete_zone_export: Calling central's "
"delete_zone_export."))
return self.client.call(context, 'delete_zone_export',
zone_export_id=zone_export_id)
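# --- Hedged usage sketch (not part of the original module) -------------------
# get_instance() assumes the messaging transport has already been initialised
# (see its docstring above). The helper below only illustrates that ordering;
# a real deployment performs the rpc.init() bootstrap inside the service entry
# points, not next to the API call.
def _example_get_absolute_limits(context):
    rpc.init(cfg.CONF)                       # must happen before get_instance()
    central_api = CentralAPI.get_instance()  # lazily builds the RPC client
    return central_api.get_absolute_limits(context)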
|
|
#!/usr/bin/env python3
import asyncio
import json
import logging
import os
import shlex
import subprocess
import time
import urllib.request
# Setup the Logger
logging.basicConfig(format='%(message)s')
log = logging.getLogger()
log.setLevel(logging.INFO)
DEFAULT_TIMEOUT = 120
LONG_TIMEOUT = 500
def find_fbsimctl_path(expected_path):
if os.path.exists(expected_path):
fbsimctl_path = os.path.realpath(expected_path)
log.info('Using fbsimctl test executable at {}'.format(fbsimctl_path))
return fbsimctl_path
else:
log.info('Using fbsimctl on PATH')
return 'fbsimctl'
class Events:
def __init__(self, events):
self.__events = events
def extend(self, events):
self.__events.extend(events)
def __repr__(self):
return '\n'.join(
[str(event) for event in self.__events],
)
def matching(self, event_name, event_type):
return [
event for event in self.__events
if event['event_name'] == event_name and event['event_type'] == event_type
]
class Simulator:
def __init__(self, json):
self.__json = json
def __repr__(self):
return str(self.__json)
def get_udid(self):
return self.__json['udid']
class FBSimctlProcess:
def __init__(
self,
arguments,
timeout
):
self.__arguments = arguments
self.__timeout = timeout
self.__events = Events([])
self.__loop = None
self.__process = None
def wait_for_event(self, event_name, event_type, timeout=None):
timeout = timeout if timeout else self.__timeout
return self.__loop.run_until_complete(
self._wait_for_event(event_name, event_type, timeout),
)
def start(self):
if self.__process:
raise Exception(
                'A Process {} has already started'.format(self.__process),
)
self.__process = self.__loop.run_until_complete(
self._start_process()
)
return self
def terminate(self):
self.__loop.run_until_complete(
self._terminate_process(),
)
def __enter__(self):
self.__loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.__loop)
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
self.__loop.close()
self.__loop = None
@asyncio.coroutine
def _start_process(self):
log.info('Opening Process with Arguments {0}'.format(
' '.join(self.__arguments),
))
create = asyncio.create_subprocess_exec(
*self.__arguments,
stdout=asyncio.subprocess.PIPE,
stderr=None,
)
process = yield from create
return process
@asyncio.coroutine
def _terminate_process(self):
if not self.__process:
raise Exception(
                'Cannot terminate a process when none has started',
)
if self.__process.returncode is not None:
return
log.info('Terminating {0}'.format(self.__process))
self.__process.terminate()
yield from self.__process.wait()
log.info('Terminated {0}'.format(self.__process))
@asyncio.coroutine
def _wait_for_event(self, event_name, event_type, timeout):
matching = self._match_event(
event_name,
event_type,
)
if matching:
return matching
start_time = time.time()
while time.time() < start_time + timeout:
data = yield from self.__process.stdout.readline()
line = data.decode('utf-8').rstrip()
log.info(line)
matching = self._match_event(
event_name,
event_type,
json.loads(line),
)
if matching:
return matching
raise Exception('Timed out waiting for {0}/{1} in {2}'.format(
event_name,
event_type,
            self.__events,
))
def _match_event(self, event_name, event_type, json_event=None):
if json_event:
self.__events.extend([json_event])
matching = self.__events.matching(
event_name,
event_type,
)
if not matching:
return None
log.info('{0} matches {1}/{2}'.format(
matching,
event_name,
event_type,
))
return matching
class FBSimctl:
def __init__(self, executable_path, set_path=None):
self.__executable_path = executable_path
self.__set_path = set_path
def __call__(self, arguments):
return self.run(arguments)
def _make_arguments(self, arguments=[]):
base_arguments = [self.__executable_path]
if self.__set_path:
base_arguments += ['--set', self.__set_path]
base_arguments.append('--json')
return base_arguments + arguments
def run(self, arguments, timeout=DEFAULT_TIMEOUT):
arguments = self._make_arguments(arguments)
log.info('Running Process with Arguments {0}'.format(
' '.join(arguments),
))
process = subprocess.run(
arguments,
stdout=subprocess.PIPE,
check=True,
timeout=timeout,
)
events = [
json.loads(line) for line in str(process.stdout, 'utf-8').splitlines()
]
return Events(events)
def launch(self, arguments, timeout=DEFAULT_TIMEOUT):
return FBSimctlProcess(
arguments=self._make_arguments(arguments),
timeout=timeout,
)
class WebServer:
def __init__(self, port):
self.__port = port
def get(self, path):
request = urllib.request.Request(
url=self._make_url(path),
method='GET',
)
return self._perform_request(request)
def post(self, path, payload):
data = json.dumps(payload).encode('utf-8')
request = urllib.request.Request(
url=self._make_url(path),
data=data,
method='POST',
headers={'content-type': 'application/json'},
)
return self._perform_request(request)
def _make_url(self, path):
return 'http://localhost:{}/{}'.format(
self.__port,
path,
)
def _perform_request(self, request):
with urllib.request.urlopen(request) as f:
response = f.read().decode('utf-8')
return json.loads(response)
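# --- Hedged usage sketch (not part of the original helpers) -------------------
# Typical flow with the helpers above: run a one-shot `list` command, then keep
# a long-running `listen --http` process open and poll it over HTTP. The port,
# the fbsimctl location and the 'listen'/'started' event names are assumptions
# made for illustration.
def _example_usage():
    fbsimctl = FBSimctl(find_fbsimctl_path('./build/fbsimctl'))
    log.info(fbsimctl.run(['list']))                     # one-shot command
    with fbsimctl.launch(['listen', '--http', '8090']) as process:
        process.wait_for_event('listen', 'started')      # block until ready
        webserver = WebServer(8090)
        log.info(webserver.get('list'))                  # query over HTTP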
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fuelmenu.common.urwidwrapper as widget
from fuelmenu.settings import Settings
import logging
import netifaces
import re
import socket
import struct
import subprocess
import urwid
import urwid.raw_display
import urwid.web_display
log = logging.getLogger('fuelmenu.modulehelper')
blank = urwid.Divider()
class ModuleHelper(object):
@classmethod
def load(cls, modobj):
#Read in yaml
defaultsettings = Settings().read(modobj.parent.defaultsettingsfile)
oldsettings = defaultsettings.copy()
oldsettings.update(Settings().read(modobj.parent.settingsfile))
for setting in modobj.defaults.keys():
if "label" in setting:
continue
elif "/" in setting:
part1, part2 = setting.split("/")
modobj.defaults[setting]["value"] = oldsettings[part1][part2]
else:
modobj.defaults[setting]["value"] = oldsettings[setting]
if modobj.netsettings and oldsettings["ADMIN_NETWORK"]["interface"] \
in modobj.netsettings.keys():
modobj.activeiface = oldsettings["ADMIN_NETWORK"]["interface"]
return oldsettings
@classmethod
def save(cls, modobj, responses):
newsettings = dict()
for setting in responses.keys():
if "/" in setting:
part1, part2 = setting.split("/")
if part1 not in newsettings:
#We may not touch all settings, so copy oldsettings first
newsettings[part1] = modobj.oldsettings[part1]
newsettings[part1][part2] = responses[setting]
else:
newsettings[setting] = responses[setting]
return newsettings
@classmethod
def cancel(self, cls, button=None):
for index, fieldname in enumerate(cls.fields):
if fieldname != "blank" and "label" not in fieldname:
try:
cls.edits[index].set_edit_text(cls.defaults[fieldname][
'value'])
except AttributeError:
log.warning("Field %s unable to reset text" % fieldname)
@classmethod
def screenUI(cls, modobj, headertext, fields, defaults,
showallbuttons=False, buttons_visible=True):
log.debug("Preparing screen UI for %s" % modobj.name)
#Define text labels, text fields, and buttons first
header_content = []
for text in headertext:
if isinstance(text, str):
header_content.append(urwid.Text(text))
else:
header_content.append(text)
edits = []
toolbar = modobj.parent.footer
for key in fields:
#Example: key = hostname, label = Hostname, value = fuel-pm
if key == "blank":
edits.append(blank)
elif defaults[key]["value"] == "radio":
label = widget.TextLabel(defaults[key]["label"])
if "choices" in defaults[key]:
choices_list = defaults[key]["choices"]
else:
choices_list = ["Yes", "No"]
choices = widget.ChoicesGroup(choices_list,
default_value="Yes",
fn=modobj.radioSelect)
columns = widget.Columns([('weight', 2, label),
('weight', 3, choices)])
#Attach choices rb_group so we can use it later
columns.rb_group = choices.rb_group
edits.append(columns)
elif defaults[key]["value"] == "label":
edits.append(widget.TextLabel(defaults[key]["label"]))
else:
ispassword = "PASSWORD" in key.upper()
caption = defaults[key]["label"]
default = defaults[key]["value"]
tooltip = defaults[key]["tooltip"]
edits.append(
widget.TextField(key, caption, 23, default, tooltip,
toolbar, ispassword=ispassword))
listbox_content = []
listbox_content.extend(header_content)
listbox_content.append(blank)
listbox_content.extend(edits)
listbox_content.append(blank)
#Wrap buttons into Columns so it doesn't expand and look ugly
if buttons_visible:
#Button to check
button_check = widget.Button("Check", modobj.check)
#Button to revert to previously saved settings
button_cancel = widget.Button("Cancel", modobj.cancel)
#Button to apply (and check again)
button_apply = widget.Button("Apply", modobj.apply)
if modobj.parent.globalsave and showallbuttons is False:
check_col = widget.Columns([button_check])
else:
check_col = widget.Columns([button_check, button_cancel,
button_apply, ('weight', 2, blank)])
listbox_content.append(check_col)
#Add everything into a ListBox and return it
listwalker = widget.TabbedListWalker(listbox_content)
screen = urwid.ListBox(listwalker)
modobj.edits = edits
modobj.walker = listwalker
modobj.listbox_content = listbox_content
return screen
@classmethod
def getNetwork(cls, modobj):
"""Returns addr, broadcast, netmask for each network interface."""
re_ifaces = re.compile(r"lo|vir|vbox|docker|veth")
for iface in netifaces.interfaces():
if re_ifaces.search(iface):
continue
try:
modobj.netsettings.update({iface: netifaces.ifaddresses(iface)[
netifaces.AF_INET][0]})
modobj.netsettings[iface]["onboot"] = "Yes"
except (TypeError, KeyError):
modobj.netsettings.update({iface: {"addr": "", "netmask": "",
"onboot": "no"}})
modobj.netsettings[iface]['mac'] = netifaces.ifaddresses(iface)[
netifaces.AF_LINK][0]['addr']
#Set link state
try:
with open("/sys/class/net/%s/operstate" % iface) as f:
content = f.readlines()
modobj.netsettings[iface]["link"] = content[0].strip()
except IOError:
log.warning("Unable to read operstate file for %s" % iface)
modobj.netsettings[iface]["link"] = "unknown"
#Change unknown link state to up if interface has an IP
if modobj.netsettings[iface]["link"] == "unknown":
if modobj.netsettings[iface]["addr"] != "":
modobj.netsettings[iface]["link"] = "up"
#Read bootproto from /etc/sysconfig/network-scripts/ifcfg-DEV
modobj.netsettings[iface]['bootproto'] = "none"
try:
with open("/etc/sysconfig/network-scripts/ifcfg-%s" % iface)\
as fh:
for line in fh:
if re.match("^BOOTPROTO=", line):
modobj.netsettings[iface]['bootproto'] = \
line.split('=')[1].strip()
break
except Exception:
#Check for dhclient process running for this interface
if modobj.getDHCP(iface):
modobj.netsettings[iface]['bootproto'] = "dhcp"
else:
modobj.netsettings[iface]['bootproto'] = "none"
modobj.gateway = modobj.get_default_gateway_linux()
@classmethod
def getDHCP(cls, iface):
"""Returns True if the interface has a dhclient process running."""
noout = open('/dev/null', 'w')
dhclient_running = subprocess.call(["pgrep", "-f", "dhclient.*%s" %
(iface)], stdout=noout,
stderr=noout)
return (dhclient_running == 0)
@classmethod
def get_default_gateway_linux(cls):
"""Read the default gateway directly from /proc."""
with open("/proc/net/route") as fh:
for line in fh:
fields = line.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
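# --- Hedged sketch (not part of the original module) --------------------------
# /proc/net/route stores the gateway as a little-endian hex word, which is why
# get_default_gateway_linux() above unpacks it with struct before formatting.
# The sample value below is fabricated for illustration.
def _decode_gateway(hex_word):
    """Convert a /proc/net/route gateway field (e.g. '0101A8C0') to dotted quad."""
    return socket.inet_ntoa(struct.pack("<L", int(hex_word, 16)))

# _decode_gateway("0101A8C0") -> "192.168.1.1"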
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.api.openstack.compute.contrib import services
from nova import availability_zones
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova.servicegroup.drivers import db as db_driver
from nova import test
from nova.tests.api.openstack import fakes
fake_services_list = [
{'binary': 'nova-scheduler',
'host': 'host1',
'id': 1,
'disabled': True,
'topic': 'scheduler',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-compute',
'host': 'host1',
'id': 2,
'disabled': True,
'topic': 'compute',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-scheduler',
'host': 'host2',
'id': 3,
'disabled': False,
'topic': 'scheduler',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)},
{'binary': 'nova-compute',
'host': 'host2',
'id': 4,
'disabled': True,
'topic': 'compute',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)},
]
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class FakeRequestWithService(object):
environ = {"nova.context": context.get_admin_context()}
GET = {"binary": "nova-compute"}
class FakeRequestWithHost(object):
environ = {"nova.context": context.get_admin_context()}
GET = {"host": "host1"}
class FakeRequestWithHostService(object):
environ = {"nova.context": context.get_admin_context()}
GET = {"host": "host1", "binary": "nova-compute"}
def fake_host_api_service_get_all(context, filters=None, set_zones=False):
    if set_zones or 'availability_zone' in filters:
        return availability_zones.set_availability_zones(context,
                                                         fake_services_list)
    return fake_services_list
def fake_db_api_service_get_all(context, disabled=None):
return fake_services_list
def fake_service_get_by_host_binary(context, host, binary):
for service in fake_services_list:
if service['host'] == host and service['binary'] == binary:
return service
return None
def fake_service_get_by_id(value):
for service in fake_services_list:
if service['id'] == value:
return service
return None
def fake_service_update(context, service_id, values):
service = fake_service_get_by_id(service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
else:
{'host': 'host1', 'service': 'nova-compute',
'disabled': values['disabled']}
def fake_utcnow():
return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.context = context.get_admin_context()
self.controller = services.ServiceController()
self.stubs.Set(self.controller.host_api, "service_get_all",
fake_host_api_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update", fake_service_update)
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler',
'host': 'host1', 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1', 'zone': 'nova',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler', 'host': 'host2',
'zone': 'internal',
'status': 'enabled', 'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute', 'host': 'host2',
'zone': 'nova',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-compute', 'host': 'host2',
'zone': 'nova',
'status': 'disabled', 'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
'zone': 'nova',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_enable(self):
body = {'host': 'host1', 'binary': 'nova-compute'}
req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
res_dict = self.controller.update(req, "enable", body)
self.assertEqual(res_dict['service']['status'], 'enabled')
def test_services_disable(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
body = {'host': 'host1', 'binary': 'nova-compute'}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual(res_dict['service']['status'], 'disabled')
# This test is just to verify that the servicegroup API gets used when
# calling this API.
def test_services_with_exception(self):
def dummy_is_up(self, dummy):
raise KeyError()
self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
req = FakeRequestWithHostService()
self.assertRaises(KeyError, self.controller.index, req)
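# Note (added): the 'zone' values in the expected responses above are not part
# of the fake service rows; they are filled in by
# availability_zones.set_availability_zones(), which the stubbed
# host_api.service_get_all delegates to -- scheduler entries get the internal
# zone ('internal') and compute entries the default zone ('nova').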
|
|
#! /usr/bin/env python
from __future__ import print_function
import cv2
import numpy as np
import struct
import pandas as pd
import random
from os.path import join
from golfr.definitions import ROOT_DIR
'''
Given a bunch of points, we want to be able to
- filter out extraneous points
- fill in missed points
- identify a logical grid by grouping squares of points e.g.
+----+----+----+----+----+----+-----------------
| 0 | 1 | 2 | 3 | 4 | 5 | etc ...
+----+----+----+----+----+----+-----------------
| 6 | 7 | 8 | 9 | 10 | 11 | etc ...
+----+----+----+----+----+----+-----------------
| etc...
'''
def num_2_hextup(num):
return (
(num >> 16) & 0xff,
(num >> 8) & 0xff,
num & 0xff
)
def arr_2_hexstr(arr):
return '{:02X}{:02X}{:02X}'.format(arr[0], arr[1], arr[2])
def num_2_hexstr(num):
return arr_2_hexstr(num_2_hextup(num))
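# Worked example (added note): 0xFFB300 -> num_2_hextup -> (0xFF, 0xB3, 0x00)
# -> arr_2_hexstr -> 'FFB300', so num_2_hexstr(0xFFB300) == 'FFB300'. The hex
# strings also double as dictionary keys for the flood-filled groups below.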
'''
These colors are really only used for pretty debug images
'''
def get_distinct_colors():
'''distinct_colors = [
0xFFB300, # Vivid Yellow
0x803E75, # Strong Purple
0xFF6800, # Vivid Orange
0xA6BDD7, # Very Light Blue
0xC10020, # Vivid Red
0xCEA262, # Grayish Yellow
0x817066, # Medium Gray
0x007D34, # Vivid Green
0xF6768E, # Strong Purplish Pink
0x00538A, # Strong Blue
0xFF7A5C, # Strong Yellowish Pink
0x53377A, # Strong Violet
0xFF8E00, # Vivid Orange Yellow
0xB32851, # Strong Purplish Red
0xF4C800, # Vivid Greenish Yellow
0x7F180D, # Strong Reddish Brown
0x93AA00, # Vivid Yellowish Green
0x593315, # Deep Yellowish Brown
0xF13A13, # Vivid Reddish Orange
0x232C16 # Dark Olive Green
]'''
#TODO: how do I reference a (non-python) file that's in a submodule?
# I think it should stay in the package but specifying the directory explicitly
# probably isn't the solution
df = pd.read_csv(join(ROOT_DIR,'find_grid_points/crayola120colors.txt'), sep='\t', skiprows=[0], index_col=False)
#print(df.values[:, 1:4])
return df.values[:, 1:4]
def get_on_pixel_in_region(pnt, img, fill_radius=3):
if np.any(img[pnt[1], pnt[0]]):
return tuple(pnt)
#the centroid doesn't lie on a fillable area
#so let's check the region around
#TODO: fill_radius will *not* need to be so high (hopefully it would equal 0)
# when vertical_lines isn't the outer-most contours
h, w = img.shape[:2]
# note: ideally we'd work inside out
bbox_range_x = range(
max(pnt[0] - fill_radius, 0),
min(pnt[0] + fill_radius + 1, w)
)
bbox_range_y = range(
max(pnt[1] - fill_radius, 0),
min(pnt[1] + fill_radius + 1, h)
)
for y in bbox_range_y:
for x in bbox_range_x:
#print('img[{:04d},{:04d}] == {}'.format(x,y,img[y,x]))
if np.any(img[y, x]): # NOT black
return x, y
return
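# Note (added): with the default fill_radius=3 the loop above scans a 7x7
# pixel window centred on the point (clamped to the image bounds) and returns
# the first non-black pixel it finds, or None if the whole window is black.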
'''
This would've been dead simple in numpy
'''
def get_centroid(pnts, i):
tot = 0
for pnt in pnts:
tot += pnt[i]
return tot / len(pnts)
def get_connected_pnts(pnts, floodable, DEBUG_FNAME='flooded'):
#print('vertical_lines shape: {}'.format(vertical_lines.shape))
# maxval, thresh
#floodx = vertical_lines.copy() #cv2.cvtColor(vertical_lines, cv2.COLOR_GRAY2RGB)
distinct_colors = get_distinct_colors()
distinct_colors_remain = [(int(c[0]), int(c[1]), int(c[2])) for c in distinct_colors.tolist()]
random.shuffle(distinct_colors_remain)
#print(distinct_colors_remain)
#h, w, chs = floodx.shape
h, w = floodable.shape[:2]
mask = np.zeros((h+2,w+2), np.uint8)
_, flooded = cv2.threshold(floodable, 127, 255, cv2.THRESH_BINARY)
i = 0
lines = {}
FILL_RADIUS = 7
CIRCLE_RADIUS = 20
for pnt in pnts:
flood_origin = get_on_pixel_in_region(pnt, flooded, fill_radius=FILL_RADIUS)
if not flood_origin:
raise Exception('couldn\'t find an ON pixel near the centroid (x,y)==({},{})'.format(pnt[0],pnt[1]))
pix_color = flooded[flood_origin[1],flood_origin[0]]
#print ('color to fill: {}'.format(pix_color))
#get string
hexstr = arr_2_hexstr(pix_color)
if hexstr in lines: #no need to floodfill, we already have
lines[hexstr].append(pnt)
#print('pix_color: {}'.format(clean_color))
# DEBUG: draw indicator circle
# TODO: why do I need this list-comprehension?
# I got "pix_color" from the flooded numpy ndarray
cv2.circle(flooded, tuple(pnt), CIRCLE_RADIUS, [int(x) for x in pix_color], thickness=3)
else: #flood a new color
if len(distinct_colors_remain) <= 0:
#raise Warning('not enough unique colors')
raise Exception('not enough unique colors ({})'.format(
len(lines)))
col_tup = distinct_colors_remain.pop() #num_2_hextup(distinct_colors[len(lines)%len(distinct_colors)])
cv2.floodFill(flooded, mask, flood_origin, col_tup)
hexstr = arr_2_hexstr(col_tup)
#add the color to the lines
lines[hexstr] = [pnt]
# DEBUG: draw indicator circle
cv2.circle(flooded, tuple(pnt), CIRCLE_RADIUS, col_tup, thickness=3)
i += 1
for color in lines:
print('color: 0x{}'.format(color))
for pnt in lines[color]:
print(' x,y: {:04d}, {:04d}'.format(pnt[0],pnt[1]))
print('num lines: {}'.format(len(lines)))
print('{} unique colors remaining ({:.2f}%)'.format(len(distinct_colors_remain),100.0*len(distinct_colors_remain)/distinct_colors.shape[0]))
cv2.imwrite(DEBUG_FNAME+'.jpg', flooded)
return list(lines.values())
#flooded[np.array_equal(flooded, np.ndarray([255,255,255]))] = np.ndarray([0,0,0])
#print (np.where( np.array_equal(flooded, np.ndarray([255,255,255])) ))
#https://stackoverflow.com/a/25823710
from collections import Counter
def group_points(pnts, vertical_lines, horizontal_lines):
vert_groups = get_connected_pnts(pnts, vertical_lines, DEBUG_FNAME='flooded_y')
hori_groups = get_connected_pnts(pnts, horizontal_lines, DEBUG_FNAME='flooded_x')
    # sort groups in place: vertical groups by centroid x, horizontal groups by centroid y
vert_groups.sort(key=lambda pnts: get_centroid(pnts,0))
hori_groups.sort(key=lambda pnts: get_centroid(pnts,1))
# test
# TODO: actually do this
vert_groups[:] = [group for group in vert_groups if len(group)==7]
hori_groups[:] = [group for group in hori_groups if len(group)==24]
print('vertical group sizes:')
#for num_occ, alen in Counter([len(x) for x in vert_groups]).items():
# print('{}: {}'.format(num_occ, alen))
for group in vert_groups:
print(len(group))
print('horizontal group sizes:')
for group in hori_groups:
print(len(group))
for vert_group in vert_groups:
|
|
#!/usr/bin/env python2.7
'''
For more information on daemons in python, see:
* http://pypi.python.org/pypi/python-daemon
* http://www.python.org/dev/peps/pep-3143/
* http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
* http://en.wikipedia.org/wiki/Daemon_(computing)
Here are some similar implementations:
* http://pypi.python.org/pypi/zdaemon/2.0.4
* https://github.com/indexzero/forever
This module has two separate use cases:
* running a command as a daemon process
* forking the current process as a daemon.
Daemonizing a command allows one to start, stop, and restart a non-daemon
command as a daemon process. This requires specifying a pid file which is used
to interact with the process.
Usage examples:
daemoncmd start --pidfile /tmp/daemon.pid \
--stdout /tmp/daemon.log --stderr /tmp/daemon.log sleep 100
daemoncmd restart --pidfile /tmp/daemon.pid \
--stdout /tmp/daemon.log --stderr /tmp/daemon.log sleep 100
daemoncmd status --pidfile /tmp/daemon.pid
daemoncmd stop --pidfile /tmp/daemon.pid
Another use case is forking the current process into a daemon. According
to pep 3143, forking as a daemon might be done by the standard library some
day.
Usage example:
import daemoncmd
import mytask
daemoncmd.daemonize()
mytask.doit()
Or from the command line:
python -c 'import daemoncmd, mytask; daemoncmd.daemonize(); mytask.doit()'
Other usage notes:
* The command should not daemonize itself, since that is what this script does
and it would make the pid in the pidfile incorrect.
* The command should refer to the absolute path of the executable, since
daemonization sets the cwd to '/'. More generally, do not assume what the
cwd is.
* If daemoncmd is run by monit, etc., PATH and other env vars might be
restricted for security reasons.
* daemoncmd does not try to run the daemon as a particular uid. That would
be handled by a process manager like monit, launchd, init, god, etc.
* When running under monit, etc., pass environment variables to the command
like so:
FOO=testing daemoncmd start --pidfile /tmp/daemon.pid \
--stdout /tmp/daemon.log printenv FOO
'''
__version__ = '0.2.0'
import sys
import os
import signal
import errno
import time
import argparse
def start(argv, pidfile, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
'''
Start a new daemon, saving its pid to pidfile.
Do not start the daemon if the pidfile exists and the pid in it is running.
'''
# use absolute path, since daemonize() changes cwd
pidfile = os.path.abspath(pidfile)
pid = getpid(pidfile)
    # only start the process if the pidfile does not hold a pid or the pid it
    # holds is not a running process.
if pid and running(pid):
mess = "Start aborted since pid file '%s' exists" % pidfile
mess += " and pid '%s' is running.\n" % pid
sys.stderr.write(mess)
sys.exit(1)
sys.stdout.write('Starting process.\n')
daemonize_command(argv, pidfile, stdin, stdout, stderr)
def stop(pidfile):
'''
pidfile: a file containing a process id.
stop the pid in pidfile if pidfile contains a pid and it is running.
'''
# use absolute path, since daemonize() changes cwd
pidfile = os.path.abspath(pidfile)
pid = getpid(pidfile)
# stop process (if it exists)
if not pid:
sys.stderr.write(("Warning: Could not stop process because pid file "
"'%s' is missing.\n" % pidfile))
elif not running(pid):
sys.stderr.write(('Warning: pid "%s" in pid file "%s" is already not '
'running.\n' % (pid, pidfile)))
else:
sys.stdout.write('Stopping process. pid={0}\n'.format(pid))
try:
os.kill(pid, signal.SIGTERM)
# a pause, so daemon will have a chance to stop before it gets restarted.
time.sleep(1)
except OSError as err:
sys.stderr.write('Failed to terminate pid "%s". Exception: %s.\n'
% (pid, err))
sys.exit(1)
def restart(argv, pidfile, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
'''
stop the process in pidfile. start argv as a new daemon process. save its
pid to pidfile.
'''
stop(pidfile)
start(argv, pidfile, stdin, stdout, stderr)
def status(pidfile):
# use absolute path, since daemonize() changes cwd
pidfile = os.path.abspath(pidfile)
pid = getpid(pidfile)
if pid and running(pid):
sys.stdout.write('process running; pid={0}\n'.format(pid))
else:
sys.stdout.write('process stopped\n')
def daemonize_command(argv, pidfile, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
'''
argv: list of executable and arguments. First item is the executable. e.g.
['/bin/sleep', '100']
pidfile: filename. pid of the daemon child process will be written to the
file. This is useful for monitoring services that need a pid to stop the
daemon, etc.
Calls daemonize, which exits the calling process and continues in the child
process. Therefore all code after calling daemonize_command will be
executed in the daemon process.
'''
# use absolute path, since daemonize() changes cwd
pidfile = os.path.abspath(pidfile)
daemonize(stdin, stdout, stderr)
# now we are in the daemon process
# save pid to a file
if pidfile:
setpid(os.getpid(), pidfile)
# do not spawn a subprocess, since the daemon process is the one we want to
# start/stop/restart/etc.
os.execvp(argv[0], argv)
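# Example (added, mirrors the module docstring): daemonize_command(
# ['/bin/sleep', '100'], '/tmp/daemon.pid') forks twice via daemonize(),
# writes the surviving child's pid to /tmp/daemon.pid, and then replaces that
# process image with /bin/sleep through os.execvp, so no extra subprocess is
# left behind to supervise.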
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
'''
    stdin, stdout, stderr: filenames that will be opened and used to replace the
    standard file descriptors in sys.stdin, sys.stdout, sys.stderr. Default to
/dev/null. Note that stderr is opened unbuffered, so if it shares a file
with stdout then interleaved output may not appear in the order that you
expect.
Turn current process into a daemon.
returns: nothing in particular.
'''
# use absolute path, since daemonize() changes cwd
stdin = os.path.abspath(stdin)
stdout = os.path.abspath(stdout)
stderr = os.path.abspath(stderr)
# Do first fork
try:
pid = os.fork()
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n"%(e.errno, e.strerror))
sys.exit(1)
if pid > 0:
sys.exit(0) # exit parent
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n"%(e.errno, e.strerror))
sys.exit(1)
if pid > 0:
sys.exit(0) # exit parent
# Now I am a daemon!
# Redirect standard file descriptors. First open the new files, perform
# hack if necessary, flush any existing output, and dup new files to std
# streams.
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+', 0)
# hack and bug: when sys.stdin.close() has already been called,
# os.dup2 throws an exception: ValueError: I/O operation on closed file
# This hack attempts to detect whether any of the std streams
# have been closed and if so opens them to a dummy value which
# will get closed by os.dup2, which I like better than
# an exception being thrown.
if sys.stdin.closed: sys.stdin = open('/dev/null', 'r')
if sys.stdout.closed: sys.stdout = open('/dev/null', 'a+')
if sys.stderr.closed: sys.stderr = open('/dev/null', 'a+')
sys.stdout.flush()
sys.stderr.flush()
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def getpid(pidfile):
'''
read pid from pidfile
return: pid
'''
pid = None
if os.path.isfile(pidfile):
with open(pidfile) as fh:
try:
pid = int(fh.read().strip())
except ValueError:
pass
return pid
def setpid(pid, pidfile):
'''
save pid to pidfile
'''
with open(pidfile, 'w') as fh:
fh.write('{0}\n'.format(pid))
def running(pid):
"""
pid: a process id
Return: False if the pid is None or if the pid does not match a
currently-running process.
Derived from code in http://pypi.python.org/pypi/python-daemon/ runner.py
"""
if pid is None:
return False
try:
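        # signal.SIG_DFL equals 0, so this is the classic "kill -0" probe:
        # no signal is actually delivered, but OSError with errno.ESRCH is
        # raised when no process with this pid exists.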
os.kill(pid, signal.SIG_DFL)
    except OSError as exc:
if exc.errno == errno.ESRCH:
# The specified PID does not exist
return False
return True
def main():
# daemoncmd <command>
# daemoncmd start --pidfile <file> [--stdin <file>] [--stdout <file>] \
# [--stderr <file>] <command>
# daemoncmd restart --pidfile <file> [--stdin <file>] [--stdout <file>]\
# [--stderr <file>] <command>
# daemoncmd stop --pidfile <file>
# daemoncmd status --pidfile <file>
parser = argparse.ArgumentParser(
description=('Turn any command into a daemon. Use start, stop, ' +
'restart, and status to control the daemon.'))
subparsers = parser.add_subparsers(dest='action')
# create the parser for the "start" command
startParser = subparsers.add_parser('start',
help='Start a daemon to run a command')
startParser.add_argument(
'--pidfile', required=True,
help='file in which to store the pid of the started daemon process')
startParser.add_argument('--stdin', default='/dev/null',
help='Redirect daemon stdin from this file')
startParser.add_argument('--stdout', default='/dev/null',
help='Redirect daemon stdout to this file')
startParser.add_argument('--stderr', default='/dev/null',
help='Redirect daemon stderr to this file')
startParser.add_argument(
'cmd',
help=('The executable/command that the daemon will run. i.e. a '
'server that listens on a port for incoming connections.'))
startParser.add_argument('args', nargs='*',
help='options or arguments to the command')
stopParser = subparsers.add_parser('stop', help='Stop a running daemon')
stopParser.add_argument('--pidfile', required=True,
help='file containing the pid of daemon process')
stopParser = subparsers.add_parser(
'status', help='Print the status of a daemon process')
stopParser.add_argument('--pidfile', required=True,
help='file containing the pid of daemon process')
restartParser = subparsers.add_parser(
'restart', help='Restart a daemon to run a command')
restartParser.add_argument(
'--pidfile', required=True,
help='file in which to store the pid of the started daemon process')
restartParser.add_argument('--stdin', default='/dev/null',
help='Redirect daemon stdin from this file')
restartParser.add_argument('--stdout', default='/dev/null',
help='Redirect daemon stdout to this file')
restartParser.add_argument('--stderr', default='/dev/null',
help='Redirect daemon stderr to this file')
restartParser.add_argument(
'cmd',
help=('The executable/command that the daemon will run. i.e. a '
'server that listens on a port for incoming connections.'))
restartParser.add_argument('args', nargs='*',
help='options or arguments to the command')
args = parser.parse_args()
if args.action == 'start':
start([args.cmd] + args.args, args.pidfile, args.stdin, args.stdout, args.stderr)
elif args.action == 'restart':
restart([args.cmd] + args.args, args.pidfile, args.stdin, args.stdout, args.stderr)
elif args.action == 'stop':
stop(args.pidfile)
elif args.action == 'status':
status(args.pidfile)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2008, Casey Duncan (casey dot duncan at gmail dot com)
# see LICENSE.txt for details
"""Perlin noise -- pure python implementation"""
__version__ = '$Id: perlin.py 521 2008-12-15 03:03:52Z casey.duncan $'
from math import floor, fmod, sqrt
from random import randint
# 3D Gradient vectors
_GRAD3 = ((1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0),
(1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1),
(0,1,1),(0,-1,1),(0,1,-1),(0,-1,-1),
(1,1,0),(0,-1,1),(-1,1,0),(0,-1,-1),
)
# 4D Gradient vectors
_GRAD4 = ((0,1,1,1), (0,1,1,-1), (0,1,-1,1), (0,1,-1,-1),
(0,-1,1,1), (0,-1,1,-1), (0,-1,-1,1), (0,-1,-1,-1),
(1,0,1,1), (1,0,1,-1), (1,0,-1,1), (1,0,-1,-1),
(-1,0,1,1), (-1,0,1,-1), (-1,0,-1,1), (-1,0,-1,-1),
(1,1,0,1), (1,1,0,-1), (1,-1,0,1), (1,-1,0,-1),
(-1,1,0,1), (-1,1,0,-1), (-1,-1,0,1), (-1,-1,0,-1),
(1,1,1,0), (1,1,-1,0), (1,-1,1,0), (1,-1,-1,0),
(-1,1,1,0), (-1,1,-1,0), (-1,-1,1,0), (-1,-1,-1,0))
# A lookup table to traverse the simplex around a given point in 4D.
# Details can be found where this table is used, in the 4D noise method.
_SIMPLEX = (
(0,1,2,3),(0,1,3,2),(0,0,0,0),(0,2,3,1),(0,0,0,0),(0,0,0,0),(0,0,0,0),(1,2,3,0),
(0,2,1,3),(0,0,0,0),(0,3,1,2),(0,3,2,1),(0,0,0,0),(0,0,0,0),(0,0,0,0),(1,3,2,0),
(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),
(1,2,0,3),(0,0,0,0),(1,3,0,2),(0,0,0,0),(0,0,0,0),(0,0,0,0),(2,3,0,1),(2,3,1,0),
(1,0,2,3),(1,0,3,2),(0,0,0,0),(0,0,0,0),(0,0,0,0),(2,0,3,1),(0,0,0,0),(2,1,3,0),
(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),(0,0,0,0),
(2,0,1,3),(0,0,0,0),(0,0,0,0),(0,0,0,0),(3,0,1,2),(3,0,2,1),(0,0,0,0),(3,1,2,0),
(2,1,0,3),(0,0,0,0),(0,0,0,0),(0,0,0,0),(3,1,0,2),(0,0,0,0),(3,2,0,1),(3,2,1,0))
# Simplex skew constants
_F2 = 0.5 * (sqrt(3.0) - 1.0)
_G2 = (3.0 - sqrt(3.0)) / 6.0
_F3 = 1.0 / 3.0
_G3 = 1.0 / 6.0
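# Note (added): numerically _F2 ~= 0.36603 and _G2 ~= 0.21132; _F3 and _G3
# play the same role for the 3D case. noise2()/noise3() below use them to
# skew (x, y[, z]) onto the simplex grid and to unskew the cell origin back.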
class BaseNoise:
"""Noise abstract base class"""
permutation = (151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190,6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168,68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152,2,44,154,163,70,221,153,101,155,167,43,172,9,
        129,22,39,253,19,98,108,110,79,113,224,232,178,185,112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214,31,181,199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180)
period = len(permutation)
# Double permutation array so we don't need to wrap
permutation = permutation * 2
def __init__(self, period=None, permutation_table=None):
"""Initialize the noise generator. With no arguments, the default
period and permutation table are used (256). The default permutation
table generates the exact same noise pattern each time.
An integer period can be specified, to generate a random permutation
table with period elements. The period determines the (integer)
interval that the noise repeats, which is useful for creating tiled
textures. period should be a power-of-two, though this is not
        enforced. Note that the speed of the noise algorithm is independent of
        the period size, though larger periods mean a larger table, which
        consumes more memory.
A permutation table consisting of an iterable sequence of whole
numbers can be specified directly. This should have a power-of-two
length. Typical permutation tables are a sequnce of unique integers in
the range [0,period) in random order, though other arrangements could
prove useful, they will not be "pure" simplex noise. The largest
element in the sequence must be no larger than period-1.
period and permutation_table may not be specified together.
"""
if period is not None and permutation_table is not None:
raise ValueError(
'Can specify either period or permutation_table, not both')
if period is not None:
self.randomize(period)
elif permutation_table is not None:
self.permutation = tuple(permutation_table) * 2
self.period = len(permutation_table)
def randomize(self, period=None):
"""Randomize the permutation table used by the noise functions. This
makes them generate a different noise pattern for the same inputs.
"""
if period is not None:
self.period = period
perm = list(range(self.period))
perm_right = self.period - 1
for i in list(perm):
j = randint(0, perm_right)
perm[i], perm[j] = perm[j], perm[i]
self.permutation = tuple(perm) * 2
class SimplexNoise(BaseNoise):
"""Perlin simplex noise generator
Adapted from Stefan Gustavson's Java implementation described here:
http://staffwww.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf
To summarize:
"In 2001, Ken Perlin presented 'simplex noise', a replacement for his classic
noise algorithm. Classic 'Perlin noise' won him an academy award and has
become an ubiquitous procedural primitive for computer graphics over the
years, but in hindsight it has quite a few limitations. Ken Perlin himself
designed simplex noise specifically to overcome those limitations, and he
spent a lot of good thinking on it. Therefore, it is a better idea than his
original algorithm. A few of the more prominent advantages are:
* Simplex noise has a lower computational complexity and requires fewer
multiplications.
* Simplex noise scales to higher dimensions (4D, 5D and up) with much less
computational cost, the complexity is O(N) for N dimensions instead of
the O(2^N) of classic Noise.
* Simplex noise has no noticeable directional artifacts. Simplex noise has
a well-defined and continuous gradient everywhere that can be computed
quite cheaply.
* Simplex noise is easy to implement in hardware."
"""
def noise2(self, x, y):
"""2D Perlin simplex noise.
Return a floating point value from -1 to 1 for the given x, y coordinate.
The same value is always returned for a given x, y pair unless the
permutation table changes (see randomize above).
"""
# Skew input space to determine which simplex (triangle) we are in
s = (x + y) * _F2
i = floor(x + s)
j = floor(y + s)
t = (i + j) * _G2
x0 = x - (i - t) # "Unskewed" distances from cell origin
y0 = y - (j - t)
if x0 > y0:
i1 = 1; j1 = 0 # Lower triangle, XY order: (0,0)->(1,0)->(1,1)
else:
i1 = 0; j1 = 1 # Upper triangle, YX order: (0,0)->(0,1)->(1,1)
x1 = x0 - i1 + _G2 # Offsets for middle corner in (x,y) unskewed coords
y1 = y0 - j1 + _G2
x2 = x0 + _G2 * 2.0 - 1.0 # Offsets for last corner in (x,y) unskewed coords
y2 = y0 + _G2 * 2.0 - 1.0
# Determine hashed gradient indices of the three simplex corners
perm = self.permutation
ii = int(i) % self.period
jj = int(j) % self.period
gi0 = perm[ii + perm[jj]] % 12
gi1 = perm[ii + i1 + perm[jj + j1]] % 12
gi2 = perm[ii + 1 + perm[jj + 1]] % 12
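        # The % 12 keeps each hash inside the 12 distinct cube-edge gradients
        # at the start of _GRAD3; the extra entries there only matter for
        # grad3(), which indexes with hash % 16.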
# Calculate the contribution from the three corners
tt = 0.5 - x0**2 - y0**2
if tt > 0:
g = _GRAD3[gi0]
noise = tt**4 * (g[0] * x0 + g[1] * y0)
else:
noise = 0.0
tt = 0.5 - x1**2 - y1**2
if tt > 0:
g = _GRAD3[gi1]
noise += tt**4 * (g[0] * x1 + g[1] * y1)
tt = 0.5 - x2**2 - y2**2
if tt > 0:
g = _GRAD3[gi2]
noise += tt**4 * (g[0] * x2 + g[1] * y2)
return noise * 70.0 # scale noise to [-1, 1]
def noise3(self, x, y, z):
"""3D Perlin simplex noise.
Return a floating point value from -1 to 1 for the given x, y, z coordinate.
The same value is always returned for a given x, y, z pair unless the
permutation table changes (see randomize above).
"""
# Skew the input space to determine which simplex cell we're in
s = (x + y + z) * _F3
i = floor(x + s)
j = floor(y + s)
k = floor(z + s)
t = (i + j + k) * _G3
x0 = x - (i - t) # "Unskewed" distances from cell origin
y0 = y - (j - t)
z0 = z - (k - t)
# For the 3D case, the simplex shape is a slightly irregular tetrahedron.
# Determine which simplex we are in.
if x0 >= y0:
if y0 >= z0:
i1 = 1; j1 = 0; k1 = 0
i2 = 1; j2 = 1; k2 = 0
elif x0 >= z0:
i1 = 1; j1 = 0; k1 = 0
i2 = 1; j2 = 0; k2 = 1
else:
i1 = 0; j1 = 0; k1 = 1
i2 = 1; j2 = 0; k2 = 1
else: # x0 < y0
if y0 < z0:
i1 = 0; j1 = 0; k1 = 1
i2 = 0; j2 = 1; k2 = 1
elif x0 < z0:
i1 = 0; j1 = 1; k1 = 0
i2 = 0; j2 = 1; k2 = 1
else:
i1 = 0; j1 = 1; k1 = 0
i2 = 1; j2 = 1; k2 = 0
# Offsets for remaining corners
x1 = x0 - i1 + _G3
y1 = y0 - j1 + _G3
z1 = z0 - k1 + _G3
x2 = x0 - i2 + 2.0 * _G3
y2 = y0 - j2 + 2.0 * _G3
z2 = z0 - k2 + 2.0 * _G3
x3 = x0 - 1.0 + 3.0 * _G3
y3 = y0 - 1.0 + 3.0 * _G3
z3 = z0 - 1.0 + 3.0 * _G3
# Calculate the hashed gradient indices of the four simplex corners
perm = self.permutation
ii = int(i) % self.period
jj = int(j) % self.period
kk = int(k) % self.period
gi0 = perm[ii + perm[jj + perm[kk]]] % 12
gi1 = perm[ii + i1 + perm[jj + j1 + perm[kk + k1]]] % 12
gi2 = perm[ii + i2 + perm[jj + j2 + perm[kk + k2]]] % 12
gi3 = perm[ii + 1 + perm[jj + 1 + perm[kk + 1]]] % 12
# Calculate the contribution from the four corners
noise = 0.0
tt = 0.6 - x0**2 - y0**2 - z0**2
if tt > 0:
g = _GRAD3[gi0]
noise = tt**4 * (g[0] * x0 + g[1] * y0 + g[2] * z0)
else:
noise = 0.0
tt = 0.6 - x1**2 - y1**2 - z1**2
if tt > 0:
g = _GRAD3[gi1]
noise += tt**4 * (g[0] * x1 + g[1] * y1 + g[2] * z1)
tt = 0.6 - x2**2 - y2**2 - z2**2
if tt > 0:
g = _GRAD3[gi2]
noise += tt**4 * (g[0] * x2 + g[1] * y2 + g[2] * z2)
tt = 0.6 - x3**2 - y3**2 - z3**2
if tt > 0:
g = _GRAD3[gi3]
noise += tt**4 * (g[0] * x3 + g[1] * y3 + g[2] * z3)
return noise * 32.0
def lerp(t, a, b):
return a + t * (b - a)
def grad3(hash, x, y, z):
g = _GRAD3[hash % 16]
return x*g[0] + y*g[1] + z*g[2]
class TileableNoise(BaseNoise):
"""Tileable implemention of Perlin "improved" noise. This
is based on the reference implementation published here:
http://mrl.nyu.edu/~perlin/noise/
"""
def noise3(self, x, y, z, repeat, base=0.0):
"""Tileable 3D noise.
repeat specifies the integer interval in each dimension
when the noise pattern repeats.
base allows a different texture to be generated for
the same repeat interval.
"""
i = int(fmod(floor(x), repeat))
j = int(fmod(floor(y), repeat))
k = int(fmod(floor(z), repeat))
ii = (i + 1) % repeat
jj = (j + 1) % repeat
kk = (k + 1) % repeat
if base:
i += base; j += base; k += base
ii += base; jj += base; kk += base
x -= floor(x); y -= floor(y); z -= floor(z)
fx = x**3 * (x * (x * 6 - 15) + 10)
fy = y**3 * (y * (y * 6 - 15) + 10)
fz = z**3 * (z * (z * 6 - 15) + 10)
perm = self.permutation
A = perm[i]
AA = perm[A + j]
AB = perm[A + jj]
B = perm[ii]
BA = perm[B + j]
BB = perm[B + jj]
return lerp(fz, lerp(fy, lerp(fx, grad3(perm[AA + k], x, y, z),
grad3(perm[BA + k], x - 1, y, z)),
lerp(fx, grad3(perm[AB + k], x, y - 1, z),
grad3(perm[BB + k], x - 1, y - 1, z))),
lerp(fy, lerp(fx, grad3(perm[AA + kk], x, y, z - 1),
grad3(perm[BA + kk], x - 1, y, z - 1)),
lerp(fx, grad3(perm[AB + kk], x, y - 1, z - 1),
grad3(perm[BB + kk], x - 1, y - 1, z - 1))))
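# Usage sketch (added example, not part of the original module): sample the
# generators defined above; simplex values are scaled to roughly [-1, 1].
if __name__ == '__main__':
    simplex = SimplexNoise()
    print('2D simplex noise at (0.1, 0.2): %r' % simplex.noise2(0.1, 0.2))
    print('3D simplex noise at (0.1, 0.2, 0.3): %r'
          % simplex.noise3(0.1, 0.2, 0.3))
    # Tileable "improved" noise repeats every `repeat` units along each axis.
    tileable = TileableNoise()
    print('tileable 3D noise at (0.5, 0.5, 0.5), repeat=16: %r'
          % tileable.noise3(0.5, 0.5, 0.5, repeat=16))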
|
|
#!/usr/bin/env python
#
# Copyright 2015-2021 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cartola import fs
import unittest
import firenado.conf
from importlib import reload
from tests import chdir_app
import logging
import os
class ApplicationComponentTestCase(unittest.TestCase):
""" Case that tests an Firenado application after being loaded from its
configuration file.
"""
def test_conf_root(self):
""" Test if Firenado root matches the upper directory relative to the
current one. """
import os
current_path = os.path.dirname(os.path.realpath(__file__))
firenado_root = ("%s" % os.sep).join(current_path.split(os.sep)[:-1])
self.assertEqual(firenado_root, firenado.conf.ROOT)
def test_conf_root(self):
""" Test if Firenado root matches the upper directory relative to the
current one. """
current_path = os.path.dirname(os.path.realpath(__file__))
firenado_root = ("%s" % os.sep).join(current_path.split(os.sep)[:-1])
firenado_root = os.path.join(firenado_root, "firenado")
self.assertEqual(firenado_root, firenado.conf.ROOT)
def test_firenado_config_file_default_value(self):
""" Test if the default Firenado config file value will be "firenado".
"""
self.assertEqual("firenado", firenado.conf.FIRENADO_CONFIG_FILE)
def test_firenado_config_file_custom_value(self):
""" Test if Firenado config file value will be changed setting
FIRENADO_CONFIG_FILE env variable.
"""
custom_file_name = "custom_file"
os.environ['FIRENADO_CONFIG_FILE'] = custom_file_name
reload(firenado.conf)
self.assertEqual(firenado.conf.FIRENADO_CONFIG_FILE, custom_file_name)
del os.environ['FIRENADO_CONFIG_FILE']
reload(firenado.conf)
def test_system_config_path_default_value(self):
""" Test if the default system config path is /etc/firenado
"""
self.assertEqual(firenado.conf.SYS_CONFIG_PATH, "/etc/firenado")
def test_system_config_path_custom_value(self):
""" Test if the default system config path will be changed setting the
FIRENADO_SYS_CONFIG_PATH env variable
"""
custom_sys_config_path = "/etc/anotherplace"
os.environ['FIRENADO_SYS_CONFIG_PATH'] = custom_sys_config_path
reload(firenado.conf)
self.assertEqual(firenado.conf.SYS_CONFIG_PATH, custom_sys_config_path)
del os.environ['FIRENADO_SYS_CONFIG_PATH']
reload(firenado.conf)
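    # Note (added): the env-var tests above and below tweak os.environ and then
    # reload(firenado.conf) so its module-level constants are recomputed; each
    # test removes the variable and reloads again to avoid leaking state into
    # the following tests.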
def test_only_framework_stack(self):
""" Tests is only the framework config stack was loaded.
No app config is provided.
"""
self.assertEqual(firenado.conf.stack[0],
firenado.conf.LIB_CONFIG_FILE)
def test_app_stack(self):
""" Application config is provided. Test if the app config file was
loaded.
"""
chdir_app("yml", "conf")
self.assertEqual(firenado.conf.stack[0],
firenado.conf.LIB_CONFIG_FILE)
self.assertEqual(firenado.conf.stack[1],
firenado.conf.APP_CONFIG_FILE)
def test_sys_stack(self):
""" System config path is provided. Test if the system config file was
loaded.
"""
os.environ['FIRENADO_SYS_CONFIG_PATH'] = os.path.join(
os.path.dirname(__file__), "resources", "conf", "sys_config")
reload(firenado.conf)
self.assertEqual(firenado.conf.stack[0],
firenado.conf.LIB_CONFIG_FILE)
self.assertEqual(firenado.conf.stack[1],
firenado.conf.SYS_CONFIG_FILE)
self.assertEqual("%(asctime)s - %(message)s",
firenado.conf.log['format'])
self.assertEqual(logging.DEBUG, firenado.conf.log['level'])
del os.environ['FIRENADO_SYS_CONFIG_PATH']
reload(firenado.conf)
def test_app_addresses_default(self):
""" If no addresses are provided to the application we default to
ipv4 and ipv6 loopbacks.
"""
# There is no addresses configured into the conf/yml firenado.yml
chdir_app("yml", "conf")
self.assertTrue(firenado.conf.app['socket'] is None)
self.assertEqual(len(firenado.conf.app['addresses']), 2)
self.assertEqual(firenado.conf.app['addresses'][0], "::")
self.assertEqual(firenado.conf.app['addresses'][1], "0.0.0.0")
def test_app_addresses_from_conf(self):
""" Getting localhost defined into the configuration.
"""
# At the conf/root_url app.addresses has only localhost
chdir_app("root_url", "conf")
self.assertTrue(firenado.conf.app['socket'] is None)
self.assertEqual(len(firenado.conf.app['addresses']), 1)
self.assertEqual(firenado.conf.app['addresses'][0], "localhost")
def test_app_port(self):
""" Checks if the app port is set correctly.
"""
# Loading file from test/resources/session/file/conf/firenado.yml
chdir_app("file", "session")
self.assertTrue(firenado.conf.app['socket'] is None)
self.assertEqual(firenado.conf.app['port'], 8887)
def test_app_pythonpath(self):
""" Checks if the pythonpath is set on the application config file.
"""
chdir_app("file", "session")
self.assertEqual(firenado.conf.app['pythonpath'], "..")
def test_yml_loaded(self):
""" On an application with a yml and yaml config files the yml should
be loaded.
"""
chdir_app("yml", "conf")
self.assertEqual("yml", fs.get_file_extension(
firenado.conf.APP_CONFIG_FILE))
def test_settings_empty(self):
""" If no app settings is defined an empty dict is set.
"""
chdir_app("yml", "conf")
self.assertDictEqual({}, firenado.conf.app['settings'])
def test_settings(self):
""" If no app settings is defined an empty dict is set.
"""
chdir_app("settings", "conf")
settings_dict = {
'cookie_secret': "cookie---secret",
'debug': True,
'xsrf_cookies': True
}
self.assertDictEqual(settings_dict, firenado.conf.app['settings'])
def test_static_path(self):
""" If static path is defined on the app configuration.
"""
chdir_app("yml", "conf")
self.assertEqual("yml_static_path", firenado.conf.app['static_path'])
def test_root_url(self):
""" Test if the root path was set on the app configuration.
"""
chdir_app("root_url", "conf")
self.assertEqual("a_root_url", firenado.conf.app['url_root_path'])
def test_root_url_slash_in_front(self):
""" Test if the root path with a slash in the front will be returned
without it was set on the app configuration.
"""
chdir_app("root_url_slash_in_front", "conf")
self.assertEqual("a_root_url", firenado.conf.app['url_root_path'])
def test_root_url_slash_none(self):
""" Test if the root path with a slash in the front will be returned
without it was set on the app configuration.
"""
chdir_app("root_url_slash_none", "conf")
self.assertEqual(None, firenado.conf.app['url_root_path'])
    def test_static_url_prefix(self):
""" If static url prefix is defined on the app configuration.
"""
chdir_app("yml", "conf")
self.assertEqual("yml_static_url_prefix",
firenado.conf.app['static_url_prefix'])
def test_session_type_file(self):
""" Checks if the session is enabled and the type is file
"""
chdir_app("file", "session")
self.assertEqual(firenado.conf.session['enabled'], True)
self.assertEqual(firenado.conf.session['type'], "file")
def test_session_name_default(self):
""" Checks if the session name is default, FIRENADOSESSID
"""
chdir_app("file", "session")
self.assertEqual(firenado.conf.session['enabled'], True)
self.assertEqual(firenado.conf.session['name'], "FIRENADOSESSID")
def test_session_type_redis(self):
""" Checks if the session is enabled and the type is redis
"""
chdir_app("redis", "session")
self.assertEqual(firenado.conf.session['enabled'], True)
self.assertEqual(firenado.conf.session['type'], "redis")
def test_session_name_custom(self):
""" Checks if the session name will be defined as in the config file
"""
chdir_app("redis", "session")
self.assertEqual(firenado.conf.session['enabled'], True)
self.assertEqual(firenado.conf.session['name'], "REDISSESSID")
class MultiAppTestCase(unittest.TestCase):
""" Case that tests multi app configuration.
"""
def test_multi_app_true(self):
""" Checks if the application is multi app
"""
chdir_app("multiapp")
self.assertTrue(firenado.conf.is_multi_app)
def test_multi_app_false(self):
""" Checks if the application isn't multi app
"""
chdir_app("tornadoweb")
self.assertFalse(firenado.conf.is_multi_app)
|
|
import pytest
from collections import OrderedDict
from modularodm.exceptions import ValidationError
from addons.box.models import BoxFile
from addons.dropbox.models import DropboxFile
from addons.github.models import GithubFile
from addons.googledrive.models import GoogleDriveFile
from addons.osfstorage.models import OsfStorageFile
from addons.s3.models import S3File
from website import settings
from website.util import permissions
from addons.osfstorage import settings as osfstorage_settings
from website.project.views.comment import update_file_guid_referent
from website.project.signals import comment_added, mention_added, contributor_added
from framework.exceptions import PermissionsError
from tests.base import capture_signals
from osf.models import Comment, NodeLog, Guid, FileNode
from osf.modm_compat import Q
from osf.utils.auth import Auth
from .factories import (
CommentFactory,
ProjectFactory,
NodeFactory,
UserFactory,
AuthUserFactory
)
# All tests will require a database
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def node(user):
return NodeFactory(creator=user)
@pytest.fixture()
def auth(user):
return Auth(user)
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def component(user, project):
return NodeFactory(parent=project, creator=user)
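# Fixture layout (added note): `user` creates each fixture node; `project` is
# a top-level project and `component` is a child node of that project. The
# comment and file-move tests below receive these fixtures via pytest
# argument injection.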
def test_comments_have_longer_guid():
comment = CommentFactory()
assert len(comment._id) == 12
def test_comments_are_queryable_by_root_target():
root_target = ProjectFactory()
comment = CommentFactory(node=root_target)
assert Comment.find(Q('root_target', 'eq', root_target.guids.first()))[0] == comment
# copied from tests/test_comments.py
class TestCommentModel:
def test_create(self):
first_comment = CommentFactory()
auth = Auth(user=first_comment.user)
comment = Comment.create(
auth=auth,
user=first_comment.user,
node=first_comment.node,
target=first_comment.target,
root_target=first_comment.root_target,
page='node',
content='This is a comment, and ya cant teach that.'
)
assert comment.user == first_comment.user
assert comment.node == first_comment.node
assert comment.target == first_comment.target
assert comment.node.logs.count() == 2
assert comment.node.logs.latest().action == NodeLog.COMMENT_ADDED
assert [] == first_comment.ever_mentioned
def test_create_comment_content_cannot_exceed_max_length_simple(self, node, user, auth):
with pytest.raises(ValidationError):
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=''.join(['c' for c in range(settings.COMMENT_MAXLENGTH + 3)])
)
def test_create_comment_content_cannot_exceed_max_length_complex(self, node, user, auth):
with pytest.raises(ValidationError):
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=''.join(['c' for c in range(settings.COMMENT_MAXLENGTH - 8)]) + '[@George Ant](http://localhost:5000/' + user._id + '/)'
)
def test_create_comment_content_does_not_exceed_max_length_complex(self, node, user, auth):
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=''.join(['c' for c in range(settings.COMMENT_MAXLENGTH - 12)]) + '[@George Ant](http://localhost:5000/' + user._id + '/)'
)
def test_create_comment_content_cannot_be_none(self, node, user, auth):
with pytest.raises(ValidationError) as error:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=None
)
assert error.value.messages[0] == 'This field cannot be null.'
def test_create_comment_content_cannot_be_empty(self, node, user, auth):
with pytest.raises(ValidationError) as error:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=''
)
assert error.value.messages[0] == 'This field cannot be blank.'
def test_create_comment_content_cannot_be_whitespace(self, node, user, auth):
with pytest.raises(ValidationError) as error:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content=' '
)
assert error.value.messages[0] == 'Value must not be empty.'
def test_create_sends_comment_added_signal(self, node, user, auth):
with capture_signals() as mock_signals:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content='This is a comment.'
)
assert mock_signals.signals_sent() == ({comment_added})
def test_create_sends_mention_added_signal_if_mentions(self, node, user, auth):
with capture_signals() as mock_signals:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content='This is a comment with a bad mention [@Unconfirmed User](http://localhost:5000/' + user._id + '/).'
)
assert mock_signals.signals_sent() == ({comment_added, mention_added})
def test_create_does_not_send_mention_added_signal_if_unconfirmed_contributor_mentioned(self, node, user, auth):
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
user = UserFactory()
user.is_registered = False
user.is_claimed = False
user.save()
node.add_contributor(user, visible=False, permissions=[permissions.READ], save=True)
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content='This is a comment with a bad mention [@Unconfirmed User](http://localhost:5000/' + user._id + '/).'
)
assert mock_signals.signals_sent() == ({contributor_added})
assert error.value.message == 'User does not exist or is not active.'
def test_create_does_not_send_mention_added_signal_if_noncontributor_mentioned(self, node, user, auth):
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
user = UserFactory()
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content='This is a comment with a bad mention [@Non-contributor User](http://localhost:5000/' + user._id + '/).'
)
assert mock_signals.signals_sent() == set([])
assert error.value.message == 'Mentioned user is not a contributor.'
def test_create_does_not_send_mention_added_signal_if_nonuser_mentioned(self, node, user, auth):
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
Comment.create(
auth=auth,
user=user,
node=node,
target=node.guids.all()[0],
content='This is a comment with a bad mention [@Not a User](http://localhost:5000/qwert/).'
)
assert mock_signals.signals_sent() == set([])
assert error.value.message == 'User does not exist or is not active.'
def test_edit(self):
comment = CommentFactory()
auth = Auth(comment.user)
comment.edit(
auth=auth,
content='edited',
save=True
)
assert comment.content == 'edited'
assert comment.modified
assert comment.node.logs.count() == 2
assert comment.node.logs.latest().action == NodeLog.COMMENT_UPDATED
def test_edit_sends_mention_added_signal_if_mentions(self):
comment = CommentFactory()
auth = Auth(comment.user)
with capture_signals() as mock_signals:
comment.edit(
auth=auth,
content='This is a comment with a bad mention [@Mentioned User](http://localhost:5000/' + comment.user._id + '/).',
save=True
)
assert mock_signals.signals_sent() == ({mention_added})
def test_edit_does_not_send_mention_added_signal_if_nonuser_mentioned(self):
comment = CommentFactory()
auth = Auth(comment.user)
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
comment.edit(
auth=auth,
content='This is a comment with a bad mention [@Not a User](http://localhost:5000/qwert/).',
save=True
)
assert mock_signals.signals_sent() == set([])
assert error.value.message == 'User does not exist or is not active.'
def test_edit_does_not_send_mention_added_signal_if_noncontributor_mentioned(self):
comment = CommentFactory()
auth = Auth(comment.user)
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
user = UserFactory()
comment.edit(
auth=auth,
content='This is a comment with a bad mention [@Non-contributor User](http://localhost:5000/' + user._id + '/).',
save=True
)
assert mock_signals.signals_sent() == set([])
assert error.value.message == 'Mentioned user is not a contributor.'
def test_edit_does_not_send_mention_added_signal_if_unconfirmed_contributor_mentioned(self):
comment = CommentFactory()
auth = Auth(comment.user)
with pytest.raises(ValidationError) as error:
with capture_signals() as mock_signals:
user = UserFactory()
user.is_registered = False
user.is_claimed = False
user.save()
comment.node.add_contributor(user, visible=False, permissions=[permissions.READ])
comment.node.save()
comment.edit(
auth=auth,
content='This is a comment with a bad mention [@Unconfirmed User](http://localhost:5000/' + user._id + '/).',
save=True
)
assert mock_signals.signals_sent() == ({contributor_added})
assert error.value.message == 'User does not exist or is not active.'
def test_edit_does_not_send_mention_added_signal_if_already_mentioned(self):
comment = CommentFactory()
auth = Auth(comment.user)
with capture_signals() as mock_signals:
comment.ever_mentioned = [comment.user._id]
comment.edit(
auth=auth,
content='This is a comment with a bad mention [@Already Mentioned User](http://localhost:5000/' + comment.user._id + '/).',
save=True
)
assert mock_signals.signals_sent() == set([])
def test_delete(self, node):
comment = CommentFactory(node=node)
auth = Auth(comment.user)
comment.delete(auth=auth, save=True)
        assert comment.is_deleted is True
assert comment.node.logs.count() == 2
assert comment.node.logs.latest().action == NodeLog.COMMENT_REMOVED
def test_undelete(self):
comment = CommentFactory()
auth = Auth(comment.user)
comment.delete(auth=auth, save=True)
comment.undelete(auth=auth, save=True)
assert not comment.is_deleted
assert comment.node.logs.count() == 3
assert comment.node.logs.latest().action == NodeLog.COMMENT_RESTORED
def test_read_permission_contributor_can_comment(self):
project = ProjectFactory()
user = UserFactory()
project.set_privacy('private')
project.add_contributor(user, permissions=[permissions.READ])
project.save()
assert project.can_comment(Auth(user=user))
def test_get_content_for_not_deleted_comment(self):
project = ProjectFactory(is_public=True)
comment = CommentFactory(node=project)
content = comment.get_content(auth=Auth(comment.user))
assert content == comment.content
def test_get_content_returns_deleted_content_to_commenter(self):
comment = CommentFactory(is_deleted=True)
content = comment.get_content(auth=Auth(comment.user))
assert content == comment.content
def test_get_content_does_not_return_deleted_content_to_non_commenter(self):
user = AuthUserFactory()
comment = CommentFactory(is_deleted=True)
content = comment.get_content(auth=Auth(user))
assert content is None
def test_get_content_public_project_does_not_return_deleted_content_to_logged_out_user(self):
project = ProjectFactory(is_public=True)
comment = CommentFactory(node=project, is_deleted=True)
content = comment.get_content(auth=None)
assert content is None
def test_get_content_private_project_throws_permissions_error_for_logged_out_users(self):
project = ProjectFactory(is_public=False)
comment = CommentFactory(node=project, is_deleted=True)
with pytest.raises(PermissionsError):
comment.get_content(auth=None)
def test_find_unread_is_zero_when_no_comments(self):
n_unread = Comment.find_n_unread(user=UserFactory(), node=ProjectFactory(), page='node')
assert n_unread == 0
def test_find_unread_new_comments(self):
project = ProjectFactory()
user = UserFactory()
project.add_contributor(user, save=True)
CommentFactory(node=project, user=project.creator)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
def test_find_unread_includes_comment_replies(self):
project = ProjectFactory()
user = UserFactory()
project.add_contributor(user, save=True)
comment = CommentFactory(node=project, user=user)
CommentFactory(node=project, target=Guid.load(comment._id), user=project.creator)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
def test_find_unread_does_not_include_deleted_comments(self):
project = ProjectFactory()
user = AuthUserFactory()
project.add_contributor(user)
project.save()
CommentFactory(node=project, user=project.creator, is_deleted=True)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 0
# copied from tests/test_comments.py
class FileCommentMoveRenameTestMixin(object):
# TODO: Remove skip decorators when files are implemented
# and when waterbutler payloads are consistently formatted
# for intra-provider folder moves and renames.
id_based_providers = ['osfstorage']
@property
def provider(self):
raise NotImplementedError
@property
def ProviderFile(self):
raise NotImplementedError
@classmethod
def _format_path(cls, path, file_id=None):
return path
def _create_source_payload(self, path, node, provider, file_id=None):
return OrderedDict([('materialized', path),
('name', path.split('/')[-1]),
('nid', node._id),
('path', self._format_path(path, file_id)),
('provider', provider),
('url', '/project/{}/files/{}/{}/'.format(node._id, provider, path.strip('/'))),
('node', {'url': '/{}/'.format(node._id), '_id': node._id, 'title': node.title}),
('addon', provider)])
def _create_destination_payload(self, path, node, provider, file_id, children=None):
destination_path = PROVIDER_CLASS.get(provider)._format_path(path=path, file_id=file_id)
destination = OrderedDict([('contentType', ''),
('etag', 'abcdefghijklmnop'),
('extra', OrderedDict([('revisionId', '12345678910')])),
('kind', 'file'),
('materialized', path),
('modified', 'Tue, 02 Feb 2016 17:55:48 +0000'),
('name', path.split('/')[-1]),
('nid', node._id),
('path', destination_path),
('provider', provider),
('size', 1000),
('url', '/project/{}/files/{}/{}/'.format(node._id, provider, path.strip('/'))),
('node', {'url': '/{}/'.format(node._id), '_id': node._id, 'title': node.title}),
('addon', provider)])
if children:
destination_children = [self._create_destination_payload(child['path'], child['node'], child['provider'], file_id) for child in children]
destination.update({'children': destination_children})
return destination
def _create_payload(self, action, user, source, destination, file_id, destination_file_id=None):
return OrderedDict([
('action', action),
('auth', OrderedDict([('email', user.username), ('id', user._id), ('name', user.fullname)])),
('destination', self._create_destination_payload(path=destination['path'],
node=destination['node'],
provider=destination['provider'],
file_id=destination_file_id or file_id,
children=destination.get('children', []))),
('source', self._create_source_payload(source['path'], source['node'], source['provider'], file_id=file_id)),
('time', 100000000),
('node', source['node']),
('project', None)
])
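    # Illustration (added note): _create_payload('move', user, source,
    # destination, file_id) mimics the waterbutler webhook body that
    # update_file_guid_referent() receives: an OrderedDict with 'action',
    # 'auth', 'destination', 'source', 'time', 'node' and 'project' keys,
    # where the nested source/destination dicts come from the two helpers
    # above.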
def _create_file_with_comment(self, node, path, user):
self.file = self.ProviderFile.create(
is_file=True,
node=node,
path=path,
name=path.strip('/'),
materialized_path=path)
self.guid = self.file.get_guid(create=True)
self.file.save()
self.comment = CommentFactory(user=user, node=node, target=self.guid)
def test_comments_move_on_file_rename(self, project, user):
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': '/file_renamed.txt',
'node': project,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_renamed', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_on_folder_rename(self, project, user):
source = {
'path': '/subfolder1/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder2/',
'node': project,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_renamed', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_on_subfolder_file_when_parent_folder_is_renamed(self, project, user):
source = {
'path': '/subfolder1/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder2/',
'node': project,
'provider': self.provider
}
file_path = 'sub-subfolder/file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_path), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_renamed', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_path), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_to_subfolder(self, project, user):
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/file.txt',
'node': project,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_from_subfolder_to_root(self, project, user):
source = {
'path': '/subfolder/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_from_project_to_component(self, project, component, user):
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': '/file.txt',
'node': component,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
assert self.guid.referent.node._id == destination['node']._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_from_component_to_project(self, project, component, user):
source = {
'path': '/file.txt',
'node': component,
'provider': self.provider
}
destination = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
assert self.guid.referent.node._id == destination['node']._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_to_subfolder(self, user, project):
source = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder2/subfolder/',
'node': project,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_from_subfolder_to_root(self, project, user):
source = {
'path': '/subfolder2/subfolder/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_from_project_to_component(self, project, component, user):
source = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': component,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_from_component_to_project(self, project, component, user):
source = {
'path': '/subfolder/',
'node': component,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_to_osfstorage(self, project, user):
osfstorage = project.get_addon('osfstorage')
root_node = osfstorage.get_root()
osf_file = root_node.append_file('file.txt')
osf_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png',
'etag': 'abcdefghijklmnop'
}).save()
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': osf_file.path,
'node': project,
'provider': 'osfstorage'
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id, destination_file_id=destination['path'].strip('/'))
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class('osfstorage', FileNode.FILE).get_or_create(destination['node'], destination['path'])
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_to_osfstorage(self, project, user):
osfstorage = project.get_addon('osfstorage')
root_node = osfstorage.get_root()
osf_folder = root_node.append_folder('subfolder')
osf_file = osf_folder.append_file('file.txt')
osf_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png',
'etag': '1234567890abcde'
}).save()
source = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': project,
'provider': 'osfstorage',
'children': [{
'path': '/subfolder/file.txt',
'node': project,
'provider': 'osfstorage'
}]
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id, destination_file_id=osf_file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class('osfstorage', FileNode.FILE).get_or_create(destination['node'], osf_file._id)
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
@pytest.mark.parametrize(
['destination_provider', 'destination_path'],
[('box', '/1234567890'), ('dropbox', '/file.txt'), ('github', '/file.txt'), ('googledrive', '/file.txt'), ('s3', '/file.txt')]
)
def test_comments_move_when_file_moved_to_different_provider(self, destination_provider, destination_path, project, user):
if self.provider == destination_provider:
return True
project.add_addon(destination_provider, auth=Auth(user))
project.save()
self.addon_settings = project.get_addon(destination_provider)
self.addon_settings.folder = '/AddonFolder'
self.addon_settings.save()
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': destination_path,
'node': project,
'provider': destination_provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(destination_provider, FileNode.FILE).get_or_create(destination['node'], destination['path'])
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
@pytest.mark.parametrize(
['destination_provider', 'destination_path'],
[('box', '/1234567890'), ('dropbox', '/subfolder/file.txt'), ('github', '/subfolder/file.txt'), ('googledrive', '/subfolder/file.txt'), ('s3', '/subfolder/file.txt'),]
)
def test_comments_move_when_folder_moved_to_different_provider(self, destination_provider, destination_path, project, user):
if self.provider == destination_provider:
return True
project.add_addon(destination_provider, auth=Auth(user))
project.save()
self.addon_settings = project.get_addon(destination_provider)
self.addon_settings.folder = '/AddonFolder'
self.addon_settings.save()
source = {
'path': '/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': project,
'provider': destination_provider,
'children': [{
'path': '/subfolder/file.txt',
'node': project,
'provider': destination_provider
}]
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(destination_provider, FileNode.FILE).get_or_create(destination['node'], destination_path)
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
# copied from tests/test_comments.py
class TestOsfstorageFileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 'osfstorage'
ProviderFile = OsfStorageFile
@classmethod
def _format_path(cls, path, file_id=None):
super(TestOsfstorageFileCommentMoveRename, cls)._format_path(path)
return '/{}{}'.format(file_id, ('/' if path.endswith('/') else ''))
def _create_file_with_comment(self, node, path, user):
osfstorage = node.get_addon(self.provider)
root_node = osfstorage.get_root()
self.file = root_node.append_file('file.txt')
self.file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png',
'etag': 'abcdefghijklmnop'
}).save()
self.file.materialized_path = path
self.guid = self.file.get_guid(create=True)
self.comment = CommentFactory(user=user, node=node, target=self.guid)
def test_comments_move_when_file_moved_from_project_to_component(self, project, component, user):
source = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
destination = {
'path': '/file.txt',
'node': component,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
self.file.move_under(destination['node'].get_addon(self.provider).get_root())
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
assert self.guid.referent.node._id == destination['node']._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_from_component_to_project(self, project, component, user):
source = {
'path': '/file.txt',
'node': component,
'provider': self.provider
}
destination = {
'path': '/file.txt',
'node': project,
'provider': self.provider
}
self._create_file_with_comment(node=source['node'], path=source['path'], user=user)
self.file.move_under(destination['node'].get_addon(self.provider).get_root())
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path(destination['path'], file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
assert self.guid.referent.node._id == destination['node']._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_from_project_to_component(self, project, component, user):
source = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': component,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
self.file.move_under(destination['node'].get_addon(self.provider).get_root())
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_folder_moved_from_component_to_project(self, project, component, user):
source = {
'path': '/subfolder/',
'node': component,
'provider': self.provider
}
destination = {
'path': '/subfolder/',
'node': project,
'provider': self.provider
}
file_name = 'file.txt'
self._create_file_with_comment(node=source['node'], path='{}{}'.format(source['path'], file_name), user=user)
self.file.move_under(destination['node'].get_addon(self.provider).get_root())
payload = self._create_payload('move', user, source, destination, self.file._id)
update_file_guid_referent(self=None, node=destination['node'], event_type='addon_file_moved', payload=payload)
self.guid.reload()
file_node = FileNode.resolve_class(self.provider, FileNode.FILE).get_or_create(destination['node'], self._format_path('{}{}'.format(destination['path'], file_name), file_id=self.file._id))
assert self.guid._id == file_node.get_guid()._id
file_comments = Comment.find(Q('root_target', 'eq', self.guid.pk))
assert file_comments.count() == 1
def test_comments_move_when_file_moved_to_osfstorage(self):
# Already in OSFStorage
pass
def test_comments_move_when_folder_moved_to_osfstorage(self):
# Already in OSFStorage
pass
# copied from tests/test_comments.py
class TestBoxFileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 'box'
ProviderFile = BoxFile
def _create_file_with_comment(self, node, path, user):
self.file = self.ProviderFile.create(
is_file=True,
node=node,
path=self._format_path(path),
name=path.strip('/'),
materialized_path=path)
self.file.save()
self.guid = self.file.get_guid(create=True)
self.comment = CommentFactory(user=user, node=node, target=self.guid)
@classmethod
def _format_path(cls, path, file_id=None):
super(TestBoxFileCommentMoveRename, cls)._format_path(path)
return '/9876543210/' if path.endswith('/') else '/1234567890'
@pytest.mark.skip
class TestDropboxFileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 'dropbox'
ProviderFile = DropboxFile
def _create_file_with_comment(self, node, path, user):
self.file = self.ProviderFile.create(
is_file=True,
node=node,
path='{}{}'.format(node.get_addon(self.provider).folder, path),
name=path.strip('/'),
materialized_path=path)
self.file.save()
self.guid = self.file.get_guid(create=True)
self.comment = CommentFactory(user=user, node=node, target=self.guid)
@pytest.mark.skip
class TestGoogleDriveFileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 'googledrive'
ProviderFile = GoogleDriveFile
@pytest.mark.skip
class TestGithubFileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 'github'
ProviderFile = GithubFile
@pytest.mark.skip
class TestS3FileCommentMoveRename(FileCommentMoveRenameTestMixin):
provider = 's3'
ProviderFile = S3File
PROVIDER_CLASS = {
'osfstorage': TestOsfstorageFileCommentMoveRename,
'box': TestBoxFileCommentMoveRename,
'dropbox': TestDropboxFileCommentMoveRename,
'github': TestGithubFileCommentMoveRename,
'googledrive': TestGoogleDriveFileCommentMoveRename,
's3': TestS3FileCommentMoveRename
}
|
|
from django.conf import settings
from numbers import Number
import json
import os
def check_for_target_url_duplication_and_generate_error_message(self, partner=False):
"""
Filter for partners (PROXY and BUNDLE) where the
target_url is the same as self. On filtering, if we have
a non-zero number of matches, we generate the appropriate
error message to be shown to the staff.
:param self:
:param partner:
:return:
"""
from TWLight.resources.models import Partner
duplicate_target_url_partners = Partner.objects.filter(
authorization_method__in=[Partner.PROXY, Partner.BUNDLE],
target_url=self.target_url,
).values_list("company_name", flat=True)
# Exclude self from the filtered partner list, if the operation
# is performed on Partners.
if partner:
duplicate_target_url_partners = duplicate_target_url_partners.exclude(
pk=self.pk
)
partner_duplicates_count = duplicate_target_url_partners.count()
if partner_duplicates_count != 0:
validation_error_msg = (
"No two or more partners can have the same target url. "
"The following partner(s) have the same target url: "
)
validation_error_msg_partners = "None"
if partner_duplicates_count > 1:
validation_error_msg_partners = ", ".join(duplicate_target_url_partners)
elif partner_duplicates_count == 1:
validation_error_msg_partners = duplicate_target_url_partners[0]
return validation_error_msg + " Partner(s): " + validation_error_msg_partners
return None
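# Hedged usage sketch (not part of the original module): the helper above is
# written to be called with a Partner-like instance bound to `self`, e.g. from
# a model's clean() method. The function name below is hypothetical and only
# illustrates the intended call pattern.
def _example_validate_partner_target_url(partner_instance):
    from django.core.exceptions import ValidationError

    error_msg = check_for_target_url_duplication_and_generate_error_message(
        partner_instance, partner=True
    )
    if error_msg is not None:
        raise ValidationError(error_msg)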
def get_partner_description_json_schema():
"""
JSON Schema for partner description translations
"""
from TWLight.resources.models import Partner
no_of_partners = Partner.objects.count()
no_of_possible_descriptions = (
no_of_partners * 2
) + 1 # The extra item is the metadata key
    JSON_SCHEMA_PARTNER_DESCRIPTION = {
        "$schema": "https://json-schema.org/draft/2020-12/schema",
        "type": "object",
        # "maxProperties" (not "maxItems") is the JSON Schema keyword that
        # limits the number of keys on an object.
        "maxProperties": no_of_possible_descriptions,
    }
return JSON_SCHEMA_PARTNER_DESCRIPTION
def get_partner_description(
language_code: str, partner_short_description_key: str, partner_description_key: str
):
"""
Function that gets a partner's short description and description in the language
set by the user. If the descriptions don't exist in that language, the default
will be returned (English)
Parameters
----------
language_code: str
The language code the user has selected on TWL's settings
partner_short_description_key: str
The partner short description key that should be found in a json file
partner_description_key: str
The partner description key that should be found in a json file
Returns
-------
dict
"""
descriptions = {}
# Getting the default file in case the description does not exist in
# the language file
partner_default_descriptions_dict = _read_translation_file(
"en", "partner_descriptions"
)
partner_descriptions_dict = _read_translation_file(
language_code, "partner_descriptions"
)
descriptions["short_description"] = _get_any_description(
partner_default_descriptions_dict,
partner_descriptions_dict,
partner_short_description_key,
)
descriptions["description"] = _get_any_description(
partner_default_descriptions_dict,
partner_descriptions_dict,
partner_description_key,
)
return descriptions
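# Hedged usage sketch: the description keys below follow an assumed
# "partner_short_description_<pk>" / "partner_description_<pk>" naming scheme;
# the actual key format lives in the locale JSON files, not in this module.
def _example_get_partner_description():
    descriptions = get_partner_description(
        "es", "partner_short_description_42", "partner_description_42"
    )
    # Falls back to the English text for any key missing from the "es" file.
    return descriptions["short_description"], descriptions["description"]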
def get_tag_names(language_code: str, tag_field: dict):
"""
Function that gets a partner's tag in the user's preferred language.
If the tags don't exist in that language, the default
will be returned (English)
Parameters
----------
language_code: str
The language code the user has selected on TWL's settings
tag_field: dict
The new_tags JSONField that contains the tag's names
Returns
-------
dict
"""
tag_names = {}
tag_names_default = _read_translation_file("en", "tag_names")
tag_names_lang = _read_translation_file(language_code, "tag_names")
if tag_field:
for tag in tag_field["tags"]:
if tag in tag_names_lang:
tag_names[tag] = tag_names_lang[tag]
else:
tag_names[tag] = tag_names_default[tag]
return tag_names
def get_tag_choices(language_code: str = "en"):
"""
Function that gets all the tags, preferably translated to the user's preferred
language, otherwise the default language
Parameters
----------
language_code: str
The language code the user has selected on TWL's settings
Returns
-------
tuple
"""
tag_choices = []
tag_names_default = _read_translation_file("en", "tag_names")
tag_names_lang = _read_translation_file(language_code, "tag_names")
for tag_key, tag_value in tag_names_default.items():
lang_keys = tag_names_lang.keys()
if tag_key in lang_keys:
tag_tuple = (tag_key, tag_names_lang[tag_key])
else:
tag_tuple = (tag_key, tag_value)
tag_choices.append(tag_tuple)
tag_choices.sort(key=lambda a: a[1])
TAG_CHOICES = tuple(tag_choices)
return TAG_CHOICES
def get_tag_dict(language_code: str = "en"):
"""
Function that gets all the tags in the form of a dictionary, preferably
translated to the user's preferred language, otherwise the default language
Parameters
----------
language_code: str
The language code the user has selected on TWL's settings
Returns
-------
dict
"""
tag_dict = {}
sorted_tags = {}
tag_names_default = _read_translation_file("en", "tag_names")
tag_names_lang = _read_translation_file(language_code, "tag_names")
for tag_key, tag_value in tag_names_default.items():
lang_keys = tag_names_lang.keys()
if tag_key in lang_keys:
tag_dict[tag_key] = tag_names_lang[tag_key]
else:
tag_dict[tag_key] = tag_value
sorted_tuples = sorted(tag_dict.items(), key=lambda item: item[1])
sorted_tags = {k: v for k, v in sorted_tuples}
return sorted_tags
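# Hedged usage sketch (illustrative only): get_tag_choices returns a tuple of
# (key, label) pairs shaped for a Django `choices` argument, while
# get_tag_dict returns the same mapping as a plain dict for display code.
# The form and field names below are hypothetical.
def _example_tag_choice_form(language_code="fr"):
    from django import forms

    class _ExampleTagForm(forms.Form):
        tags = forms.MultipleChoiceField(choices=get_tag_choices(language_code))

    return _ExampleTagForm()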
def _read_translation_file(language_code: str, filename: str):
"""
    Reads a partner description file and returns a dictionary, if the file exists
    Parameters
    ----------
language_code: str
The language code the user has selected in their settings
filename: str
The name of the translation file you want to open (partner descriptions or tags)
Returns
-------
dict
"""
twlight_home = settings.TWLIGHT_HOME
filepath = "{twlight_home}/locale/{language_code}/{filename}.json".format(
twlight_home=twlight_home, language_code=language_code, filename=filename
)
if os.path.isfile(filepath):
with open(filepath, "r") as translation_file:
translation_dict = json.load(translation_file)
# Remove the "@metadata" key from the dictionary
if "@metadata" in translation_dict:
translation_dict.pop("@metadata")
return translation_dict
else:
return {}
def _get_any_description(
partner_default_descriptions_dict: dict,
partner_descriptions_dict: dict,
partner_key: str,
):
"""
Returns either the default partner description or the partner description in the
user's language of choice
Parameters
----------
partner_default_descriptions_dict : dict
The default descriptions dictionary.
partner_descriptions_dict : dict
The descriptions dictionary with descriptions in the user's preferred language
partner_key: str
The description key we are looking for
Returns
-------
str or None
"""
if partner_key in partner_descriptions_dict.keys():
return partner_descriptions_dict[partner_key]
elif partner_key in partner_default_descriptions_dict.keys():
return partner_default_descriptions_dict[partner_key]
else:
return None
def get_tags_json_schema():
"""
JSON Schema for tag names
"""
tags_json = _read_translation_file("en", "tag_names")
tag_keys = list(tags_json.keys())
number_of_tags = len(tag_keys)
JSON_SCHEMA_TAGS = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"properties": {
"tags": {
"$id": "#/properties/tags",
"type": "array",
"items": {
"$id": "#/properties/tags/items",
"enum": tag_keys,
"type": "string",
"examples": ["biology_tag", "military_tag"],
},
"maxItems": number_of_tags,
}
},
"additionalProperties": False,
"required": ["tags"],
}
return JSON_SCHEMA_TAGS
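# Hedged usage sketch: the schema above can be checked with the third-party
# `jsonschema` package (an assumed, optional dependency here; the original
# module does not import it) before saving a partner's new_tags field.
def _example_validate_new_tags(tag_field):
    import jsonschema

    # Raises jsonschema.ValidationError if tag_field has unexpected keys or
    # contains tags that are not present in the English tag_names file.
    jsonschema.validate(instance=tag_field, schema=get_tags_json_schema())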
def get_median(values_list):
"""Given a list (of numbers), returns its median value."""
try:
for item in values_list:
assert isinstance(item, Number)
except AssertionError:
return 0
values_list.sort()
list_len = len(values_list)
if list_len < 1:
# Mathematically bogus, but will make graph display correctly.
median = 0
elif list_len % 2 == 1:
median = int(values_list[(list_len - 1) // 2])
else:
median = int(
(values_list[(list_len - 1) // 2] + values_list[1 + (list_len - 1) // 2])
// 2
)
return median
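# Worked example of the conventions above: the median of an even-length list
# is the integer mean of the two middle values, and non-numeric input falls
# back to 0 so that graphs still render.
def _example_get_median():
    assert get_median([3, 1, 2]) == 2       # odd length: middle value
    assert get_median([1, 2, 3, 4]) == 2    # even length: int((2 + 3) // 2)
    assert get_median([1, "a"]) == 0        # non-numeric input returns 0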
|
|
import json
from fjord.base.tests import (
AnalyzerProfileFactory,
LocalizingClient,
TestCase,
reverse,
)
from fjord.feedback.tests import ResponseFactory
class TestTriggerRuleMatchViewAPI(TestCase):
client_class = LocalizingClient
def test_not_logged_in(self):
data = {}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 403
def test_empty_tr(self):
feedback_responses = ResponseFactory.create_batch(5)
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
data = {
'locales': [],
'products': [],
'versions': [],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
# Note: This matches everything because it's an empty rule.
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fr.id for fr in reversed(feedback_responses)]
)
def test_invalid_data(self):
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data='[}'
)
assert resp.status_code == 400
def test_locales(self):
ResponseFactory(locale=u'fr')
es_resp = ResponseFactory(locale=u'es')
enus_resp = ResponseFactory(locale=u'en-US')
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
# Test one locale
data = {
'locales': [u'en-US'],
'products': [],
'versions': [],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[enus_resp.id]
)
# Test two
data['locales'] = [u'en-US', u'es']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[enus_resp.id, es_resp.id]
)
def test_products(self):
fx_resp = ResponseFactory(product=u'Firefox')
fxa_resp = ResponseFactory(product=u'Firefox for Android')
ResponseFactory(product=u'Loop')
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
# Test one product
data = {
'locales': [],
'products': [u'Firefox'],
'versions': [],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fx_resp.id]
)
# Test two
data['products'] = [u'Firefox', u'Firefox for Android']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fxa_resp.id, fx_resp.id]
)
def test_versions(self):
te_resp = ResponseFactory(version=u'38.0')
teof_resp = ResponseFactory(version=u'38.0.5')
ResponseFactory(version=u'39.0')
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
# Test one version
data = {
'locales': [],
'products': [],
'versions': [u'38.0'],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[te_resp.id]
)
# Test two
data['versions'] = [u'38.0', u'38.0.5']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[teof_resp.id, te_resp.id]
)
# Test prefix
data['versions'] = [u'38*']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[teof_resp.id, te_resp.id]
)
def test_keywords(self):
rte_resp = ResponseFactory(description=u'Ride the lightning')
fwtbt_resp = ResponseFactory(description=u'For whom the bell tolls')
ResponseFactory(description=u'The thing that should not be')
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
# Test one keyword
data = {
'locales': [],
'products': [],
'versions': [],
'keywords': [u'lightning'],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[rte_resp.id]
)
# Test two
data['keywords'] = [u'lightning', u'tolls']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fwtbt_resp.id, rte_resp.id]
)
# Test phrase
data['keywords'] = [u'bell tolls']
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fwtbt_resp.id]
)
def test_url_exists(self):
fb1 = ResponseFactory(url=u'')
fb2 = ResponseFactory(url=u'http://example.com')
fb3 = ResponseFactory(url=u'http://example.com')
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
# Test don't care
data = {
'locales': [],
'products': [],
'versions': [],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fb3.id, fb2.id, fb1.id]
)
# Test has a url
data['url_exists'] = True
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fb3.id, fb2.id]
)
# Test does not have a url
data['url_exists'] = False
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
assert (
[item['id'] for item in json.loads(resp.content)['results']] ==
[fb1.id]
)
def test_contents(self):
fr = ResponseFactory()
jane = AnalyzerProfileFactory().user
self.client_login_user(jane)
data = {
'locales': [],
'products': [],
'versions': [],
'keywords': [],
'url_exists': None
}
resp = self.client.post(
reverse('triggerrule-match'),
content_type='application/json',
data=json.dumps(data)
)
assert resp.status_code == 200
content = json.loads(resp.content)
assert (
content['results'] ==
[
{
u'id': int(fr.id),
u'created': fr.created.strftime(u'%Y-%m-%dT%H:%M:%S'),
u'description': fr.description,
u'happy': fr.happy,
u'locale': fr.locale,
u'product': fr.product,
u'platform': fr.platform,
u'url': fr.url,
u'version': fr.version
}
]
)
|
|
#!/usr/bin/env python3
# Python implementations of HALCON's radial distortion models.
# These are intended to work both element-wise for numpy arrays,
# and also for scalars.
# Transcribed from the documentation for the "calibrate_cameras" HALCON operator.
#
# I performed some manual common subexpression elimination since
# numpy isn't a compiler and won't do CSE.
import numpy
from numpy import sqrt
from numba import jit
halcon_model_types = ['area_scan_division', 'area_scan_polynomial']
@jit
def undistort_division(u_tilde, v_tilde, kappa):
"""
From the HALCON Docs:
    The division model uses one parameter (kappa) to model the radial distortions.
    The following equations transform the distorted image plane coordinates into
    undistorted image plane coordinates if the division model is used.
u_tilde and v_tilde are in camera coordinates, usually in meters, at a distance of one focal length
from the center of projection.
"""
r_tilde_squared = u_tilde**2 + v_tilde**2
scaling = 1.0 / (1.0 + kappa * r_tilde_squared)
u = scaling * u_tilde
v = scaling * v_tilde
return u, v
@jit
def distort_division(u, v, kappa):
"""
From the HALCON Docs:
These equations can be inverted analytically, which leads to the following equations that transform undistorted coordinates into distorted coordinates:
kappa = 0 means no distortion.
Warning! The units of u,v and kappa must be compatible!
My understanding is that HALCON Kappa works with u,v in meters!
"""
r_squared = u**2 + v**2
temp = 1.0 - 4.0 * kappa * r_squared
scaling = 2.0 / (1.0 + sqrt(temp))
#print('scaling=',scaling)
u_tilde = scaling * u
v_tilde = scaling * v
return u_tilde, v_tilde
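# Hedged round-trip check (not part of the original module): the division
# model is analytically invertible, so distorting and then undistorting a
# point should recover it to numerical precision. The kappa value is
# illustrative only.
def _example_division_round_trip():
    u, v, kappa = 0.01, -0.005, -250.0  # image-plane coordinates in metres
    u_tilde, v_tilde = distort_division(u, v, kappa)
    u2, v2 = undistort_division(u_tilde, v_tilde, kappa)
    assert abs(u - u2) < 1e-12 and abs(v - v2) < 1e-12
    return u_tilde, v_tilde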
@jit
def undistort_halcon_polynomial(u_tilde, v_tilde, k1, k2, k3, p1, p2):
"""
From the HALCON Docs:
    The polynomial model uses three parameters (k1, k2, k3) to model
    the radial distortions and two parameters (p1, p2) to model
    the decentering distortions.
The following equations transform the distorted image
plane coordinates into undistorted image plane coordinates
if the polynomial model is used:
These equations cannot be inverted analytically. Therefore,
distorted image plane coordinates must be calculated from
undistorted image plane coordinates numerically.
k1=k2=k3=p1=p2=0 means no distortion.
"""
u_tilde_to_2 = u_tilde**2
v_tilde_to_2 = v_tilde**2
r_to_2 = u_tilde_to_2 + v_tilde_to_2
r_to_4 = r_to_2**2
r_to_6 = r_to_4 * r_to_2
temp1 = k1 * r_to_2
temp1 += k2 * r_to_4
temp1 += k3 * r_to_6
u = u_tilde + u_tilde * temp1 # the radial part
v = v_tilde + v_tilde * temp1
uv_tilde = u_tilde * v_tilde
u += p1 * (r_to_2 + 2 * u_tilde_to_2) + 2 * p2 * uv_tilde # The tilt part
v += 2 * p1 * uv_tilde + p2 * (r_to_2 + 2 * v_tilde_to_2)
return u,v
@jit
def distort_halcon_polynomial(u, v, k1, k2, k3, p1, p2):
    """Inverse of undistort_halcon_polynomial.
    The polynomial model cannot be inverted analytically, so distortion
    requires an iterative/numerical solution (see the sketch below)."""
    raise NotImplementedError('distort_halcon_polynomial requires an iterative solution')
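# Minimal numerical-inversion sketch under the assumption that scipy is
# available (it is not imported by the original module): recover distorted
# coordinates by root-finding on the forward (undistort) model.
def _example_distort_polynomial_numerically(u, v, k1, k2, k3, p1, p2):
    from scipy.optimize import fsolve

    def residual(x):
        uu, vv = undistort_halcon_polynomial(x[0], x[1], k1, k2, k3, p1, p2)
        return [uu - u, vv - v]

    u_tilde, v_tilde = fsolve(residual, x0=[u, v])
    return u_tilde, v_tilde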
def project_and_distort_simple(point_3d, camera_parameters):
""" Simplified api that takes a 3d point, applies extrinsics, intrinsics,
and radial distortion to return a 2d image point corresponding to a 3d point.
This implementation is not for speed critical use; see project_and_distort. """
R = camera_parameters['R']
T = camera_parameters['T']
kappa = camera_parameters['kappa']
f_mm = camera_parameters['f'] * 1000.0 # Because project_and_distort takes mm
image_height = camera_parameters['image_height']
image_width = camera_parameters['image_width']
pixel_h_mm = camera_parameters['pixel_h'] * 1000.0
pixel_w_mm = camera_parameters['pixel_w'] * 1000.0
cx = camera_parameters['cx']
cy = camera_parameters['cy']
x,y,z = numpy.dot(R,point_3d) + T
assert z*1000.0 > f_mm, 'A voxel is behind the image plane of a camera!'
u, v = project_and_distort(x, y, z, f_mm, image_height, image_width,
pixel_h_mm, pixel_w_mm, cx, cy, kappa)
return u,v
@jit
def project_and_distort(x, y, z, f_mm, sensor_h, sensor_w, pixel_h_mm, pixel_w_mm, cx,
cy, kappa):
""" Project a 3D point into a sensor plane and
simulate lens distortion to get (sub)pixel coordinates.
This applies the camera's intrinsic / internal parameters.
The caller is responsible for first applying the cameras extrinsic / external parameters
to get the point's location in the camera frame.
Note that the definition of the projection/distortion process is consistent with HALCON,
    but there exist other conventions, so be careful! I.e., HALCON's radial distortion is applied
to coordinates in the image plane (in m); I have seen other groups do radial distortion on the
pixel coordinates.
Inputs:
x,y,z - real world coordinates in the camera frame. Scale doesn't matter.
f_mm - focal length of the lens in mm! This is to be consistent with giplib.
sensor_h,sensor_w - height and width of the sensor in pixels
pixel_h,pixel_w - height and width of a sensor pixel in mm! This is to be consistent with giplib.
cx,cy - the center of the sensor optical axis in pixels
kappa - the division model radial distortion parameter as defined by halcon. """
f_meters = f_mm * 0.001
x_projected = x * f_meters / z # meters
y_projected = y * f_meters / z
# z_projected = f_meters
# Apply radial distortion in the image plane
u = x_projected # meters
v = y_projected
#if kappa != 0.0 and kappa != -0.0:
#print('Non-zero Kappa! Applying radial distortion!')
#u_tilde, v_tilde = distort_division(u, v, kappa)
#else:
#u_tilde, v_tilde = u, v
u_tilde, v_tilde = distort_division(u, v, kappa) # meters
# Convert to pixel (sub) coordinates
u_pixel = u_tilde / (pixel_w_mm * .001) + cx # pixels
v_pixel = v_tilde / (pixel_h_mm * .001) + cy
#assert False
return u_pixel, v_pixel
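# Hedged usage sketch: a minimal camera_parameters dict accepted by
# project_and_distort_simple. All values are illustrative (identity rotation,
# 8 mm lens, 5 um square pixels, principal point at the sensor centre).
def _example_project_point():
    camera_parameters = {
        'R': numpy.eye(3),
        'T': numpy.zeros(3),
        'kappa': -250.0,
        'f': 0.008,            # metres; converted to mm internally
        'image_height': 1024,
        'image_width': 1280,
        'pixel_h': 5e-6,       # metres; converted to mm internally
        'pixel_w': 5e-6,
        'cx': 640.0,
        'cy': 512.0,
    }
    return project_and_distort_simple(numpy.array([0.1, 0.05, 2.0]), camera_parameters)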
def triangulate(camera_point_tuples):
""" Takes (camera parameters, 2d point) tuples and computes the closest
3d point to the viewing rays.
    This solves the inverse problem of project_and_distort_simple."""
points_on_the_lines = []
direction_vectors = []
    assert len(camera_point_tuples) >= 2, 'There are not enough points to triangulate!'
for camera_parameters,(u_pixel,v_pixel) in camera_point_tuples:
# Extract two points per ray in world coordinates.
        R = camera_parameters['R']  # R, T map world coordinates into camera coordinates
T = camera_parameters['T']
R_,T_ = R.T,numpy.dot(-R.T,T) # map camera coordinates into world coordinates
kappa = camera_parameters['kappa']
f_m = camera_parameters['f'] # Because project_and_distort takes mm
image_height = camera_parameters['image_height']
image_width = camera_parameters['image_width']
pixel_h_m = camera_parameters['pixel_h']
pixel_w_m = camera_parameters['pixel_w']
cx = camera_parameters['cx']
cy = camera_parameters['cy']
u_tilde = (u_pixel - cx) * pixel_w_m # meters
v_tilde = (v_pixel - cy) * pixel_h_m
u,v = undistort_division(u_tilde, v_tilde, kappa) # meters
#point1_camera = numpy.array([0,0,0])
point1_world = camera_center = T_ # in world coordinates since R_*[[0],[0],[0]] + T_ = T_
point2_camera = numpy.array((u,v,f_m))
point2_world = numpy.dot(R_,point2_camera)+T_
direction_world = point2_world - point1_world
points_on_the_lines.append(point1_world)
direction_vectors.append(direction_world)
from plaroma3d.camera_geometry import point_closest_to_several_lines
point_3d_world = point_closest_to_several_lines(points=points_on_the_lines,
directions=direction_vectors)
point_to_line_distances = []
for p,d in zip(points_on_the_lines, direction_vectors):
n = d / numpy.linalg.norm(d) # normalized direction
point_3d_p = point_3d_world - p # center on a point on the line
# To compute distance, consider right triangle formed by the projection...
distance = numpy.sqrt(numpy.dot(point_3d_p,point_3d_p) - (numpy.dot(point_3d_p,n))**2)
point_to_line_distances.append(distance)
return point_3d_world, point_to_line_distances
# Check division model invertibility for a specific camera
def check_distortion_model_invertability(intrinsics):
u_grid = numpy.arange(intrinsics['ImageWidth']).astype(numpy.float64)
v_grid = numpy.arange(intrinsics['ImageHeight']).astype(numpy.float64)
u, v= numpy.meshgrid(u_grid,v_grid,sparse=False)
u -= intrinsics['Cx']
v -= intrinsics['Cy']
u *= intrinsics['Sx'] # in the HALCON .dat files it seems the values are in meters
v *= intrinsics['Sy']
kappa = intrinsics['Kappa']
# Distort and undistort
u_tilde,v_tilde = distort_division(u,v,kappa)
from numpy import all, abs
assert not all(abs(u_tilde-u)<intrinsics['Sx']), 'Warning: Distortion is at most sub-pixel! Probably a bug!'
assert not all(abs(v_tilde-v)<intrinsics['Sy']), 'Warning: Distortion is at most sub-pixel! Probably a bug!'
u2,v2 = undistort_division(u_tilde,v_tilde,kappa)
eps = .001*intrinsics['Sx'] # a thousandth of a pixel
assert all(abs(u-u2)<eps) and all(abs(v-v2)<eps), 'Camera intrinsics are not invertible on the image domain!'
# Undistort then Distort
u_tilde,v_tilde = undistort_division(u,v,kappa)
assert not all(abs(u_tilde-u)<intrinsics['Sx']), 'Warning: Distortion is at most sub-pixel! Probably a bug!'
assert not all(abs(v_tilde-v)<intrinsics['Sy']), 'Warning: Distortion is at most sub-pixel! Probably a bug!'
u2,v2 = distort_division(u_tilde,v_tilde,kappa)
assert all(abs(u-u2)<eps) and all(abs(v-v2)<eps), 'Camera intrinsics are not invertible on the image domain!'
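# Hedged usage sketch: check_distortion_model_invertability expects a
# HALCON-style intrinsics dict (as read from a .dat file); the values below
# are illustrative only.
def _example_check_invertability():
    intrinsics = {
        'ImageWidth': 1280,
        'ImageHeight': 1024,
        'Cx': 640.0,
        'Cy': 512.0,
        'Sx': 5e-6,     # pixel pitch in metres, per the comment above
        'Sy': 5e-6,
        'Kappa': -250.0,
    }
    check_distortion_model_invertability(intrinsics)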
if __name__ == '__main__':
    # NOTE: these smoke-test helpers are not defined in this module and are
    # assumed to live alongside it; see check_distortion_model_invertability
    # above for the checker that takes a HALCON-style intrinsics dict.
    test_division_model_invertability(1)
    test_division_model_invertability(20)
|
|
###############################################################################
# Copyright (c) 2018-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Utilities for applying calibration solutions to visibilities and weights."""
import logging
import dask.array as da
import numba
import numpy as np
from .categorical import CategoricalData, ComparableArrayWrapper
from .flags import POSTPROC
from .sensordata import SensorGetter, SimpleSensorGetter
from .spectral_window import SpectralWindow
# A constant indicating invalid / absent gain (typically due to flagged data)
INVALID_GAIN = np.complex64(complex(np.nan, np.nan))
# All the calibration product types katdal knows about
CAL_PRODUCT_TYPES = ('K', 'B', 'G', 'GPHASE', 'GAMP_PHASE')
logger = logging.getLogger(__name__)
def complex_interp(x, xi, yi, left=None, right=None):
"""Piecewise linear interpolation of magnitude and phase of complex values.
Given discrete data points (`xi`, `yi`), this returns a 1-D piecewise
linear interpolation `y` evaluated at the `x` coordinates, similar to
`numpy.interp(x, xi, yi)`. While :func:`numpy.interp` interpolates the real
and imaginary parts of `yi` separately, this function interpolates
magnitude and (unwrapped) phase separately instead. This is useful when the
phase of `yi` changes more rapidly than its magnitude, as in electronic
gains.
Parameters
----------
x : 1-D sequence of float, length *M*
The x-coordinates at which to evaluate the interpolated values
xi : 1-D sequence of float, length *N*
The x-coordinates of the data points, must be sorted in ascending order
yi : 1-D sequence of complex, length *N*
The y-coordinates of the data points, same length as `xi`
left : complex, optional
Value to return for `x < xi[0]`, default is `yi[0]`
right : complex, optional
Value to return for `x > xi[-1]`, default is `yi[-1]`
Returns
-------
y : array of complex, length *M*
The evaluated y-coordinates, same length as `x` and same dtype as `yi`
"""
# Extract magnitude and unwrapped phase
mag_i = np.abs(yi)
phase_i = np.unwrap(np.angle(yi))
# Prepare left and right interpolation extensions
mag_left = phase_left = mag_right = phase_right = None
if left is not None:
mag_left = np.abs(left)
with np.errstate(invalid='ignore'):
phase_left = np.unwrap([phase_i[0], np.angle(left)])[1]
if right is not None:
mag_right = np.abs(right)
with np.errstate(invalid='ignore'):
phase_right = np.unwrap([phase_i[-1], np.angle(right)])[1]
# Interpolate magnitude and phase separately, and reassemble
mag = np.interp(x, xi, mag_i, left=mag_left, right=mag_right)
phase = np.interp(x, xi, phase_i, left=phase_left, right=phase_right)
y = np.empty_like(phase, dtype=np.complex128)
np.cos(phase, out=y.real)
np.sin(phase, out=y.imag)
y *= mag
return y.astype(yi.dtype)
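# Hedged illustration (not part of katdal): halfway between gains 1+0j and
# 0+1j, real/imaginary interpolation would shrink the magnitude to ~0.707,
# whereas magnitude/phase interpolation keeps unit magnitude and splits the
# 90-degree phase difference.
def _example_complex_interp():
    xi = np.array([0.0, 1.0])
    yi = np.array([1.0 + 0.0j, 0.0 + 1.0j], dtype=np.complex64)
    y = complex_interp(np.array([0.5]), xi, yi)
    assert np.allclose(np.abs(y), 1.0)
    assert np.allclose(np.angle(y), np.pi / 4)
    return y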
def _parse_cal_product(cal_product):
"""Split `cal_product` into `cal_stream` and `product_type` parts."""
fields = cal_product.rsplit('.', 1)
if len(fields) != 2:
raise ValueError(f'Calibration product {cal_product} is not in the format '
'<cal_stream>.<product_type>')
return fields[0], fields[1]
def get_cal_product(cache, cal_stream, product_type):
"""Extract calibration solution from cache as a sensor.
Parameters
----------
cache : :class:`~katdal.sensordata.SensorCache` object
Sensor cache serving cal product sensors
cal_stream : string
Name of calibration stream (e.g. "l1")
product_type : string
Calibration product type (e.g. "G")
"""
sensor_name = f'Calibration/Products/{cal_stream}/{product_type}'
return cache.get(sensor_name)
def calc_delay_correction(sensor, index, data_freqs):
"""Calculate correction sensor from delay calibration solution sensor.
Given the delay calibration solution `sensor`, this extracts the delay time
series of the input specified by `index` (in the form (pol, ant)) and
builds a categorical sensor for the corresponding complex correction terms
(channelised by `data_freqs`).
Invalid delays (NaNs) are replaced by zeros, since bandpass calibration
still has a shot at fixing any residual delay.
"""
delays = [np.nan_to_num(value[index]) for segm, value in sensor.segments()]
# Delays produced by cal pipeline are raw phase slopes, i.e. exp(2 pi j d f)
corrections = [np.exp(-2j * np.pi * d * data_freqs).astype('complex64')
for d in delays]
corrections = [ComparableArrayWrapper(c) for c in corrections]
return CategoricalData(corrections, sensor.events)
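# Hedged numerical check of the phase-slope convention above: a 1 ns delay
# across 1 GHz of bandwidth corresponds to exactly one turn of phase, so the
# correction exp(-2j pi d f) is equal at channels 1 GHz apart.
def _example_delay_phase_slope():
    d = 1e-9                                  # delay in seconds
    freqs = np.array([1.0e9, 1.5e9, 2.0e9])   # channel frequencies in Hz
    corrections = np.exp(-2j * np.pi * d * freqs).astype('complex64')
    assert np.allclose(corrections[0], corrections[2])
    return corrections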
def calc_bandpass_correction(sensor, index, data_freqs, cal_freqs):
"""Calculate correction sensor from bandpass calibration solution sensor.
Given the bandpass calibration solution `sensor`, this extracts the time
series of bandpasses (channelised by `cal_freqs`) for the input specified
by `index` (in the form (pol, ant)) and builds a categorical sensor for
the corresponding complex correction terms (channelised by `data_freqs`).
Invalid solutions (NaNs) are replaced by linear interpolations over
frequency (separately for magnitude and phase), as long as some channels
have valid solutions.
"""
corrections = []
for segment, value in sensor.segments():
bp = value[(slice(None),) + index]
valid = np.isfinite(bp)
if valid.any():
# Don't extrapolate to edges of band where gain typically drops off
bp = complex_interp(data_freqs, cal_freqs[valid], bp[valid],
left=INVALID_GAIN, right=INVALID_GAIN)
else:
bp = np.full(len(data_freqs), INVALID_GAIN)
corrections.append(ComparableArrayWrapper(np.reciprocal(bp)))
return CategoricalData(corrections, sensor.events)
def calc_gain_correction(sensor, index, targets=None):
"""Calculate correction sensor from gain calibration solution sensor.
Given the gain calibration solution `sensor`, this extracts the time
series of gains for the input specified by `index` (in the form (pol, ant))
and interpolates them over time to get the corresponding complex correction
terms. The optional `targets` parameter is a :class:`CategoricalData` i.e.
a sensor indicating the target associated with each dump. The targets can
be actual :class:`katpoint.Target` objects or indices, as long as they
uniquely identify the target. If provided, interpolate solutions derived
from one target only at dumps associated with that target, which is what
you want for self-calibration solutions (but not for standard calibration
based on gain calibrator sources).
Invalid solutions (NaNs) are replaced by linear interpolations over time
(separately for magnitude and phase), as long as some dumps have valid
solutions on the appropriate target.
"""
dumps = np.arange(sensor.events[-1])
events = []
gains = []
for segment, value in sensor.segments():
# Discard "invalid gain" placeholder (typically the initial value)
if value is INVALID_GAIN:
continue
events.append(segment.start)
gains.append(value[(Ellipsis,) + index])
if not events:
return np.full((len(dumps), 1), INVALID_GAIN)
events = np.array(events)
# Let the gains be shaped either (cal_n_chans, n_events) or (1, n_events)
gains = np.atleast_2d(np.array(gains).T)
# Assume all dumps have the same target by default, i.e. interpolate freely
if targets is None:
targets = CategoricalData([0], [0, len(dumps)])
smooth_gains = np.full((len(dumps), gains.shape[0]), INVALID_GAIN)
# Iterate over number of channels / "IFs" / subbands in gain product
for target in targets.unique_values:
on_target = (targets == target)
for chan, gains_per_chan in enumerate(gains):
valid = np.isfinite(gains_per_chan) & on_target[events]
if valid.any():
smooth_gains[on_target, chan] = complex_interp(
dumps[on_target], events[valid], gains_per_chan[valid])
return np.reciprocal(smooth_gains)
def calibrate_flux(sensor, targets, gaincal_flux):
"""Apply flux scale to calibrator gains (aka flux calibration).
Given the gain calibration solution `sensor`, this identifies the target
associated with each set of solutions by looking up the gain events in the
`targets` sensor, and then scales the gains by the inverse square root of
the relevant flux if a valid match is found in the `gaincal_flux` dict. This
is equivalent to the final step of the AIPS GETJY and CASA fluxscale tasks.
"""
# If no calibration info is available, do nothing
if not gaincal_flux:
return sensor
calibrated_gains = []
for segment, gains in sensor.segments():
# Ignore "invalid gain" placeholder (typically the initial value)
if gains is INVALID_GAIN:
calibrated_gains.append(ComparableArrayWrapper(gains))
continue
# Find the target at the time of the gain solution (i.e. gain calibrator)
target = targets[segment.start]
for name in [target.name] + target.aliases:
flux = gaincal_flux.get(name, np.nan)
# Scale the gains if a valid flux density was found for this target
if flux > 0.0:
calibrated_gains.append(ComparableArrayWrapper(gains / np.sqrt(flux)))
break
else:
calibrated_gains.append(ComparableArrayWrapper(gains))
return CategoricalData(calibrated_gains, sensor.events)
def add_applycal_sensors(cache, attrs, data_freqs, cal_stream, cal_substreams=None,
gaincal_flux={}):
"""Register virtual sensors for one calibration stream.
This operates on a single calibration stream called `cal_stream` (possibly
an alias), which derives from one or more underlying cal streams listed in
`cal_substreams` and has stream attributes in `attrs`.
The first set of virtual sensors maps all cal products into a unified
namespace (template 'Calibration/Products/`cal_stream`/{product_type}').
Map receptor inputs to the relevant indices in each calibration product
based on the ants and pols found in `attrs`. Then register a virtual sensor
per product type and per input in the SensorCache `cache`, with template
'Calibration/Corrections/`cal_stream`/{product_type}/{inp}'. The virtual
sensor function picks the appropriate correction calculator based on the
cal product type, which also uses auxiliary info like the channel
frequencies, `data_freqs`.
Parameters
----------
cache : :class:`~katdal.sensordata.SensorCache` object
Sensor cache serving cal product sensors and receiving correction sensors
attrs : dict-like
Calibration stream attributes (e.g. a "cal" telstate view)
data_freqs : array of float, shape (*F*,)
Centre frequency of each frequency channel of visibilities, in Hz
cal_stream : string
Name of (possibly virtual) calibration stream (e.g. "l1")
cal_substreams : sequence of string, optional
Names of actual underlying calibration streams (e.g. ["cal"]),
defaults to [`cal_stream`] itself
gaincal_flux : dict mapping string to float, optional
Flux density (in Jy) per gaincal target name, used to flux calibrate
the "G" product, overriding the measured flux stored in `attrs`
(if available). A value of None disables flux calibration.
Returns
-------
cal_freqs : 1D array of float, or None
Centre frequency of each frequency channel of calibration stream, in Hz
(or None if no sensors were registered)
"""
if cal_substreams is None:
cal_substreams = [cal_stream]
cal_ants = attrs.get('antlist', [])
cal_pols = attrs.get('pol_ordering', [])
cal_input_map = {ant + pol: (pol_idx, ant_idx)
for (pol_idx, pol) in enumerate(cal_pols)
for (ant_idx, ant) in enumerate(cal_ants)}
if not cal_input_map:
return
try:
cal_spw = SpectralWindow(attrs['center_freq'], None,
attrs['n_chans'], sideband=1,
bandwidth=attrs['bandwidth'])
cal_freqs = cal_spw.channel_freqs
except KeyError:
logger.warning("Disabling cal stream '%s' due to missing "
"spectral attributes", cal_stream)
return
targets = cache.get('Observation/target')
# Override pipeline fluxes (or disable flux calibration)
if gaincal_flux is None:
gaincal_flux = {}
else:
measured_flux = attrs.get('measured_flux', {}).copy()
measured_flux.update(gaincal_flux)
gaincal_flux = measured_flux
def indirect_cal_product_name(name, product_type):
# XXX The first underscore below is actually a telstate separator...
return name.split('/')[-2] + '_product_' + product_type
def indirect_cal_product_raw(cache, name, product_type):
# XXX The first underscore below is actually a telstate separator...
product_str = '_product_' + product_type
raw_products = []
for stream in cal_substreams:
sensor_name = stream + product_str
raw_product = cache.get(sensor_name, extract=False)
assert isinstance(raw_product, SensorGetter), \
sensor_name + ' is already extracted'
raw_products.append(raw_product)
if len(raw_products) == 1:
return raw_products[0]
else:
raw_products = [raw.get() for raw in raw_products]
timestamps = np.concatenate([raw_product.timestamp for raw_product in raw_products])
values = np.concatenate([raw_product.value for raw_product in raw_products])
ordered = timestamps.argsort()
timestamps = timestamps[ordered]
values = values[ordered]
return SimpleSensorGetter(indirect_cal_product_name(name, product_type),
timestamps, values)
def indirect_cal_product(cache, name, product_type):
try:
n_parts = int(attrs[f'product_{product_type}_parts'])
except KeyError:
return indirect_cal_product_raw(cache, name, product_type)
# Handle multi-part cal product (as produced by "split cal")
# First collect all the parts as sensors (and mark missing ones as None)
parts = []
for n in range(n_parts):
try:
part = indirect_cal_product_raw(cache, name + str(n), product_type + str(n))
except KeyError:
part = SimpleSensorGetter(name + str(n), np.array([]), np.array([]))
parts.append(part)
# Stitch together values with the same timestamp
parts = [part.get() for part in parts]
timestamps = []
values = []
part_indices = [0] * n_parts
part_timestamps = [
part.timestamp[0] if len(part.timestamp) else np.inf
for part in parts
]
while True:
next_timestamp = min(part_timestamps)
if next_timestamp == np.inf:
break
pieces = []
for ts, ind, part in zip(part_timestamps, part_indices, parts):
if ts == next_timestamp:
piece = ComparableArrayWrapper.unwrap(part.value[ind])
pieces.append(piece)
else:
pieces.append(None)
if any(piece is None for piece in pieces):
invalid = np.full_like(piece, INVALID_GAIN)
pieces = [piece if piece is not None else invalid for piece in pieces]
timestamps.append(next_timestamp)
value = np.concatenate(pieces, axis=0)
values.append(ComparableArrayWrapper(value))
for i, part in enumerate(parts):
if part_timestamps[i] == next_timestamp:
ts = part.timestamp
part_indices[i] += 1
part_timestamps[i] = ts[part_indices[i]] if part_indices[i] < len(ts) else np.inf
if not timestamps:
raise KeyError(f"No cal product '{name}' parts found (expected {n_parts})")
return SimpleSensorGetter(indirect_cal_product_name(name, product_type),
np.array(timestamps), np.array(values))
def calc_correction_per_input(cache, name, inp, product_type):
"""Calculate correction sensor for input `inp` from cal solutions."""
product_sensor = get_cal_product(cache, cal_stream, product_type)
try:
index = cal_input_map[inp]
except KeyError:
raise KeyError(f"No calibration solutions available for input '{inp}' - "
f'available ones are {sorted(cal_input_map.keys())}')
if product_type == 'K':
correction_sensor = calc_delay_correction(product_sensor, index,
data_freqs)
elif product_type == 'B':
correction_sensor = calc_bandpass_correction(product_sensor, index,
data_freqs, cal_freqs)
elif product_type == 'G':
product_sensor = calibrate_flux(product_sensor, targets, gaincal_flux)
correction_sensor = calc_gain_correction(product_sensor, index)
elif product_type in ('GPHASE', 'GAMP_PHASE'):
correction_sensor = calc_gain_correction(product_sensor, index, targets)
else:
raise KeyError(f"Unknown calibration product type '{product_type}' - "
f'available ones are {CAL_PRODUCT_TYPES}')
cache[name] = correction_sensor
return correction_sensor
template = f'Calibration/Products/{cal_stream}/{{product_type}}'
cache.virtual[template] = indirect_cal_product
template = f'Calibration/Corrections/{cal_stream}/{{product_type}}/{{inp}}'
cache.virtual[template] = calc_correction_per_input
return cal_freqs
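# Hedged usage sketch (not part of the original module): once the virtual sensor
# templates above are registered, per-input corrections can be pulled straight
# from the sensor cache. The stream name 'l1' and input label 'm000h' are
# assumptions for illustration only.
def _example_fetch_correction(cache):
    """Illustrative only: look up one bandpass correction sensor."""
    return cache.get('Calibration/Corrections/l1/B/m000h')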
@numba.jit(nopython=True, nogil=True)
def _correction_inputs_to_corrprods(g_per_cp, g_per_input, input1_index, input2_index):
"""Convert gains per input to gains per correlation product."""
for i in range(g_per_cp.shape[0]):
for j in range(g_per_cp.shape[1]):
g_per_cp[i, j] = (g_per_input[i, input1_index[j]]
* np.conj(g_per_input[i, input2_index[j]]))
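# Hedged sketch (not part of the original module): demonstrates the g_p * conj(g_q)
# convention used above on a toy layout with 4 channels, 2 inputs and 3 baselines.
def _example_gains_to_corrprods():
    """Illustrative only: expand per-input gains to per-corrprod gains."""
    g_per_input = np.full((4, 2), 2 + 1j, dtype='complex64')  # (channel, input)
    input1_index = np.array([0, 0, 1])
    input2_index = np.array([0, 1, 1])
    g_per_cp = np.empty((4, 3), dtype='complex64')            # (channel, corrprod)
    _correction_inputs_to_corrprods(g_per_cp, g_per_input, input1_index, input2_index)
    return g_per_cp  # every entry is (2+1j) * conj(2+1j) = 5 here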
class CorrectionParams:
"""Data needed to compute corrections in :func:`calc_correction_per_corrprod`.
Once constructed, the data in this class must not be modified, as it will
be baked into dask graphs.
Parameters
----------
inputs : list of str
Names of inputs, in the same order as the input axis of products
input1_index, input2_index : array of int
Indices into `inputs` of first and second items of correlation product
corrections : dict
A dictionary (indexed by cal product name) of lists (indexed
by input) of sequences (indexed by dump) of numpy arrays, with
corrections to apply.
channel_maps : dict
A dictionary (indexed by cal product name) of functions (signature
`g = channel_map(g, channels)`) that map the frequency axis of the
cal product `g` onto the frequency axis of the visibility data, where
the vis frequency axis will be indexed by the slice `channels`.
"""
def __init__(self, inputs, input1_index, input2_index, corrections, channel_maps):
self.inputs = inputs
self.input1_index = input1_index
self.input2_index = input2_index
self.corrections = corrections
self.channel_maps = channel_maps
def calc_correction_per_corrprod(dump, channels, params):
"""Gain correction per channel per correlation product for a given dump.
This calculates an array of complex gain correction terms of shape
(n_chans, n_corrprods) that can be directly applied to visibility data.
This incorporates all requested calibration products at the specified
dump and channels.
Parameters
----------
dump : int
Dump index (applicable to full data set, i.e. absolute)
channels : slice
Channel indices (applicable to full data set, i.e. absolute)
params : :class:`CorrectionParams`
Corrections per input, together with correlation product indices
Returns
-------
gains : array of complex64, shape (n_chans, n_corrprods)
Gain corrections per channel per correlation product
Raises
------
KeyError
If input and/or cal product has no associated correction
"""
n_channels = channels.stop - channels.start
g_per_input = np.ones((len(params.inputs), n_channels), dtype='complex64')
for cal_product, product_corrections in params.corrections.items():
channel_map = params.channel_maps[cal_product]
for i in range(len(params.inputs)):
sensor = product_corrections[i]
g_per_channel = sensor[dump]
g_per_input[i] *= channel_map(g_per_channel, channels)
# Transpose to (channel, input) order, and ensure C ordering
g_per_input = np.ascontiguousarray(g_per_input.T)
g_per_cp = np.empty((n_channels, len(params.input1_index)), dtype='complex64')
_correction_inputs_to_corrprods(g_per_cp, g_per_input,
params.input1_index, params.input2_index)
return g_per_cp
def _correction_block(block_info, params):
"""Calculate applycal correction for a single time-freq-baseline chunk."""
slices = tuple(slice(*loc) for loc in block_info[None]['array-location'])
block_shape = block_info[None]['chunk-shape']
correction = np.empty(block_shape, np.complex64)
# TODO: make calc_correction_per_corrprod multi-dump aware
for n, dump in enumerate(range(slices[0].start, slices[0].stop)):
correction[n] = calc_correction_per_corrprod(dump, slices[1], params)
return correction
def calc_correction(chunks, cache, corrprods, cal_products, data_freqs,
all_cal_freqs, skip_missing_products=False):
"""Create a dask array containing applycal corrections.
Parameters
----------
chunks : tuple of tuple of int
Chunking scheme of the resulting array, in normalized form (see
:func:`dask.array.core.normalize_chunks`).
cache : :class:`~katdal.sensordata.SensorCache` object
Sensor cache, used to look up individual correction sensors
corrprods : sequence of (string, string)
Selected correlation products as pairs of correlator input labels
cal_products : sequence of string
Calibration products that will contribute to corrections (e.g. ["l1.G"])
data_freqs : array of float, shape (*F*,)
Centre frequency of each frequency channel of visibilities, in Hz
all_cal_freqs : dict
Dictionary mapping cal stream name (e.g. "l1") to array of associated
frequencies
skip_missing_products : bool
If True, skip products with missing sensors instead of raising KeyError
Returns
-------
final_cal_products : list of string
List of calibration products in the order that they will be applied
(potentially a subset of `cal_products` if skipping missing products)
corrections : :class:`dask.array.Array` object, or None
Dask array that produces corrections for entire vis array, or `None` if
no calibration products were found (either `cal_products` is empty or all
products had some missing sensors and `skip_missing_products` is True)
Raises
------
KeyError
If a correction sensor for a given input and cal product is not found
(and `skip_missing_products` is False)
"""
shape = tuple(sum(bd) for bd in chunks)
if len(chunks[2]) > 1:
logger.warning('ignoring chunking on baseline axis')
chunks = (chunks[0], chunks[1], (shape[2],))
inputs = sorted(set(np.ravel(corrprods)))
input1_index = np.array([inputs.index(cp[0]) for cp in corrprods])
input2_index = np.array([inputs.index(cp[1]) for cp in corrprods])
corrections = {}
channel_maps = {}
for cal_product in cal_products:
cal_stream, product_type = _parse_cal_product(cal_product)
sensor_prefix = f'Calibration/Corrections/{cal_stream}/{product_type}/'
corrections_per_product = []
for i, inp in enumerate(inputs):
try:
sensor = cache.get(sensor_prefix + inp)
except KeyError:
if skip_missing_products:
break
else:
raise
# Indexing CategoricalData by dump is relatively slow (tens of
# microseconds), so expand it into a plain-old Python list.
if isinstance(sensor, CategoricalData):
data = [None] * sensor.events[-1]
for s, v in sensor.segments():
for j in range(s.start, s.stop):
data[j] = v
else:
data = sensor
corrections_per_product.append(data)
else:
corrections[cal_product] = corrections_per_product
# Frequency configuration for *stream* (not necessarily for product)
cal_stream_freqs = all_cal_freqs[cal_stream]
# Get number of frequency channels of *corrections* by inspecting it
# at first dump for each input and picking max to reject bad inputs.
# Expected to be either 1, len(cal_stream_freqs) or len(data_freqs).
correction_n_chans = max([len(np.atleast_1d(corr_per_input[0]))
for corr_per_input in corrections_per_product])
if correction_n_chans == 1:
# Scalar values will be broadcast by NumPy - no slicing required
channel_maps[cal_product] = lambda g, channels: g
elif correction_n_chans == len(data_freqs) and (
# This test indicates that correction frequencies either differ
# from those of cal stream (i.e. already interpolated), or the
# cal stream matches the data freqs to within 1 mHz anyway.
len(cal_stream_freqs) != len(data_freqs)
or np.allclose(cal_stream_freqs, data_freqs, rtol=0, atol=1e-3)):
# Corrections are already lined up with data - slice directly
channel_maps[cal_product] = lambda g, channels: g[channels]
else:
# Pick closest cal channel for each data channel
expand = np.abs(data_freqs[:, np.newaxis]
- cal_stream_freqs[np.newaxis, :]).argmin(axis=-1)
channel_maps[cal_product] = lambda g, channels, expand=expand: g[expand[channels]]  # bind this product's expand
final_cal_products = list(corrections.keys())
if not final_cal_products:
return final_cal_products, None
params = CorrectionParams(inputs, input1_index, input2_index,
corrections, channel_maps)
name = 'corrections[{}]'.format(','.join(sorted(final_cal_products)))
return (final_cal_products,
da.map_blocks(_correction_block, dtype=np.complex64, chunks=chunks,
name=name, params=params))
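# Hedged usage sketch (not part of the original module): a typical call into
# calc_correction(). The cal product names ['l1.K', 'l1.B', 'l1.G'] and the
# surrounding variables are assumptions about the calling data set object.
def _example_calc_correction(vis, cache, corrprods, data_freqs, all_cal_freqs):
    """Illustrative only: request corrections matching the vis chunking."""
    products, corrections = calc_correction(vis.chunks, cache, corrprods,
                                            ['l1.K', 'l1.B', 'l1.G'],
                                            data_freqs, all_cal_freqs,
                                            skip_missing_products=True)
    # `products` lists what will actually be applied; `corrections` is None if
    # nothing usable was found.
    return products, corrections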
@numba.jit(nopython=True, nogil=True)
def apply_vis_correction(data, correction):
"""Clean up and apply `correction` to visibility data in `data`."""
out = np.empty_like(data)
for i in range(out.shape[0]):
for j in range(out.shape[1]):
for k in range(out.shape[2]):
c = correction[i, j, k]
if not np.isnan(c):
out[i, j, k] = data[i, j, k] * c
else:
out[i, j, k] = data[i, j, k]
return out
@numba.jit(nopython=True, nogil=True)
def apply_weights_correction(data, correction):
"""Clean up and apply `correction` to weight data in `data`."""
out = np.empty_like(data)
for i in range(out.shape[0]):
for j in range(out.shape[1]):
for k in range(out.shape[2]):
cc = correction[i, j, k]
c = cc.real * cc.real + cc.imag * cc.imag
if c > 0: # Will be false if c is NaN
out[i, j, k] = data[i, j, k] / c
else:
out[i, j, k] = 0
return out
@numba.jit(nopython=True, nogil=True)
def apply_flags_correction(data, correction):
"""Set POSTPROC flag wherever `correction` is invalid."""
out = np.copy(data)
for i in range(out.shape[0]):
for j in range(out.shape[1]):
for k in range(out.shape[2]):
if np.isnan(correction[i, j, k]):
out[i, j, k] |= POSTPROC
return out
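# Hedged usage sketch (not part of the original module): once calc_correction()
# has produced a `corrections` dask array chunked like the data, the three numba
# kernels above can be mapped blockwise over matching arrays. The names `vis`,
# `weights` and `flags` are assumptions about the caller.
def _example_apply_corrections(vis, weights, flags, corrections):
    """Illustrative only: apply per-corrprod corrections blockwise."""
    corrected_vis = da.map_blocks(apply_vis_correction, vis, corrections,
                                  dtype=vis.dtype)
    corrected_weights = da.map_blocks(apply_weights_correction, weights, corrections,
                                      dtype=weights.dtype)
    corrected_flags = da.map_blocks(apply_flags_correction, flags, corrections,
                                    dtype=flags.dtype)
    return corrected_vis, corrected_weights, corrected_flags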
|
|
"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
@author: Josiah Walker
"""
import numpy,random
from BlockSparseMatrix import BlockSparseMatrix
from BresenhamAlgorithms import BresenhamLine,BresenhamTriangle,BresenhamPolygon
#ranges all given in cm
SonarSensor = {"spread": 15.*numpy.pi/180., "range": 500., "phitfree": -0.3, "phitoccupied": 3.}
class GridMap:
"""
Sparse gridmap for 2D mapping.
"""
def __init__(self,scale=1.0):
"""
@brief Initialise a sparse block grid-map with arc-based sensor updates.
@param scale The multiplier to rescale from input units to map cell size.
"""
self._scale = scale
self._map = BlockSparseMatrix()
def update(self,position,distance,sensorangle,sensor):
"""
@brief Update the map with a sensor reading.
@param position The robot's current pose given as (x,y,theta) for the robot's position and angle.
@param distance The distance measurement from the sensor.
@param sensorangle The current angle from the robot's forward direction to the sensor.
@param sensor A dict holding sensor-specific hardware data (see SonarSensor in this file).
"""
#generate the angle positions (change angleUpdates for more accurate approximation)
angleUpdates = 4
thetas = []
for i in xrange(angleUpdates-1):
thetas.append(position[2] + i*sensor["spread"]/angleUpdates - sensor["spread"]/2. + sensorangle)
thetas.append(position[2] + sensor["spread"]/2. + sensorangle)
#generate the arc and robot positions
positions = [numpy.array(position[:2])*self._scale]
for t in thetas:
positions.append(
numpy.round(
numpy.array([numpy.cos(t),numpy.sin(t)]) *
distance *
self._scale + positions[0]
).astype(numpy.int64)
)
positions[0] = numpy.round(positions[0]).astype(numpy.int64)
#FILL THE EMPTY ARC AREA OF THE SENSOR (as an approximate polygon)
emptyVal = sensor["phitfree"]
for cell in BresenhamPolygon(positions):
self._map[cell[0],cell[1]] = max(emptyVal+self._map[cell[0],cell[1]],-20.) #clip to -20
#DO BRESENHAM detection on the arc edge for object hits
hitVals = BresenhamLine(positions[1],positions[2])
solidVal = sensor["phitoccupied"]
startpt = 0
for i in xrange(1,len(positions)-1):
hitVals = BresenhamLine(positions[i],positions[i+1])
solidVal = sensor["phitoccupied"]
for h in hitVals[startpt:]:
self._map[h[0],h[1]] = min(solidVal+self._map[h[0],h[1]],120.) #clip to 120
startpt = 1 #skip the first part of all following line segments
def get(self,location):
"""
@brief Get the value at a certain x,y location.
@param location A location in the form [x,y]
"""
location = numpy.round(location*self._scale).astype(numpy.int64)
return self._map[location[0],location[1]]
def getRange(self,topleft,bottomright):
"""
@brief Get the values for a range of locations as a matrix. Note: this returns at the internal scale, not the external scale
@param topleft A location in the form [x,y] in external units designating the top left of the area
@param bottomright A location in the form [x,y] in external units designating the bottom right of the area
"""
#convert into map scale
topleft = numpy.round(numpy.array(topleft)*self._scale).astype(numpy.int64)
bottomright = numpy.round(numpy.array(bottomright)*self._scale).astype(numpy.int64)
#fill in the output
result = numpy.zeros((bottomright[0]-topleft[0],bottomright[1]-topleft[1]))
for i in xrange(topleft[0],bottomright[0]):
ival = numpy.round(i).astype(numpy.int64)
for j in xrange(topleft[1],bottomright[1]):
jval = numpy.round(j).astype(numpy.int64)
result[i-topleft[0],j-topleft[1]] = self._map[ival,jval]
return result
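#Hedged usage sketch (not part of the original module): build a half-resolution
#map, push a single 3m sonar return into it and read back one cell. Units follow
#the module convention of centimetres; the pose and reading are made up.
def exampleSingleUpdate():
    """Illustrative only: one sonar update followed by a point query."""
    gmap = GridMap(scale=0.5)
    pose = (150.0, 150.0, 0.0)                 #x, y, theta
    gmap.update(pose, 300.0, 0.0, SonarSensor) #a 3m hit straight ahead
    return gmap.get(numpy.array([450.0, 150.0]))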
if __name__ == '__main__':
"""
Do validation test
"""
import time,os
from matplotlib import pyplot
#set this true and have mencoder to create a video of the test
makevideo = True
#set up the map and scale
scale = 100.0
groundtruth = ((1,1,1,1,1),
(1,0,0,0,1),
(1,0,1,0,1),
(1,0,0,0,1),
(1,1,1,1,1))
gridScale = 0.5
#set up the grid map on a 2cm scale (half the input resolution)
estmap = GridMap(scale=gridScale)
#this is the set of positions the rover moves between
tour = ((150.0,150.0,0.0),(350.0,150.0,0.0),
(350.0,150.0,numpy.pi/2.0),(350.0,350.0,numpy.pi/2.0),
(350.0,350.0,numpy.pi),(150.0,350.0,numpy.pi),
(150.0,350.0,numpy.pi*1.5),(150.0,150.0,numpy.pi*1.5),(150.0,150.0,numpy.pi*2))
#this is the number of steps along each part of the tour
divs = 100
vals = []
for i in xrange(len(tour)-1):
for j in xrange(divs):
position = numpy.array(tour[i])*(1.-j/float(divs))+numpy.array(tour[(i+1)%len(tour)])*(j/float(divs))
p = position[:2]
a = -position[2]+numpy.pi
offset = numpy.array([numpy.sin(a),numpy.cos(a)])*20.
for k in xrange(4):
#simulate each of the sonar sensor sweeps and see if we hit anything.
sensor = SonarSensor
sensorangle = numpy.pi/2*k
thetamax = position[2] + sensor["spread"]/2. + sensorangle
thetamin = position[2] - sensor["spread"]/2. + sensorangle
baseB = numpy.array([numpy.cos(thetamax),numpy.sin(thetamax)])
baseC = numpy.array([numpy.cos(thetamin),numpy.sin(thetamin)])
hit = False
for distance in xrange(int(sensor["range"])):
B = numpy.round(baseB*distance + position[:2]).astype(numpy.int32)
C = numpy.round(baseC*distance + position[:2]).astype(numpy.int32)
for pos in BresenhamLine(B,C):
if groundtruth[int((pos[0]/scale))][int((pos[1]/scale))] == 1:
distance = numpy.linalg.norm(position[:2] - pos) #add noise in here if you want noise
hit = True
break
if hit:
t0 = time.time()
estmap.update(position,distance,sensorangle,sensor)
vals.append(time.time()-t0)
break
if not hit:
t0 = time.time()
estmap.update(position,distance,sensorangle,sensor)
vals.append(time.time()-t0)
if makevideo: #save out png's for the video
fname = '_tmp%05d.png'%(i*divs+j)
tl = (95,95)
print (i*divs+j)
robot = (numpy.array([p+offset,p-offset,p+numpy.array([-offset[1],offset[0]])])*gridScale-numpy.array(tl)*gridScale).astype(numpy.int64)
emap = numpy.clip(estmap.getRange(tl,(405,405)), -1000,1000 )
for cell in BresenhamTriangle(robot[0],robot[1],robot[2]):
emap[cell[0],cell[1]] = 120
pyplot.imsave(fname,emap)
pyplot.clf()
print "Mean Sensor Update Time:", numpy.mean(vals)
if makevideo: #convert png's to video
#recent ubuntu versions use avconv
os.system("avconv -r 30 -i _tmp%05d.png -b:v 1000k rovertest.mp4")
#os.system("mencoder 'mf://*.png' -mf type=png:fps=30 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o rovertest.avi")
os.system("rm -f _tmp*.png")
|
|
#!/usr/bin/env python
"""
==============================================================================
Program: wb1.grp.py
Author: Kyle Reese Almryde
Date: 11/19/2012 @ 04:00:46 PM
Description: The purpose of this program is to perform a group-level one-sample ttest
on the imaging data for the Word Boundary Laterality project. In
addition to performing the ttest analysis, this program extracts
and reports regions of interest from the resultant neural
activation maps that are deemed significant by the ttest.
==============================================================================
"""
import sys
import os
def usage_message():
print """
------------------------------------------------------------------------
+ +++ No arguments provided! +++ +
+ +
+ This program requires at least 2 arguments. +
+ +
+ NOTE: [words] in square brackets represent possible input. +
+ See below for available options. +
+ +
------------------------------------------------------------------------
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ Argument 1: Experimental condition +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ +
+ [learn] For the Learnable Condition +
+ [unlearn] For the Unlearnable Condition +
+ +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ Argument 2: Analysis Operation +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ +
+ [pre] Construct an un-edited statistical mask +
+ [post] Compute stats on the edited statistical mask +
+ +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
------------------------------------------------------------------------
+ Example command-line execution: +
+ +
+ python wb1.grp.py learn pre +
+ +
+ +++ Please try again +++ +
------------------------------------------------------------------------
"""
def groupImageStats(imgFile, outImage, brik=''):
""" Strip the desired image statistics from the image file
Specifically, extract the specified subbricks from the supplied
image and store them in their own file that can be manipulated
more easily later on.
Params:
imgFile -- The input 4d file. It can be a subject image
file or a group image file, so long as at
least 2 subbricks reside within the image.
The image should contain the desired path.
'/path/to/image/file/4dImage.nii.gz'
Optionally, a list of 4d images can be supplied
in which case a string will be constructed
using a list comprehension.
brik -- The desired subbrik(s) to be extracted. AFNI
conventions for specifying subbriks apply.
outImage -- The desired prefix for the newly created
image file. The path name should be included
in the image prefix
Returns:
A string composed of the output image's path and name,
in case it is needed.
"""
if type(imgFile) == list:
imgFile = ' '.join([x + brik for x in imgFile])
else:
imgFile = imgFile + brik
os.system('3dbucket -prefix ' + outImage + ' ' + imgFile)
return outImage
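# Hedged usage sketch (not part of the original script): shows the 3dbucket call
# that groupImageStats() would issue for a hypothetical subject file; the path
# and subbrik selector below are illustrative only.
def exampleGroupImageStatsCommand():
    """Illustrative only: build the equivalent 3dbucket command string."""
    imgFile = '/path/to/run1_sub013_learnable.stats.nii.gz'
    outImage = '/path/to/run1_sub013_learnable_sent_Stats.nii.gz'
    brik = '[12,13]'
    return '3dbucket -prefix ' + outImage + ' ' + imgFile + brik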
def computeImageMean(imgList, outImage, brik=''):
""" using 3dmean, average datasets
Params:
imgList -- A list of 4d images to be averaged. It is assumed
the list has already been stripped.
brik -- an optional parameter which can specify a subbrik.
outImage -- The desired prefix for the newly created
image file. The path name should be included
in the image prefix
Returns:
A string composed of the output image's path and name,
in case it is needed.
"""
imgFiles = ' '.join([x + brik for x in imgList])
os.system('3dMean -prefix ' + outImage + ' ' + imgFiles)
return outImage
def subj_NoNeg(imgFile, outImage):
""" One line description
Params:
imgFile -- The input 4d image
outImage -- The desired prefix for the newly created image file
Returns:
None
"""
os.system('3dmerge -1noneg -prefix ' + outImage + ' ' + imgFile)
def statMask(imgFile, outMaskImg, cluster='273', plvl='0.01'):
""" Create a statistical mask image
Params:
imgFile -- The input 4d image
outMaskImg -- The desired output name
cluster -- The number of clusters, default is 273
plvl -- The corrected alpha level, default is 0.01
Returns:
None
"""
# TODO: see if a proper stats function in Python could be used instead of the AFNI ccalc call here
thresh = os.popen("ccalc -expr 'fitt_p2t(" + plvl + "000,128)'").read().strip()
os.system('3dmerge -1noneg -1tindex 1 -dxyz=1'
+ ' -1clust_order 1.01 ' + cluster
+ ' -1thresh ' + thresh
+ ' -prefix ' + outMaskImg + ' ' + imgFile)
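# Hedged sketch (not part of the original script): with the default arguments,
# statMask() expands to roughly the following 3dmerge call; the threshold shown
# is a placeholder for whatever ccalc's fitt_p2t(0.01000,128) returns, and the
# paths are illustrative only.
def exampleStatMaskCommand():
    """Illustrative only: build the equivalent 3dmerge command string."""
    thresh = '2.61'  # placeholder value
    return ('3dmerge -1noneg -1tindex 1 -dxyz=1'
            + ' -1clust_order 1.01 273'
            + ' -1thresh ' + thresh
            + ' -prefix /path/to/statMask.nii.gz /path/to/grpStatsMean.nii.gz')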
def oneSample_tTest(inputImg, maskFile, outImage, brik=''):
""" perform a one sample tTest
Params:
inputImg -- The input 4d image, or a list of 4d images
maskFile -- A mask image restricting which voxels enter the ttest
brik -- An optional subbrik selector applied to the input image(s)
outImage -- The desired prefix for the newly created image file
Returns:
None
"""
if type(inputImg) == list:
imgFile = ' '.join([x + brik for x in inputImg])
else:
imgFile = inputImg + brik
os.system('3dttest++ -setA ' + imgFile
+ ' -mask ' + maskFile
+ ' -prefix ' + outImage)
def getSubjStats_ROI(imgFile, maskImg):
"""Get ROI statistics per subject per ROI
Params:
imgFile -- The subject image file
maskImg -- The ROI mask image
Returns:
A tab-delimited string containing the subject#, Scan, Condition, event, Side, ROI, volume, mean, and stdev
"""
imgInfo = imgFile.split('/')[-1].split('_')
maskInfo = maskImg.split('/')[-1].split('_')
roiStats = os.popen('3dmaskave -sigma -mask ' + maskImg + ' ' + imgFile).readlines()[-1].strip().split()
mean = roiStats[0]
stdev = roiStats[1]
volume = roiStats[2][1:]
return '\t'.join([imgInfo[1], imgInfo[0], imgInfo[2], 'sent', maskInfo[2], maskInfo[3][:-4], volume, mean, stdev, imgFile, maskImg])
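# Hedged sketch (not part of the original script): getSubjStats_ROI() assumes the
# last line of 3dmaskave -sigma output ends with "<mean> <stdev> [<nvox> voxels]";
# the slicing below mirrors that assumption on a canned line.
def exampleMaskaveParsing():
    """Illustrative only: split a canned 3dmaskave output line."""
    line = '0.4123 0.0881 [273 voxels]'
    roiStats = line.strip().split()
    mean, stdev, volume = roiStats[0], roiStats[1], roiStats[2][1:]
    return mean, stdev, volume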
def getClusterStats_tTest(imgFile): # This has potential to be very Modular, I just need to decide if I like it enough
"""Extract cluster stats from tTest image file
This function uses the os mondule popen to capture output from
afni's 3dclust command. Presently it assumes the image is in
2x2x2 resolution. Output is the mean and peak voxel intensity
followed by the peak xyz coordinates
Params:
imgFile -- a 4D Image, path included eg,
'/path/to/image/file/4dImage.nii.gz'
Returns:
stats -- a string containing the values for Scan, Condition, event, Side, ROI, volume, mean
"""
imgInfo = imgFile.split('/')[-1].split('_')
clusterTbl = os.popen('3dclust -orient RPI -1noneg -1dindex 0 -1tindex 1 -1thresh 2.040 2 0 ' + imgFile).readlines()[-1].strip() # Strip newline and get last line with the stats output table from 3dclust
mean = clusterTbl.split()[-6] # get the mean of the image file
volume = clusterTbl.split()[0]
return '\t'.join([imgInfo[0], imgInfo[1], imgInfo[2], imgInfo[-1][:-7], imgInfo[3], imgInfo[4], volume, mean, 'NA', imgFile])
#=============================== START OF MAIN ===============================
def main():
"""Generate unedited statistical masks from group averaged single subject data"""
cond = sys.argv[1]
operation = sys.argv[2]
subjDict = {'learn': [13, 16, 19, 21, 23, 27, 28, 33, 35, 39, 46, 50, 57, 67, 69, 73, 'learnable'],
'unlearn': [9, 11, 12, 18, 22, 30, 31, 32, 38, 45, 47, 48, 49, 51, 59, 60, 'unlearnable']}
condition = subjDict[cond][-1] # learnable or unlearnable depending on value of cond
for scan in ('Run1', 'Run2', 'Run3'):
scanDir = scan + '/'
#---------------------------------#
# Define pointers for GRP results #
#---------------------------------#
ANOVA = '/Volumes/Data/WB1/ANOVA/'
COMBO = ANOVA + 'Combo/' + scanDir
MASK = ANOVA + 'Mask/' + scanDir
MEAN = ANOVA + 'Mean/' + scanDir
MERG = ANOVA + 'Merge/' + scanDir
TTEST = ANOVA + 'tTest/' + scanDir
REPORT = ANOVA + 'Report/' + scanDir
subStatsImgList = []
#--------------------#
# Initiate functions #
#--------------------#
if operation == 'pre':
#---------------------------#
# Begin pre-Mask operations #
#---------------------------#
for subj in subjDict[cond][0:-1]:
subj = 'sub%003d' % subj
subjDir = subj + '/'
# STATS is the directory containing the individual subject images output from the glm
STATS = '/Volumes/Data/WB1/GLM/' + subjDir + 'Glm/' + scanDir + 'Stats/'
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing groupImageStats():
# ++ Strip coef and tstat briks for the sent condition from subject files, bucket them into their own file.
# sub4dImg -- input file
# brik -- subbrik index for the coef and tstat of the sent condition
# subStatsImg -- output file name
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sub4dImg = STATS + '_'.join([scan.lower(), subj, 'tshift_volreg_despike_mni_7mm_164tr_0sec', condition]) + '.stats.nii.gz'
subStatsImg = COMBO + '_'.join([scan.lower(), subj, condition, 'sent', 'Stats']) + '.nii.gz'
brik = '[12,13]'
# Store new files in a list which will be used as input to the next function
subStatsImgList.append(groupImageStats(sub4dImg, subStatsImg, brik))
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing computeImageMean():
# ++ Compute group mean image of all subjects for the coef and tstat subbriks
# subStatsImgList -- input file; List containing the subjects computed stat image
# grpMeanCoef -- output file; The group mean of the coef statistic
# grpMeanTstat -- output file; The group mean of the tstat statistic
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
grpMeanCoef = MEAN + '_'.join([scan.lower(), condition, 'sent', 'Group', 'Mean', 'Coef']) + '.nii.gz'
grpMeanTstat = MEAN + '_'.join([scan.lower(), condition, 'sent', 'Group', 'Mean', 'Tstat']) + '.nii.gz'
computeImageMean(subStatsImgList, grpMeanCoef, '[0]')
computeImageMean(subStatsImgList, grpMeanTstat, '[1]')
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing groupImageStats():
# ++ Combine the mean coef and tstat images
# grpMeanList -- input file; List containing of the mean coef and tstat images
# grpStatsMean -- output file; Combined file containing the mean coef and tstat images
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
grpMeanList = [grpMeanCoef, grpMeanTstat]
grpStatsMean = MEAN + '_'.join([scan.lower(), condition, 'sent', 'Group', 'StatsMean']) + '.nii.gz'
groupImageStats(grpMeanList, grpStatsMean)
os.system('3drefit -substatpar 1 fitt 128 ' + grpStatsMean)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing subj_NoNeg():
# ++ Compute Positive activation only images
# subjImg -- input file;
# outImgNoNoeg -- output file;
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for subjImg in subStatsImgList:
inputImage = subjImg
subjImg = subjImg.split('/')[-1].split('_')
outImgNoNeg = MERG + '_'.join([x for x in subjImg[:-1]] + ['NoNeg', subjImg[-1]])
subj_NoNeg(inputImage, outImgNoNeg)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing statMask():
# ++ Combine the mean coef and tstat images
# grpStatsMean -- input file; Combined file containing the mean coef and tstat images
# clusterMask -- output file; A statistical mask containing statistically significant clusters
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
clusterMask01 = MASK + '_'.join([scan.lower(), condition, '01', 'cm', 'sent', 'statMask']) + '.nii.gz'
clusterMask05 = MASK + '_'.join([scan.lower(), condition, '05', 'cm', 'sent', 'statMask']) + '.nii.gz'
statMask(grpStatsMean, clusterMask01)
statMask(grpStatsMean, clusterMask05, '1402', '05')
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++End pre-Mask operations++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
elif operation == 'post':
#----------------------------#
# Begin post-Mask operations #
#----------------------------#
fout = open(REPORT + '_'.join([scan, condition, 'Report']) + '.txt', 'a+')
fout.write('ID\tRun\tCondition\tEvent\tSide\tROI\tVolume\tT-Mean\tT-Stdev\n')
# A list containing statistical masks for the given condition
maskImgList = os.popen('ls ' + MASK + '*_' + condition + '*').read().split('\n')[:-1]
ttestInputList = os.popen('ls ' + COMBO + '*sub0*_' + condition + '*').read().split('\n')[:-1]
subjNoNegList = os.popen('ls ' + MERG + '*sub0*_' + condition + '*').read().split('\n')[:-1]
print '\n Mask Image List \n', maskImgList
print '\n Ttest Image List \n', ttestInputList
print '\n Subject Image List \n', subjNoNegList
tTestReportList = []
subjReportList = []
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing oneSample_tTest():
# ++ Combine the mean coef and tstat images
# ttestInputList -- input file; A list containing the subject Stat images
# inputMask -- input file; Individual element from maskImgList used as the actual input mask file.
# outImage -- output file; A one sample Ttest stat image
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for inputMask in maskImgList:
ttestOutImage = TTEST + '_'.join(['tTest', inputMask.split('/')[-1][:-4], 'sent']) + '.nii.gz'
oneSample_tTest(ttestInputList, inputMask, ttestOutImage)
tTestReportList.append(getClusterStats_tTest(ttestOutImage))
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Executing getSubjStats_ROI():
# ++ Combine the mean coef and tstat images
# ttestInputList -- input file; A list containing the subject Stat images
# inputMask -- input file; Individual element from maskImgList used as the actual input mask file.
# outImage -- output file; A one sample Ttest stat image
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for image in [(mask, noNeg) for mask in maskImgList for noNeg in subjNoNegList]:
maskImg = image[0]
imgFile = image[1]
subjReportList.append(getSubjStats_ROI(imgFile, maskImg))
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Write Report
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
fout.write('\n'.join(tTestReportList) + '\n')
fout.write('\n'.join(subjReportList))
fout.close()
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++End Post-Mask operations+++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#--------------------#
# End functions #
#--------------------#
print """
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
\tMAIN has completed for %s
========================================================
""" % scan
if __name__ == '__main__':
main()
'''
#------------------------------------------------------------------------
# Description: ClusterSim_Output
#
# Purpose: This is an unused function provided here for documention
# purposes. It contains the 3dClustSim command used to
# generate the Cluster Size Threshold Report provided below
#------------------------------------------------------------------------
# 3dClustSim -nxyz 91 109 91 -dxyz 2 2 2 -fwhm 7 -pthr 0.05 0.01
# Grid: 91x109x91 2.00x2.00x2.00 mm^3 (902629 voxels)
#
# CLUSTER SIZE THRESHOLD(pthr,alpha) in Voxels
# -NN 1 | alpha = Prob(Cluster >= given size)
# pthr | 0.100 0.050 0.020 0.010
# ------ | ------ ------ ------ ------
0.050000 1018.3 1135.0 1289.0 1402.0
0.010000 205.1 226.1 252.0 273.0
'''
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow interface for third-party optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = ['ExternalOptimizerInterface', 'ScipyOptimizerInterface']
class ExternalOptimizerInterface(object):
"""Base class for interfaces with external optimization algorithms.
Subclass this and implement `_minimize` in order to wrap a new optimization
algorithm.
`ExternalOptimizerInterface` should not be instantiated directly; instead use
e.g. `ScipyOptimizerInterface`.
@@__init__
@@minimize
"""
def __init__(self,
loss,
var_list=None,
equalities=None,
inequalities=None,
var_to_bounds=None,
**optimizer_kwargs):
"""Initialize a new interface instance.
Args:
loss: A scalar `Tensor` to be minimized.
var_list: Optional `list` of `Variable` objects to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
equalities: Optional `list` of equality constraint scalar `Tensor`s to be
held equal to zero.
inequalities: Optional `list` of inequality constraint scalar `Tensor`s
to be held nonnegative.
var_to_bounds: Optional `dict` where each key is an optimization
`Variable` and each corresponding value is a length-2 tuple of
`(low, high)` bounds. Although enforcing this kind of simple constraint
could be accomplished with the `inequalities` arg, not all optimization
algorithms support general inequality constraints, e.g. L-BFGS-B. Both
`low` and `high` can either be numbers or anything convertible to a
NumPy array that can be broadcast to the shape of `var` (using
`np.broadcast_to`). To indicate that there is no bound, use `None` (or
`+/- np.infty`). For example, if `var` is a 2x3 matrix, then any of
the following corresponding `bounds` could be supplied:
* `(0, np.infty)`: Each element of `var` held positive.
* `(-np.infty, [1, 2])`: First column less than 1, second column less
than 2.
* `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less
than 2, etc.
* `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,
`var[0, 1]` less than 2, etc.
**optimizer_kwargs: Other subclass-specific keyword arguments.
"""
self._loss = loss
self._equalities = equalities or []
self._inequalities = inequalities or []
if var_list is None:
self._vars = variables.trainable_variables()
else:
self._vars = list(var_list)
packed_bounds = None
if var_to_bounds is not None:
left_packed_bounds = []
right_packed_bounds = []
for var in self._vars:
shape = var.get_shape().as_list()
bounds = (-np.infty, np.infty)
if var in var_to_bounds:
bounds = var_to_bounds[var]
left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
self._packed_bounds = packed_bounds
self._update_placeholders = [
array_ops.placeholder(var.dtype) for var in self._vars
]
self._var_updates = [
var.assign(array_ops.reshape(placeholder, _get_shape_tuple(var)))
for var, placeholder in zip(self._vars, self._update_placeholders)
]
loss_grads = _compute_gradients(loss, self._vars)
equalities_grads = [
_compute_gradients(equality, self._vars)
for equality in self._equalities
]
inequalities_grads = [
_compute_gradients(inequality, self._vars)
for inequality in self._inequalities
]
self.optimizer_kwargs = optimizer_kwargs
self._packed_var = self._pack(self._vars)
self._packed_loss_grad = self._pack(loss_grads)
self._packed_equality_grads = [
self._pack(equality_grads) for equality_grads in equalities_grads
]
self._packed_inequality_grads = [
self._pack(inequality_grads) for inequality_grads in inequalities_grads
]
dims = [_prod(_get_shape_tuple(var)) for var in self._vars]
accumulated_dims = list(_accumulate(dims))
self._packing_slices = [
slice(start, end)
for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
]
def minimize(self,
session=None,
feed_dict=None,
fetches=None,
step_callback=None,
loss_callback=None,
**run_kwargs):
"""Minimize a scalar `Tensor`.
Variables subject to optimization are updated in-place at the end of
optimization.
Note that this method does *not* just return a minimization `Op`, unlike
`Optimizer.minimize()`; instead it actually performs minimization by
executing commands to control a `Session`.
Args:
session: A `Session` instance.
feed_dict: A feed dict to be passed to calls to `session.run`.
fetches: A list of `Tensor`s to fetch and supply to `loss_callback`
as positional arguments.
step_callback: A function to be called at each optimization step;
arguments are the current values of all optimization variables
flattened into a single vector.
loss_callback: A function to be called every time the loss and gradients
are computed, with evaluated fetches supplied as positional arguments.
**run_kwargs: kwargs to pass to `session.run`.
"""
session = session or ops.get_default_session()
feed_dict = feed_dict or {}
fetches = fetches or []
loss_callback = loss_callback or (lambda *fetches: None)
step_callback = step_callback or (lambda xk: None)
# Construct loss function and associated gradient.
loss_grad_func = self._make_eval_func([self._loss,
self._packed_loss_grad], session,
feed_dict, fetches, loss_callback)
# Construct equality constraint functions and associated gradients.
equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict,
fetches)
equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads,
session, feed_dict, fetches)
# Construct inequality constraint functions and associated gradients.
inequality_funcs = self._make_eval_funcs(self._inequalities, session,
feed_dict, fetches)
inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads,
session, feed_dict, fetches)
# Get initial value from TF session.
initial_packed_var_val = session.run(self._packed_var)
# Perform minimization.
packed_var_val = self._minimize(
initial_val=initial_packed_var_val,
loss_grad_func=loss_grad_func,
equality_funcs=equality_funcs,
equality_grad_funcs=equality_grad_funcs,
inequality_funcs=inequality_funcs,
inequality_grad_funcs=inequality_grad_funcs,
packed_bounds=self._packed_bounds,
step_callback=step_callback,
optimizer_kwargs=self.optimizer_kwargs)
var_vals = [
packed_var_val[packing_slice] for packing_slice in self._packing_slices
]
# Set optimization variables to their new values.
session.run(
self._var_updates,
feed_dict=dict(zip(self._update_placeholders, var_vals)),
**run_kwargs)
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
"""Wrapper for a particular optimization algorithm implementation.
It would be appropriate for a subclass implementation of this method to
raise `NotImplementedError` if unsupported arguments are passed: e.g. if an
algorithm does not support constraints but `len(equality_funcs) > 0`.
Args:
initial_val: A NumPy vector of initial values.
loss_grad_func: A function accepting a NumPy packed variable vector and
returning two outputs, a loss value and the gradient of that loss with
respect to the packed variable vector.
equality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold exactly zero.
equality_grad_funcs: A list of gradients of equality_funcs.
inequality_funcs: A list of functions each of which specifies a scalar
quantity that an optimizer should hold >= 0.
inequality_grad_funcs: A list of gradients of inequality_funcs.
packed_bounds: A list of bounds for each index, or `None`.
step_callback: A callback function to execute at each optimization step,
supplied with the current value of the packed variable vector.
optimizer_kwargs: Other key-value arguments available to the optimizer.
Returns:
The optimal variable vector as a NumPy vector.
"""
raise NotImplementedError(
'To use ExternalOptimizerInterface, subclass from it and implement '
'the _minimize() method.')
@classmethod
def _pack(cls, tensors):
"""Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
if not tensors:
return None
elif len(tensors) == 1:
return array_ops.reshape(tensors[0], [-1])
else:
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
return array_ops.concat(flattened, 0)
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
"""Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
if not isinstance(tensors, list):
tensors = [tensors]
num_tensors = len(tensors)
def eval_func(x):
"""Function to evaluate a `Tensor`."""
augmented_feed_dict = {
var: x[packing_slice].reshape(_get_shape_tuple(var))
for var, packing_slice in zip(self._vars, self._packing_slices)
}
augmented_feed_dict.update(feed_dict)
augmented_fetches = tensors + fetches
augmented_fetch_vals = session.run(
augmented_fetches, feed_dict=augmented_feed_dict)
if callable(callback):
callback(*augmented_fetch_vals[num_tensors:])
return augmented_fetch_vals[:num_tensors]
return eval_func
def _make_eval_funcs(self,
tensors,
session,
feed_dict,
fetches,
callback=None):
return [
self._make_eval_func(tensor, session, feed_dict, fetches, callback)
for tensor in tensors
]
class ScipyOptimizerInterface(ExternalOptimizerInterface):
"""Wrapper allowing `scipy.optimize.minimize` to operate a `tf.Session`.
Example:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [0., 0.].
```
Example with simple bound constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
optimizer = ScipyOptimizerInterface(
loss, var_to_bounds={vector: ([1, 2], np.infty)})
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 2.].
```
Example with more complicated constraints:
```python
vector = tf.Variable([7., 7.], 'vector')
# Make vector norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
# Ensure the vector's y component is = 1.
equalities = [vector[1] - 1.]
# Ensure the vector's x component is >= 1.
inequalities = [vector[0] - 1.]
# Our default SciPy optimization algorithm, L-BFGS-B, does not support
# general constraints. Thus we use SLSQP instead.
optimizer = ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities, method='SLSQP')
with tf.Session() as session:
optimizer.minimize(session)
# The value of vector should now be [1., 1.].
```
"""
_DEFAULT_METHOD = 'L-BFGS-B'
def _minimize(self, initial_val, loss_grad_func, equality_funcs,
equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
packed_bounds, step_callback, optimizer_kwargs):
def loss_grad_func_wrapper(x):
# SciPy's L-BFGS-B Fortran implementation requires gradients as doubles.
loss, gradient = loss_grad_func(x)
return loss, gradient.astype('float64')
method = optimizer_kwargs.pop('method', self._DEFAULT_METHOD)
constraints = []
for func, grad_func in zip(equality_funcs, equality_grad_funcs):
constraints.append({'type': 'eq', 'fun': func, 'jac': grad_func})
for func, grad_func in zip(inequality_funcs, inequality_grad_funcs):
constraints.append({'type': 'ineq', 'fun': func, 'jac': grad_func})
minimize_args = [loss_grad_func_wrapper, initial_val]
minimize_kwargs = {
'jac': True,
'callback': step_callback,
'method': method,
'constraints': constraints,
'bounds': packed_bounds,
}
for kwarg in minimize_kwargs:
if kwarg in optimizer_kwargs:
if kwarg == 'bounds':
# Special handling for 'bounds' kwarg since ability to specify bounds
# was added after this module was already publicly released.
raise ValueError(
'Bounds must be set using the var_to_bounds argument')
raise ValueError(
'Optimizer keyword arg \'{}\' is set '
'automatically and cannot be injected manually'.format(kwarg))
minimize_kwargs.update(optimizer_kwargs)
if method == 'SLSQP':
# SLSQP doesn't support step callbacks. Obviate associated warning
# message.
del minimize_kwargs['callback']
import scipy.optimize # pylint: disable=g-import-not-at-top
result = scipy.optimize.minimize(*minimize_args, **minimize_kwargs)
message_lines = [
'Optimization terminated with:',
' Message: %s',
' Objective function value: %f',
]
message_args = [result.message, result.fun]
if hasattr(result, 'nit'):
# Some optimization methods might not provide information such as nit and
# nfev in the return. Logs only available information.
message_lines.append(' Number of iterations: %d')
message_args.append(result.nit)
if hasattr(result, 'nfev'):
message_lines.append(' Number of function evaluations: %d')
message_args.append(result.nfev)
logging.info('\n'.join(message_lines), *message_args)
return result['x']
def _accumulate(list_):
total = 0
yield total
for x in list_:
total += x
yield total
def _get_shape_tuple(tensor):
return tuple(dim.value for dim in tensor.get_shape())
def _prod(array):
prod = 1
for value in array:
prod *= value
return prod
def _compute_gradients(tensor, var_list):
grads = gradients.gradients(tensor, var_list)
# tf.gradients sometimes returns `None` when it should return 0.
return [
grad if grad is not None else array_ops.zeros_like(var)
for var, grad in zip(var_list, grads)
]
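# Hedged usage sketch (not part of the original module): a tiny end-to-end run of
# ScipyOptimizerInterface mirroring the class docstring example. The explicit
# variable-initialization step and the `tf` import are assumptions about the
# calling script rather than requirements imposed by this module.
def _example_scipy_optimizer_interface():
  """Illustrative only: minimize ||v||^2 with the default L-BFGS-B method."""
  import tensorflow as tf  # assumed importable alongside this module
  vector = tf.Variable([7., 7.], name='vector')
  loss = tf.reduce_sum(tf.square(vector))
  optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
  with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    optimizer.minimize(session)
    return session.run(vector)  # expected to be approximately [0., 0.]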
|
|
from copy import deepcopy
from string import replace
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from openbudgets.apps.transport.incoming.errors import DataValidationError
ITEM_SEPARATOR = settings.OPENBUDGETS_IMPORT_INTRA_FIELD_MULTIPLE_VALUE_DELIMITER
class ParsingError(Exception):
pass
class BaseParser(object):
"""
Parsers take a raw dataset, usually from an importer, and transform
it ultimately to persisted data in the datastore.
This process consists of two steps: validate and save.
If the two steps are to be divided asynchronously then it's possible to
get a serializable deferred object after validation, which can later be
resolved and saved by the same parser class.
"""
#PATH_DELIMITER = settings.OPENBUDGETS_IMPORT_INTRA_FIELD_DELIMITER
PATH_DELIMITER = ','
ITEM_SEPARATOR = ITEM_SEPARATOR
ITEM_MODEL_CLEANING_EXCLUDE = tuple()
def __init__(self, container_object_dict):
self.valid = True
self.dry = False
self.errors = []
self.saved_cache = {}
self.objects_lookup = {}
# we usually get this from the importer
self.container_object_dict = container_object_dict
# objects to delete at the end of process
self.dirty_list = []
@classmethod
def resolve(cls, deferred):
"""
Resolve a deferred representation of a parser's data.
Instantiates the class and returns an instance that
should be ready for saving.
"""
container_dict = deferred['container']
if not container_dict:
raise Exception('Deferred object missing container dict: %s' % container_dict)
instance = cls(container_dict)
instance.objects_lookup = deferred['items']
# basically we're only sure the objects lookup is generated but this
# should have been a side effect of validating the data
return instance
def clean(self, data):
for row_num, obj in enumerate(data):
parent_scope = obj.get('parentscope')
inverse_scopes = obj.get('inversescope')
if parent_scope:
obj['parentscope'] = replace(parent_scope, '|', ',')
if inverse_scopes:
inverse_scopes = [replace(scope, '|', ',') for scope in ITEM_SEPARATOR.split(inverse_scopes)]
obj['inversescope'] = ITEM_SEPARATOR.join(inverse_scopes)
return data
def validate(self, data, keep_cache=False):
"""
Takes a dataset, cleans it, prepares it for saving
and runs all possible validations to make sure
that a consecutive call on this prepared data
sends it straight to the datastore without any exceptions.
It generates a lookup dictionary of the dataset that will later be
iterated upon when saving.
If the `keep_cache` argument is `True` then the parser's `saved_cache`
property, used for storing item instances created from the dataset, is
not cleared at the end of the validation.
You'll have to explicitly call `_clear_cache()` after that when you
need to.
Returns a boolean if validation is successful and a list of errors that
were thrown during validation.
"""
data = self.clean(data)
# generate a lookup table with each item uniquely identified
self._generate_lookup(data)
self.keep_cache = keep_cache
# run a dry save of the data
self.save(dry=True)
return self.valid, self.errors
def save(self, dry=False):
"""
Saves the pre-stored objects in the `objects_lookup` dict, as items.
Also creates the container from the data given on instantiation.
When running in dry mode nothing is actually saved to the datastore
and nothing is persisted.
All the generated model instances are stored in the `saved_cache` dict
and never saved to datastore.
"""
self.dry = dry
# create an instance of the container
self._create_container()
if dry:
# save an untampered copy
lookup_table_copy = deepcopy(self.objects_lookup)
# loop the lookup table and save every item
self._save_items()
if dry:
if not self.keep_cache:
self._clear_cache()
# clear all changes by replacing the lookup with the old copy
self.objects_lookup = lookup_table_copy
self.dry = False
return True
def deferred(self):
"""
Generates and returns a deferredable representation of the
parser.
"""
return {
'container': self.container_object_dict,
'items': self.objects_lookup
}
def throw(self, error):
"""
Takes an error instance from the `incoming.errors` module and
appends it to the list of errors.
Also makes sure `valid` is `False`.
"""
self.valid = False
self.errors.append(error)
return self
def cleanup(self, *objects):
"""Delete all unneeded entities."""
if len(objects):
# if used as a setter, append objects to the list
self.dirty_list += objects
else:
# used as getter simply flush the list
for item in self.dirty_list:
item.delete()
return self
def _generate_lookup(self, data):
"""
Generates the data's objects lookup table for saving them later.
This method needs to be implemented by non-abstract implementations.
"""
raise NotImplementedError
def _save_items(self):
"""Saves all the objects in `self.objects_lookup` into DB.
If this is a dry run then just attempts to validate the save process.
"""
for key, obj in self.objects_lookup.iteritems():
self._save_item(obj, key)
def _save_item(self, obj, key):
"""
Saves a given object as an item - as in instance of `item_model` attribute.
This method needs to be implemented by non-abstract implementations.
"""
raise NotImplementedError
def _create_item(self, obj, key):
self._clean_object(obj, key)
if not self.dry:
item = self.item_model.objects.create(**obj)
else:
item = self.item_model(**obj)
row_num = self.rows_objects_lookup.get(key, None)
self._dry_clean(item, row_num=row_num, exclude=self.ITEM_MODEL_CLEANING_EXCLUDE)
return item
def _clean_object(self, obj, key):
"""
Cleans an object before it's used as attributes for an item's
model instance creation.
Removes every attribute that is not in the `ITEM_ATTRIBUTES` dict.
"""
attrs_to_clean = []
for attr in obj:
if attr not in self.ITEM_ATTRIBUTES:
attrs_to_clean.append(attr)
if len(attrs_to_clean):
for attr in attrs_to_clean:
del obj[attr]
def _create_container(self, container_dict=None, exclude=None):
"""
Creates a model instance of the `container_model`.
"""
data = container_dict or self.container_object_dict
if not self.dry:
container = self.container_model.objects.create(**data)
else:
container = self.container_model(**data)
self._dry_clean(container, exclude=exclude)
self.container_object = container
def _dry_clean(self, instance, row_num=None, exclude=None):
"""
Calls a given `instance`'s `full_clean()` method for validating it.
Validation errors are caught and exchanged for errors thrown to the
parser using `throw()`.
"""
try:
instance.full_clean(exclude=exclude)
except ValidationError as e:
self.throw(DataValidationError(reasons=e.message_dict, row=row_num))
def _clear_cache(self):
"""
Clears the `saved_cache` dict.
"""
self.saved_cache.clear()
PARSERS_MAP = {}
def register(key, parser_class):
"""
Registers a parser class in the system.
"""
PARSERS_MAP[key] = parser_class
def get_parser(key):
"""
Gets a parser class from the registry using a string key.
"""
if key in PARSERS_MAP:
return PARSERS_MAP[key]
else:
raise Exception(_('Parser for key: "{key}" does not exist').format(key=key))
def get_parser_key(cls):
"""
Gets the key from the registry under which this parser class
is stored, mostly for deferring that parser for later use.
"""
for key, parser_class in PARSERS_MAP.iteritems():
if cls is parser_class:
return key
else:
raise Exception(_('Given parser is not registered: {klass}'.format(
klass=unicode(cls))))
def autodiscover():
# we just load all the other parser modules
from openbudgets.apps.transport.incoming.parsers import template, sheet
autodiscover()
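# Hedged usage sketch (not part of the original module): the validate/defer/
# resolve round-trip described in BaseParser, assuming some concrete parser has
# registered itself under the key 'sheet' during autodiscover().
def _example_deferred_roundtrip(dataset, container_dict):
    """Illustrative only: validate now, persist later via a deferred object."""
    parser = get_parser('sheet')(container_dict)
    valid, errors = parser.validate(dataset)
    if not valid:
        return errors
    deferred = parser.deferred()
    # ...the deferred dict can be serialized and handed to a worker process...
    get_parser('sheet').resolve(deferred).save()
    return []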
|
|
"""Server for watching and updating hue devices."""
import logging
import sys
import time
from datetime import (
datetime,
timedelta,
)
from tornado import (
gen,
locks,
)
from huegely import Bridge
from huegely.exceptions import HueError
from device import Device
from .device_server import DeviceServer
SENSOR_UPDATE_INTERVAL = 0.2
LIGHT_UPDATE_INTERVAL = 1
ROOM_UPDATE_INTERVAL = 30
UPDATE_INTERVAL = min(SENSOR_UPDATE_INTERVAL, LIGHT_UPDATE_INTERVAL, ROOM_UPDATE_INTERVAL)
lock = locks.Lock()
class HueServer(DeviceServer):
"""The Hue Server polls the hue bridge for updates and reports/handles updates to/from the central bridge."""
configuration_key = 'hue'
device_filter = 'hue.'
def __init__(self):
self.devices = {}
self.sensors_last_updated = datetime.now()
self.lights_last_updated = datetime.now()
self.rooms_last_updated = datetime.now()
self.rooms = []
if 'ip' not in self.config or 'username' not in self.config:
self.configure()
self.hue_bridge = Bridge(self.config['ip'], self.config['username'])
def configure(self):
self.config['ip'] = self.config.get('ip') or input("Please enter the ip address of your hue bridge: ")
bridge = Bridge(self.config['ip'])
if not self.config.get('username'):
print("Please press the button on your hue bridge...")
start_time = time.time()
while time.time() < start_time + 60:
try:
self.config['username'] = bridge.get_token('phoebe-hue')
break
except HueError:
time.sleep(1)
print(".", end='')
sys.stdout.flush()
self.save_configuration()
def on_start(self):
self.update_lights()
self.update_sensors()
def _report_device_update(self, device):
print("sending data....")
self.client.send({
'command': 'update_device',
'data': device.to_dict()
})
def _update_lights(self):
"""Send updates of any changed lights to the bridge."""
# Update rooms if necessary - rooms change very rarely, so there's no need to update them as frequently as lights
if not self.rooms or datetime.now() > self.rooms_last_updated + timedelta(seconds=ROOM_UPDATE_INTERVAL):
self.rooms = [g for g in self.hue_bridge.groups() if g.group_type() == 'Room']
self.rooms_last_updated = datetime.now()
print("scanned {} rooms".format(len(self.rooms)))
if datetime.now() > self.lights_last_updated + timedelta(seconds=LIGHT_UPDATE_INTERVAL):
for room in self.rooms:
for light in room.lights():
light_name = 'light-{}'.format(light.device_id)
print("scanned light {}".format(light_name))
light_device = Device(
name=light_name,
device_group=room.device_id,
device_type="hue." + light.__class__.__name__,
friendly_name=light._name,
data=light.state()
)
if light_name not in self.devices or self.devices[light_name] != light_device:
self.devices[light_name] = light_device
# Don't try to send anything if we're not connected to the bridge
if not self.client.is_connected:
continue
self._report_device_update(light_device)
self.lights_last_updated = datetime.now()
def _update_sensors(self):
if datetime.now() > self.sensors_last_updated + timedelta(seconds=SENSOR_UPDATE_INTERVAL):
for sensor in self.hue_bridge.sensors():
print("scanned sensor {}".format(sensor.device_id))
sensor_name = 'sensor-{}'.format(sensor.device_id)
sensor_device = Device(
name=sensor_name,
device_type="hue." + sensor.__class__.__name__,
friendly_name=sensor._name,
data=sensor.state(max_age=1)
)
if sensor_name not in self.devices or self.devices[sensor_name] != sensor_device:
self.devices[sensor_name] = sensor_device
# Don't try to send anything if we're not connected to the bridge
if not self.client.is_connected:
continue
self._report_device_update(sensor_device)
self.sensors_last_updated = datetime.now()
@gen.coroutine
def update_lights(self):
try:
with (yield lock.acquire()):
self._update_lights()
except Exception:
logging.exception("Something went wrong trying to update devices")
finally:
# Schedule the next device update. We're doing this instead of a periodic timeout to allow
# taking into account the time it takes the scan to run.
self.main_io_loop.call_later(
LIGHT_UPDATE_INTERVAL,
self.update_lights,
)
@gen.coroutine
def update_sensors(self):
try:
with (yield lock.acquire()):
self._update_sensors()
except Exception:
logging.exception("Something went wrong trying to update sensors")
finally:
# Schedule the next device update. We're doing this instead of a periodic timeout to allow
# taking into account the time it takes the scan to run.
self.main_io_loop.call_later(
SENSOR_UPDATE_INTERVAL,
self.update_sensors,
)
def connect_callback(self):
"""Send all known data on connect to make sure the bridge is fully updated."""
if not self.rooms:
raise Exception("Rooms not loaded yet, make sure to update devices before the connect callback runs!")
for room in self.rooms:
self.client.send({
'command': 'update_group',
'data': {
'name': room.device_id,
'friendly_name': room.name()
}
})
for device_id, device in self.devices.items():
self._report_device_update(device)
def _set_state(self, device_name, data):
print("set state!")
light = [l for l in self.hue_bridge.lights() if l.device_id == int(device_name)][0]
try:
light.state(
brightness=data.get('brightness'),
hue=data.get('hue'),
saturation=data.get('saturation'),
on=data.get('on')
)
except HueError:
logging.exception("Couldn't update hue device!")
# Generally an error like this means we tried to send unsupported state.
# In a case like that, we'll want to send back the real state.
light_name = 'light-{}'.format(light.device_id)
if light_name in self.devices:
self._report_device_update(self.devices[light_name])
@gen.coroutine
def command_set_state(self, device_name, data):
with (yield lock.acquire()):
return self._set_state(device_name, data)
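# Hedged, self-contained sketch (not part of the server above): illustrates the
# self-rescheduling pattern used by update_lights()/update_sensors(), where each
# run schedules the next one via call_later so that the time a scan takes is
# absorbed into the polling interval instead of drifting. `_demo_poll` is a
# hypothetical callback used only for demonstration.
if __name__ == '__main__':
    from tornado.ioloop import IOLoop
    def _demo_poll():
        print("one scan")  # stand-in for a bridge scan
        IOLoop.current().call_later(LIGHT_UPDATE_INTERVAL, _demo_poll)
    IOLoop.current().call_later(0, _demo_poll)
    IOLoop.current().start()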
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tooling for support TPU embedding in TPUEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.feature_column import feature_column as core_fc
from tensorflow.python.feature_column import feature_column_lib as core_fc_lib
from tensorflow.python.tpu import feature_column as tpu_fc
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu.tpu_embedding import AdagradParameters
from tensorflow.python.tpu.tpu_embedding import AdamParameters
from tensorflow.python.tpu.tpu_embedding import StochasticGradientDescentParameters
# pylint: disable=protected-access
_TPU_EMBEDDING_COLUMN_CLASSES = (tpu_fc._TPUEmbeddingColumn,
tpu_fc._TPUSharedEmbeddingColumn)
_EMBEDDING_COLUMN_CLASSES = (core_fc._EmbeddingColumn,
core_fc_lib.EmbeddingColumn,
core_fc._SharedEmbeddingColumn)
_SUPPORTED_FEATURE_COLUMNS = (core_fc._NumericColumn, core_fc_lib.NumericColumn)
_SUPPORTED_OPTIMIZERS = (AdagradParameters, AdamParameters,
StochasticGradientDescentParameters)
# pylint: enable=protected-access
_TABLE_NAME_PREFIX = 'tbl_'
_LEN_TABLE_NAME_PREFIX = len(_TABLE_NAME_PREFIX)
def _get_table_name_from_embedding_var_name(embedding_var_name):
return '{}{}'.format(_TABLE_NAME_PREFIX, embedding_var_name)
def _get_embedding_var_name_from_table_name(table_name):
return table_name[_LEN_TABLE_NAME_PREFIX:]
def _get_embedding_variable_name(scope_name, var_name):
return '{}/{}'.format(scope_name, var_name)
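# Hedged illustration (not in the original file): the helpers above define a
# reversible naming scheme between embedding variables and embedding tables.
# `_demo_table_naming` is added only for clarity and is never called here.
def _demo_table_naming():
  table = _get_table_name_from_embedding_var_name('watched_video')  # 'tbl_watched_video'
  assert _get_embedding_var_name_from_table_name(table) == 'watched_video'
  return table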
def _get_slot_variable_names(scope_name, var_name, optimization_parameters):
"""Return embedding variable names which are consistent with CPU runs."""
if isinstance(optimization_parameters, tpu_embedding.AdagradParameters):
return tpu_embedding.AdagradSlotVariableName(
'{}/{}/Adagrad'.format(scope_name, var_name)
)
elif isinstance(optimization_parameters, tpu_embedding.AdamParameters):
return tpu_embedding.AdamSlotVariableNames(
'{}/{}/Adam/m'.format(scope_name, var_name),
'{}/{}/Adam/v'.format(scope_name, var_name)
)
elif isinstance(optimization_parameters,
tpu_embedding.StochasticGradientDescentParameters):
return None
else:
raise ValueError('Support to infer full variable name '
'for optimization_parameter {} has not been added.'
.format(optimization_parameters))
def get_full_variable_names(
graph, table_to_config_dict, optimization_parameters=None):
"""Return embedding variable names and slot variables which are consistent with CPU runs."""
collection = graph.get_collection_ref(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access
if not collection:
raise RuntimeError(
'Embedding feature column did not capture anything. Make sure the '
'feature columns passed to the TPUEstimator constructor are properly '
'used in model_fn.')
embedding_variable_name_by_table = {}
slot_variable_names_by_table = {}
for table_name in table_to_config_dict:
embedding_var_name = _get_embedding_var_name_from_table_name(table_name)
(scope_name, var_name) = collection[0][embedding_var_name]
embedding_variable_name_by_table[table_name] = (
_get_embedding_variable_name(scope_name, var_name))
if optimization_parameters:
slot_variable_names_by_table[table_name] = _get_slot_variable_names(
scope_name, var_name, optimization_parameters)
graph.clear_collection(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access
return embedding_variable_name_by_table, slot_variable_names_by_table
def get_tpu_embedding_config_from_feature_columns(feature_columns):
"""Create configs for TPUEmbedding from a list of feature columns.
This function will place one embedding tensor per table and the return is
intended to be used as input to TPUEmbedding.
Args:
feature_columns: a list of supported feature columns.
Returns:
A pair of dicts, the first maps tables to their config, the second maps
features to tables.
"""
allowed = (tpu_fc._TPUEmbeddingColumn, tpu_fc._TPUSharedEmbeddingColumn) # pylint: disable=protected-access
for column in feature_columns:
if not isinstance(column, allowed):
raise TypeError(
'Unsupported feature column {}. Supported types are {}.'.format(
type(column), allowed))
table_to_config = {}
feature_to_table = {}
for column in feature_columns:
feature_name = column.get_feature_key_name()
table_name = _get_table_name_from_embedding_var_name(
column.get_embedding_var_name())
if feature_name in feature_to_table:
raise ValueError(
'Feature column {} is used with multiple embeddings and this is '
'not supported.'.format(feature_name))
feature_to_table[feature_name] = table_name
vocabulary_size, dimension = column.get_embedding_table_size()
table_to_config[table_name] = tpu_embedding.TableConfig(
vocabulary_size=vocabulary_size,
dimension=dimension,
initializer=column.get_initializer(),
combiner=column.get_combiner())
return table_to_config, feature_to_table
class EmbeddingConfigSpec(
collections.namedtuple('EmbeddingConfigSpec', [
'feature_columns', 'optimization_parameters', 'clipping_limit',
])):
"""Class to keep track of embedding config specification."""
def __new__(cls,
feature_columns,
optimization_parameters,
clipping_limit=None):
"""Creates an EmbeddingConfigSpec instance.
Args:
feature_columns: All `FeatureColumn`s used by model.
optimization_parameters: An instance of `AdagradParameters`,
`AdamParameters` or `StochasticGradientDescentParameters`. This
optimizer will be applied to all embedding variables specified by
`feature_columns`.
clipping_limit: (Optional) Clipping limit (absolute value).
Returns:
An EmbeddingConfigSpec instance.
Raises:
ValueError: If the feature_columns are not specified.
TypeError: If the feature columns are not of the correct type (one of
_SUPPORTED_FEATURE_COLUMNS, _TPU_EMBEDDING_COLUMN_CLASSES or
_EMBEDDING_COLUMN_CLASSES).
ValueError: If `optimization_parameters` is not one of the required types.
"""
if not feature_columns:
raise ValueError('`feature_columns` cannot be `None` or empty.')
# It is unknown at this moment, whether the TPUEstimator is running in CPU
# or TPU mode. So allow non-TPU embedding columns also.
supported_classes = tuple(
list(_SUPPORTED_FEATURE_COLUMNS) + list(_TPU_EMBEDDING_COLUMN_CLASSES) +
list(_EMBEDDING_COLUMN_CLASSES))
for column in feature_columns:
if not isinstance(column, supported_classes):
raise TypeError(
'All feature columns must be supported types in {}. Got {}'.format(
supported_classes, type(column)))
if not isinstance(optimization_parameters, _SUPPORTED_OPTIMIZERS):
raise ValueError('optimization_parameters must be an instance of type '
'{}. Got {}.'.format(_SUPPORTED_OPTIMIZERS,
type(optimization_parameters)))
return super(EmbeddingConfigSpec, cls).__new__(
cls,
feature_columns=feature_columns,
optimization_parameters=optimization_parameters,
clipping_limit=clipping_limit)
class EmbeddingConfig(object):
"""This is the internal immutable object for embedding config.
`EmbeddingConfig` is responsible for _translating_ the user-provided
`EmbeddingConfigSpec` into internal data structures, mostly constructor
arguments of `TPUEmbedding`.
"""
def __init__(self, embedding_config_spec, train_batch_size, eval_batch_size,
num_hosts, num_cores, run_config):
self._embedding_config_spec = embedding_config_spec
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
self._num_hosts = num_hosts
self._num_cores = num_cores
self._run_config = run_config
self._table_to_config_dict, self._feature_to_table_dict = (
get_tpu_embedding_config_from_feature_columns(
embedding_config_spec.feature_columns))
self._mode_to_tpu_embedding_dict = {}
self.dummy_table_variables = None
def has_embedding_tables(self):
return bool(self._table_to_config_dict)
def _create_tpu_embedding(self, mode):
"""Create tpu_embedding.TPUEmbedding based on mode."""
if mode == model_fn_lib.ModeKeys.TRAIN:
batch_size = self._train_batch_size
else:
batch_size = self._eval_batch_size
if mode == model_fn_lib.ModeKeys.TRAIN:
tpu_embedding_mode = tpu_embedding.TRAINING
optimization_parameters = (
self._embedding_config_spec.optimization_parameters)
elif (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.PREDICT):
tpu_embedding_mode = tpu_embedding.INFERENCE
optimization_parameters = None
else:
raise ValueError('Mode {} is not supported.'.format(mode))
if self._run_config.cluster:
master = self._run_config.cluster.master()
cluster_spec = self._run_config.cluster.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
else:
master = (
self._run_config.evaluation_master
if mode == model_fn_lib.ModeKeys.EVAL else self._run_config.master)
cluster_def = None
tpu_embedding_ = tpu_embedding.TPUEmbedding(
self._table_to_config_dict,
self._feature_to_table_dict,
batch_size,
tpu_embedding_mode,
master,
optimization_parameters,
cluster_def,
)
return tpu_embedding_
def get_tpu_embedding(self, mode):
if mode not in self._mode_to_tpu_embedding_dict:
self._mode_to_tpu_embedding_dict[mode] = (
self._create_tpu_embedding(mode))
return self._mode_to_tpu_embedding_dict[mode]
def split_inputs(ctx, features, labels):
"""Splits the dense and sparse tensors inside the features and labels."""
sparse_features = collections.OrderedDict()
if ctx.embedding_config:
tpu_embedding_ = ctx.embedding_config.tpu_embedding
for feature_key in tpu_embedding_.feature_to_table_dict:
sparse_features[feature_key] = features.pop(feature_key)
return features, labels, sparse_features
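# Hedged, framework-free sketch (not part of the original module): mimics what
# split_inputs() does when an embedding config is present -- the embedding
# features are popped out of `features` into their own ordered dict, leaving
# only the dense features behind. All names below are hypothetical.
def _demo_split(features, embedding_feature_keys):
  sparse_features = collections.OrderedDict()
  for key in embedding_feature_keys:
    sparse_features[key] = features.pop(key)
  return features, sparse_features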
|
|
import tensorflow as tf
import numpy as np
from datasets.twitter import data
from datasets.twitter import data_utils
class enc_lm_seq2seq(object):
def __init__(self, state_size, vocab_size, num_layers,
model_name= 'enc_langmodel',
ckpt_path= 'ckpt/enc_langmodel/'):
self.model_name = model_name
self.ckpt_path = ckpt_path
def __graph__():
# start from a clean default graph
tf.reset_default_graph()
#
# placeholders
xs_ = tf.placeholder(dtype=tf.int32, shape=[None, None],
name='xs')
ys_ = tf.placeholder(dtype=tf.int32, shape=[None, None],
name='ys') # decoder targets
dec_inputs_ = tf.placeholder(dtype=tf.int32, shape=[None, None],
name='dec_inputs')
# embed encoder input
embs = tf.get_variable('emb', [vocab_size, state_size])
enc_inputs = tf.nn.embedding_lookup(embs, xs_)
# embed decoder input
dec_inputs = tf.nn.embedding_lookup(embs, dec_inputs_)
# define basic lstm cell
basic_cell = tf.contrib.rnn.BasicLSTMCell(state_size, state_is_tuple=True)
# add dropout
# dropout's keep probability
keep_prob_ = tf.placeholder(tf.float32)
basic_cell = tf.contrib.rnn.DropoutWrapper(basic_cell, output_keep_prob=keep_prob_)
# stack cells
stacked_lstm = tf.contrib.rnn.MultiRNNCell([basic_cell]*num_layers, state_is_tuple=True)
with tf.variable_scope('encoder') as scope:
# define encoder
enc_op, enc_context = tf.nn.dynamic_rnn(cell=stacked_lstm, dtype=tf.float32,
inputs=enc_inputs)
###
# project enc_op
Ve = tf.get_variable('Ve', shape=[state_size, vocab_size],
initializer=tf.contrib.layers.xavier_initializer())
be = tf.get_variable('be', shape=[vocab_size],
initializer=tf.constant_initializer(0.))
###
# reshape enc_op
enc_op_reshaped = tf.reshape(enc_op, [-1, state_size])
enc_logits = tf.matmul(enc_op_reshaped, Ve) + be
# optimization
enc_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=enc_logits,
labels=tf.reshape(xs_, [-1]))
enc_loss = tf.reduce_mean(enc_losses)
enc_train_op = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(enc_loss)
with tf.variable_scope('decoder') as scope:
# define decoder
dec_op, _ = tf.nn.dynamic_rnn(cell=stacked_lstm, dtype=tf.float32,
initial_state= enc_context,
inputs=dec_inputs)
###
# predictions
Vd = tf.get_variable('Vd', shape=[state_size, vocab_size],
initializer=tf.contrib.layers.xavier_initializer())
bd = tf.get_variable('bd', shape=[vocab_size],
initializer=tf.constant_initializer(0.))
####
# flatten states to 2d matrix for matmult with V
dec_op_reshaped = tf.reshape(dec_op, [-1, state_size])
# /\_o^o_/\
dec_logits = tf.matmul(dec_op_reshaped, Vd) + bd
#
# predictions
predictions = tf.nn.softmax(dec_logits)
#
# optimization
dec_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=dec_logits,
labels=tf.reshape(ys_, [-1]))
dec_loss = tf.reduce_mean(dec_losses)
dec_train_op = tf.train.AdagradOptimizer(learning_rate=0.001).minimize(dec_loss)
#
# joint training
loss = enc_loss + dec_loss
train_op = tf.train.AdagradOptimizer(learning_rate=0.001).minimize(loss)
#
# attach symbols to class
self.loss = loss
self.enc_loss = enc_loss
self.dec_loss = dec_loss
self.train_op = train_op
self.enc_train_op = enc_train_op
self.dec_train_op = dec_train_op
self.predictions = predictions
self.keep_prob_ = keep_prob_
self.xs_ = xs_
self.ys_ = ys_
self.dec_inputs_ = dec_inputs_
#####
####
# build graph
__graph__()
def train_joint(self, trainset, testset, epochs=100, n=100):
def fetch_dict(datagen, keep_prob=0.5):
bx, by = datagen.__next__()
by_dec = np.zeros_like(by).T
by_dec[1:] = by.T[:-1]
by_dec = by_dec.T
feed_dict = {
self.xs_ : bx,
self.ys_ : by,
self.dec_inputs_ : by_dec,
self.keep_prob_ : keep_prob
}
return feed_dict
##
# setup session
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# get last checkpoint
ckpt = tf.train.get_checkpoint_state(self.ckpt_path)
# verify it
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
try:
# start training
for j in range(epochs):
mean_loss = 0
for i in range(n):
_, l = sess.run([self.train_op, self.loss],
feed_dict = fetch_dict(trainset)
)
mean_loss += l
print(f'>> [{j}] train loss at : {mean_loss/n}')
saver.save(sess, self.ckpt_path + self.model_name + '.ckpt', global_step=i)
#
# evaluate
testloss = sess.run([self.dec_loss],
feed_dict = fetch_dict(testset, keep_prob=1.)
)
print(f'test loss : {testloss}')
except KeyboardInterrupt:
print(f'\n>> Interrupted by user at iteration {j}')
def train_alternate(self, trainset, testset, epochs=100, n=100):
def fetch_dict(datagen, keep_prob=0.5):
bx, by = datagen.__next__()
by_dec = np.zeros_like(by).T
by_dec[1:] = by.T[:-1]
by_dec = by_dec.T
feed_dict = {
self.xs_ : bx,
self.ys_ : by,
self.dec_inputs_ : by_dec,
self.keep_prob_ : keep_prob
}
return feed_dict
##
# setup session
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# get last checkpoint
ckpt = tf.train.get_checkpoint_state(self.ckpt_path)
# verify it
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
try:
# start training
for j in range(epochs):
mean_loss = 0
for i in range(n): # train decoder loss with 70% probability
if np.random.rand() < 0.7:
_, l = sess.run([self.dec_train_op, self.dec_loss],
feed_dict = fetch_dict(trainset)
)
mean_loss += l
else: # train encoder lang model with 30% probability
_, l = sess.run([self.enc_train_op, self.enc_loss],
feed_dict = fetch_dict(trainset)
)
mean_loss += l
print(f'>> [{j}] train loss at : {mean_loss/n}')
saver.save(sess, self.ckpt_path + self.model_name + '.ckpt', global_step=i)
#
# evaluate
testloss = sess.run([self.dec_loss],
feed_dict = fetch_dict(testset, keep_prob=1.)
)
print(f'test loss : {testloss}')
except KeyboardInterrupt:
print(f'\n>> Interrupted by user at iteration {j}')
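# Hedged illustration (not part of the training code): the fetch_dict helpers
# above build decoder inputs by shifting the targets right by one time step, so
# the decoder is fed a zero "GO" token followed by the previous target symbol.
# `_demo_shift` is hypothetical and never called.
def _demo_shift():
    by = np.array([[5, 6, 7], [8, 9, 10]])  # hypothetical target batch
    by_dec = np.zeros_like(by).T
    by_dec[1:] = by.T[:-1]
    return by_dec.T  # array([[0, 5, 6], [0, 8, 9]])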
if __name__ == '__main__':
#
# gather data
metadata, idx_q, idx_a = data.load_data(PATH='datasets/twitter/')
# split data
(trainX, trainY), (testX, testY), (validX, validY) = data_utils.split_dataset(idx_q, idx_a)
#
# prepare train set generator
# set batch_size
batch_size = 16
trainset = data_utils.rand_batch_gen(trainX, trainY, batch_size)
testset = data_utils.rand_batch_gen(testX, testY, batch_size=1024)
###
# infer vocab size
vocab_size = len(metadata['idx2w'])
#
# create a model
model = enc_lm_seq2seq(state_size=1024, vocab_size=vocab_size, num_layers=3)
# train
model.train_alternate(trainset, testset, n=1000)
|
|
""" experimental data """
from collections import defaultdict
from itertools import compress
import logging
import pickle
from urllib.request import urlopen
import numpy as np
import yaml
from . import cachedir, systems
# TODO improve flow cumulant observable keys:
# cumulant v_n{k} should have obs == 'vnk' and subobs == (n, k)
class HEPData:
"""
Interface to a HEPData yaml file.
Ignore centrality bins above the specified `maxcent`.
"""
def __init__(self, inspire_rec, table, version=1, maxcent=80):
cachefile = (
cachedir / 'hepdata' /
'ins{}_table{}.pkl'.format(inspire_rec, table)
)
logging.debug('loading hepdata record %s table %s', inspire_rec, table)
if cachefile.exists():
logging.debug('reading from cache')
with cachefile.open('rb') as f:
self.data = pickle.load(f)
else:
logging.debug('not found in cache, downloading from hepdata.net')
cachefile.parent.mkdir(exist_ok=True)
with cachefile.open('wb') as f, urlopen(
'https://hepdata.net/download/table/'
'ins{}/Table{}/{}/yaml'.format(inspire_rec, table, version)
) as u:
self.data = yaml.load(u)
pickle.dump(self.data, f, protocol=pickle.HIGHEST_PROTOCOL)
# extract centrality bins
for x in self.data['independent_variables']:
if x['header']['name'].lower() == 'centrality':
try:
cent = [(v['low'], v['high']) for v in x['values']]
except KeyError:
# try to guess bins from midpoints
mids = [v['value'] for v in x['values']]
width = set(a - b for a, b in zip(mids[1:], mids[:-1]))
if len(width) > 1:
raise ValueError('variable bin widths')
d = width.pop() / 2
cent = [(m - d, m + d) for m in mids]
break
# select bins whose upper edge is <= maxcent
self._centselectors = [c[1] <= maxcent for c in cent]
cent = self._filtercent(cent)
# save centrality bins and midpoints as public attribute
self.cent = dict(
cent=cent,
x=np.array([(a + b)/2 for a, b in cent])
)
def _filtercent(self, values):
"""
Filter `values` by the centrality selectors created in the constructor
(i.e. ignore bins above maxcent).
"""
return list(compress(values, self._centselectors))
def x(self, name):
"""
Get an independent variable ("x" data) with the given name.
"""
for x in self.data['independent_variables']:
if x['header']['name'] == name:
return self._filtercent(x['values'])
def y(self, name=None, **quals):
"""
Get a dependent variable ("y" data) with the given name and qualifiers.
"""
for y in self.data['dependent_variables']:
if name is None or y['header']['name'] == name:
y_quals = {q['name']: q['value'] for q in y['qualifiers']}
if all(y_quals[k] == v for k, v in quals.items()):
return self._filtercent(y['values'])
def dataset(self, name=None, **quals):
"""
Return a dict containing y values and errors along with centrality
data. Arguments are passed directly to self.y().
"""
y = []
yerr = defaultdict(list)
for v in self.y(name, **quals):
y.append(v['value'])
for err in v['errors']:
try:
e = err['symerror']
except KeyError:
e = err['asymerror']
if abs(e['plus']) != abs(e['minus']):
raise RuntimeError(
'asymmetric errors are not implemented'
)
e = abs(e['plus'])
yerr[err.get('label', 'sum')].append(e)
return dict(
y=np.array(y),
yerr={k: np.array(v) for k, v in yerr.items()},
**self.cent
)
def get_calibration_data():
"""
Experimental data for model calibration.
"""
data = {s: {} for s in systems}
# PbPb2760 and PbPb5020 dNch/deta
for system, args, name in [
('PbPb2760', (880049, 1), 'D(N)/DETARAP'),
('PbPb5020', (1410589, 2),
r'$\mathrm{d}N_\mathrm{ch}/\mathrm{d}\eta$'),
]:
data[system]['dNch_deta'] = {None: HEPData(*args).dataset(name)}
# PbPb2760 identified dN/dy and mean pT
system = 'PbPb2760'
for obs, table, combine_func in [
('dN_dy', 31, np.sum),
('mean_pT', 32, np.mean),
]:
data[system][obs] = {}
d = HEPData(1222333, table)
for key, re_products in [
('pion', ['PI+', 'PI-']),
('kaon', ['K+', 'K-']),
('proton', ['P', 'PBAR']),
]:
dsets = [
d.dataset(RE='PB PB --> {} X'.format(i))
for i in re_products
]
data[system][obs][key] = dict(
y=combine_func([d['y'] for d in dsets], axis=0),
yerr={
e: combine_func([d['yerr'][e] for d in dsets], axis=0)
for e in dsets[0]['yerr']
},
**d.cent
)
# PbPb2760 and PbPb5020 flows
for system, tables in [
('PbPb5020', [1, 2, 2]),
('PbPb2760', [3, 4, 4]),
]:
data[system]['vn'] = {}
for n, t in enumerate(tables, start=2):
data[system]['vn'][n] = HEPData(1419244, t).dataset(
'V{}{{2, |DELTAETA|>1}}'.format(n)
)
return data
data = get_calibration_data()
def get_extra_data():
"""
Experimental data for model verification. These observables require many
more model events to compute and thus are not useful for calibration.
"""
data = {s: {} for s in systems}
# PbPb2760 flow correlations
for obs, table in [('sc', 1), ('sc_central', 3)]:
d = HEPData(1452590, table)
data['PbPb2760'][obs] = {
mn: d.dataset('SC({},{})'.format(*mn))
for mn in [(3, 2), (4, 2)]
}
# PbPb2760 central flows vn{2}
system, obs = 'PbPb2760', 'vn_central'
data[system][obs] = {}
for n, table in [(2, 11), (3, 12)]:
dset = HEPData(900651, table).dataset()
# the (unlabeled) errors in the dataset are actually stat
dset['yerr']['stat'] = dset['yerr'].pop('sum')
# estimate sys error fraction
dset['yerr']['sys'] = {2: .025, 3: .040}[n]*dset['y']
data[system][obs][n] = dset
# PbPb2760 and PbPb5020 v2{4}
for system, table in [('PbPb2760', 3), ('PbPb5020', 1)]:
cent = []
x = []
y = []
yerr = dict(stat=[], sys=[])
# discard missing values in these datasets
# TODO handle this in HEPData class
d = HEPData(1419244, table)
for v, x_, cent_ in zip(d.y('V2{4}'), d.cent['x'], d.cent['cent']):
value = v['value']
if value == '-':
continue
cent.append(cent_)
x.append(x_)
y.append(value)
for e in v['errors']:
yerr[e['label']].append(e['symerror'])
# fix incorrect data point
if system == 'PbPb5020':
y[0] = .036
yerr['stat'][0] = .003
yerr['sys'][0] = .0006
data[system]['vn4'] = {2: dict(
cent=cent,
x=np.array(x),
y=np.array(y),
yerr={k: np.array(v) for k, v in yerr.items()}
)}
return data
extra_data = get_extra_data()
def cov(x, y, yerr, stat_frac=1e-4, sys_corr_length=100, **kwargs):
"""
Estimate a covariance matrix from stat and sys errors: stat errors are
treated as uncorrelated (diagonal), while sys errors are correlated across
bins with a Gaussian correlation length `sys_corr_length`.  If the errors
are not split into stat and sys, the 'sum' error is used as sys and a small
fraction `stat_frac` of y is used as the stat error.
"""
try:
stat = yerr['stat']
sys = yerr['sys']
except KeyError:
stat = y * stat_frac
sys = yerr['sum']
return np.diag(stat**2) + (
np.exp(-.5*(np.subtract.outer(x, x)/sys_corr_length)**2) *
np.outer(sys, sys)
)
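# Hedged example (not used anywhere in the module): build a covariance matrix
# for a made-up observable with 1% stat and 10% correlated sys errors; all
# numbers below are hypothetical.
def _demo_cov():
    x = np.array([2.5, 7.5, 15., 25.])
    y = np.array([1500., 1200., 900., 600.])
    yerr = dict(stat=.01*y, sys=.10*y)
    return cov(x, y, yerr, sys_corr_length=100)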
def print_data(d, indent=0):
"""
Pretty print the nested data dict.
"""
prefix = indent * ' '
for k in sorted(d):
v = d[k]
k = prefix + str(k)
if isinstance(v, dict):
print(k)
print_data(v, indent + 1)
else:
if k.endswith('cent'):
v = ' '.join(
str(tuple(int(j) if j.is_integer() else j for j in i))
for i in v
)
elif isinstance(v, np.ndarray):
v = str(v).replace('\n', '')
print(k, '=', v)
if __name__ == '__main__':
print_data(data)
print_data(extra_data)
|
|
#!/usr/bin/env python
'''
See GDoc "Prepare GYB"
This data-prep script MUST be modified for the needs of each project
Requires stands, treeslist and climate tables
'''
import os
import sqlite3
import json
import glob
import sys
class GYBError(Exception):
pass
TF = [
# See "G:\projects\projects2011\LandOwnerTools\util\scripts\FVS TREEFMT Details.xlsx"
#("Name", "Column", "fvsformat", "valtype", "valwidth", "cumulative", "valdec")
("Plot ID", None, "I6", "int", 6, 6, None),
("Tree Number", "TreeID", "I3", "int", 3, 9, None),
("Tree Count", "TPA", "F6.0", "float", 6, 15, 0),
("Tree History", "TreeHist", "I1", "int", 1, 16, None),
("Species", "{{variant}}_Spp", "A3", "str", 3, 19, None),
("Diameter at Breast Height", "DBH", "F5.1", "float", 5, 24, 1),
("DBH Increment", "Diam_Inc", "F3.1", "float", 3, 27, 1),
("Live Height", "HT_ft", "F3.0", "float", 3, 30, 0),
("Height to Top Kill", "HT_kill", "F3.0", "float", 3, 33, 0),
("Height Increment", "HT_Inc", "F4.1", "float", 4, 37, 1),
("Crown Ratio Code", "Crown", "I1", "int", 1, 38, None),
("Damage Code 1", "Dmg1", "I2", "int", 2, 40, None),
("Severity Code 1", "Sev1", "I2", "int", 2, 42, None),
("Damage Code 2", "Dmg2", "I2", "int", 2, 44, None),
("Severity Code 2", "Sev2", "I2", "int", 2, 46, None),
("Damage Code 3", "Dmg3", "I2", "int", 2, 48, None),
("Severity Code 3", "Sev3", "I2", "int", 2, 50, None),
("Tree Value Class Code", "TreeValue", "I1", "int", 1, 51, None),
("Cut/Leave Prescription Code", "RxREC", "I1", "int", 1, 52, None),
("Plot slope percent ", None, "I2", "int", 2, 54, None),
("Plot aspect in degrees", None, "I3", "int", 3, 57, None),
("Plot habitat type code", None, "I3", "int", 3, 60, None),
("Plot topographic position code", None, "I1", "int", 1, 61, None),
("Plot site preparation code", None, "I1", "int", 1, 62, None),
("Tree Age", "Tree_Age", "F3.0", "float", 3, 65, 0),
]
# these are built into build_keys.py, no need to specify
# unless you want to override them
default_site_classes = {
"1": "SiteCode DF 148 1",
"2": "SiteCode DF 125 1",
"3": "SiteCode DF 105 1",
"4": "SiteCode DF 85 1",
"5": "SiteCode DF 62 1"}
SITE_CLASSES = {
# "PN": default_site_classes,
# "SO": default_site_classes,
# "CA": default_site_classes,
# "NC": default_site_classes,
# "EC": default_site_classes,
# "BM": default_site_classes,
"WC": {
"1": "SiteCode DF 200 1",
"2": "SiteCode DF 170 1",
"3": "SiteCode DF 140 1",
"4": "SiteCode DF 110 1",
"5": "SiteCode DF 80 1"
}
}
def make_fvsfile(stand, outdir, con, variant):
# query treelist.db for the condid
# construct lines and write to file
# make sure you've got an index on the standid column
# CREATE INDEX gnn_fcid_idx ON treelive(GNN_FCID);
standid = stand['standid']
fcid = stand['gnnfcid']
path = os.path.join(outdir, "%d.fvs" % standid)
cols = [x[1].replace("{{variant}}", variant) for x in TF if x[1] is not None]
with open(path, 'w') as fh:
con.row_factory = sqlite3.Row
cur = con.cursor()
sql = """SELECT %s
FROM treelist
WHERE GNN_FCID = %d;""" % (', '.join(cols), fcid)
for i, row in enumerate(cur.execute(sql)):
#line = " ".join([str(x) for x in row])
line = ''
for item in TF:
col = item[1]
if col is not None:
col = str(col.replace("{{variant}}", variant))
valtype = item[3]
valwidth = item[4]
dec = item[6]
if item[0] == "Plot ID": # special case
val = standid
elif col is None:
val = ''
valtype = 'str'
else:
val = row[col]
if valtype == "str":
# assert len(val.strip()) <= valwidth, (col, val, valwidth)
pass
elif valtype == "int":
# special case, convert pct to crown code
if col == "Crown":
try:
val = int(val)
val = 1 + int( (val - 1) / 10)
if val > 9:
val = 9
except ValueError:
val = ''
if val != '':
val = str(int(val))
if col == "TreeID": # special case, just use autonum
val = str(i)
# assert len(val.strip()) <= valwidth, (col, val, valwidth)
elif valtype == "float":
if val != '':
val = float(val)
mult = 10 ** dec
val = val * mult
val = str(int(round(val)))
# assert len(val.strip()) <= valwidth, (col, val, valwidth)
fmt = '{0: >%d}' % valwidth
fval = fmt.format(val)
if len(fval) > valwidth:
if col == 'Tree_Age':
# special case, tree age >= 1000 gets assigned to 999
print "WARNING: Tree Age is '%s', setting to 999" % (val, )
val = '999'
fval = '999'
else:
print "WARNING: %s is '%s' should only be %d wide!!" % (col,
val, valwidth)
line += fval[-1 * valwidth:] # Just take the trailing chars
#print line
fh.write(line)
fh.write("\n")
def make_stdinfofile(stand, outdir, con):
'''
field 1: Numeric Region and National Forest code where stand is located.
RFF where R = region, FF = 2-digit forest code
(NOTE: this is misleading! see fvs variant overviews)
field 2: Stand habitat type code or plant community code (ecological unit code in SN.)
field 3: Stand age in years.
field 4: Stand aspect in degrees (0 or 360 = north).
field 5: Stand slope percent.
field 6: Stand elevation in 100s of feet (10s of feet in AK variant). For example, a
code of 52 would mean elevation is 5200 feet (520 feet in AK).
field 7: Stand Latitude in degrees.
'''
# 1, 4, 5, 6, 7 from shapefile; field names = ['location', 'aspect', 'slope', 'elev', 'lat']
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# 2 (habitat code) is hard to determine.
# (may be able to construct via GNN stand-level forest types?)
# It drives site tree/index and max density but we override the first two anyways
# LEAVE BLANK AND USE DEFAULT FOR NOW - i.e. accept the default max stand density
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
standid = stand['standid']
fcid = stand['gnnfcid']
path = os.path.join(outdir, "%d.std" % standid)
variant = stand['variant']
default_habitat = {
'PN': '40', # CHS133
'WC': '52', # CFS551
'NC': 'CWC221', #
'CA': '46', # CWC221
'SO': '49', # CPS111
'EC': '114'
}
habitat = default_habitat.get(variant.upper(), "")
cur = con.cursor()
sql = """SELECT TPA, DBH, HT_ft, Tree_Age
FROM treelist
WHERE GNN_FCID = %d
AND TreeHist = 1;""" % (fcid, )
treedata = list(cur.execute(sql))
if len(treedata) == 0:
warn = "WARNING, no treelist data for standid %s, fcid %s (skipping)" % (standid, fcid)
raise GYBError(warn)
# TODO age_dom or age_dom_no_rem ??
sql = """SELECT AGE_DOM_NO_REM as age, SDI_REINEKE, QMDA_DOM, HCB, OGSI
FROM SPPSZ_ATTR_ALL
WHERE FCID = %d;""" % (fcid, )
data = list(cur.execute(sql))
age = float(data[0]['age'])
if age == 0:
"""
GNN AGE has failed us, try to apply a simple linear regression to guess age
Using the 3994 FCIDs on BLM property that had a valid age
predict AGE_DOM_NO_REM using AGE_DOM_NO_REM ~ SDI_REINEKE + QMDA_DOM + HCB + OGSI - 1
SDI_REINEKE 0.038342
QMDA_DOM 1.257999
HCB -2.183154
OGSI 1.213396
OLS Regression Results
==============================================================================
Dep. Variable: AGE_DOM_NO_REM R-squared: 0.853
Model: OLS Adj. R-squared: 0.853
Method: Least Squares F-statistic: 6303.
Date: Mon, 17 Feb 2014 Prob (F-statistic): 0.00
Time: 08:53:24 Log-Likelihood: -22160.
No. Observations: 4355 AIC: 4.433e+04
Df Residuals: 4351 BIC: 4.435e+04
Df Model: 4
===============================================================================
coef std err t P>|t| [95.0% Conf. Int.]
-------------------------------------------------------------------------------
SDI_REINEKE 0.0383 0.004 8.696 0.000 0.030 0.047
QMDA_DOM 1.2580 0.045 27.694 0.000 1.169 1.347
HCB -2.1832 0.138 -15.858 0.000 -2.453 -1.913
OGSI 1.2134 0.050 24.180 0.000 1.115 1.312
==============================================================================
Omnibus: 1047.808 Durbin-Watson: 1.752
Prob(Omnibus): 0.000 Jarque-Bera (JB): 6835.787
Skew: 0.984 Prob(JB): 0.00
Kurtosis: 8.813 Cond. No. 78.1
==============================================================================
"""
# apply regression coefficients, no intercept
try:
age = (float(data[0]['SDI_REINEKE']) * 0.038342) + \
(float(data[0]['QMDA_DOM']) * 1.257999) + \
(float(data[0]['HCB']) * -2.183154) + \
(float(data[0]['OGSI']) * 1.213396)
except Exception:
# regression inputs missing or malformed; skip this stand like other bad data
warn = "WARNING, could not estimate stand age for standid %s, fcid %s (skipping)" % (standid, fcid)
raise GYBError(warn)
if age < 0:
age = 0
with open(path, 'w') as fh:
line = concat_fvs_line("STDINFO", [
stand['location'],
habitat,
int(age),
int(stand['aspect']),
int(stand['slope']),
int(round(stand['elev'] / 100.0)), # elev assumed to be in ft, FVS expects ft/100
int(stand['lat']),
])
fh.write(line)
fh.write("\n")
def concat_fvs_line(keyword, fields):
col = "%10s"
line = "{0:<10}".format(keyword.upper(), )
for field in fields:
line += col % field
return line
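# Hedged example (not called anywhere): concat_fvs_line() left-justifies the
# keyword in a 10-character column and right-justifies every field in its own
# 10-character column, which is the fixed-width layout FVS keyword files
# expect. The field values below are hypothetical.
def _demo_concat_fvs_line():
    return concat_fvs_line("STDINFO", [708, "", 85, 180, 12, 9, 44])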
def make_climatefile(stand, outdir, con):
# query climate.db
# write to .cli
# return available climates
# make sure you've got an index on the standid column
# CREATE INDEX stand_idx ON fvsclimattrs(StandID);
standid = stand['standid']
#fcid = stand['gnnfcid']
path = os.path.join(outdir, "%d.cli" % standid)
con.row_factory = None
con.row_factory = sqlite3.Row
cur = con.cursor()
columns_query = "PRAGMA table_info(climate)"
cur.execute(columns_query)
header = ','.join([x['name'] for x in cur.fetchall()])
with open(path, 'w') as fh:
fh.write(header)
fh.write("\n")
sql = """SELECT %s
FROM climate
WHERE StandID = %d;""" % (header, standid)
i = None
noclim = None
for i, row in enumerate(cur.execute(sql)):
if row['Year'] == 1990 and row['Scenario'] == "Ensemble_rcp60":
# grab the line for NoClimate
noclim = list(row)
fh.write(",".join([str(x) for x in row]))
fh.write("\n")
if i is None:
warn = "WARNING, Climate data missing for standid %s (skipping)" % standid
raise GYBError(warn)
if not noclim:
warn = "WARNING, Could not find the 1990 ensemble rcp60 scenario to use as NoClimate %s (skipping)" % standid
raise GYBError(warn)
# write noclim
noclim[1] = "NoClimate"
for year in [1990, 2030, 2060, 2090]:
noclim[2] = year
fh.write(",".join([str(x) for x in noclim]))
fh.write("\n")
con.row_factory = sqlite3.Row
def make_sitefile(stand, outdir):
# read site from stand and write number to file
# return site indecies
standid = stand['standid']
path = os.path.join(outdir, "%d.site" % standid)
with open(path, 'w') as fh:
fh.write(str(stand['sitecls']))
fh.write("\n")
def make_rxfile(stand, outdir):
# read variant and rxs from stand
# write to file in csv format, no header. return them
standid = stand['standid']
variant = stand['variant']
rxtxt = stand['rx']
if rxtxt:
rxs = [int(x) for x in rxtxt.strip().split(",")]
else:
rxs = ["*"]
path = os.path.join(outdir, "%d.rx" % standid)
with open(path, 'w') as fh:
for rx in rxs:
fh.write("%s,%s" % (variant, rx))
fh.write("\n")
return variant, rxs
def get_climates(con):
# connect to climatedb and find all unique climate names
cur = con.cursor()
sql = """SELECT DISTINCT Scenario FROM climate"""
scenarios = [x[0] for x in cur.execute(sql)]
return scenarios
def stand_iter(batch, con):
# import shapefile
# sf = shapefile.Reader(shp)
# fields = [x[0] for x in sf.fields[1:]]
# for record in sf.iterRecords():
# dd = dict(zip(fields, record))
# yield dd
cur = con.cursor()
sql = """SELECT * FROM stands WHERE batch='%s'""" % batch #TODO unsafe
for i, row in enumerate(cur.execute(sql)):
yield dict(zip(row.keys(), row))
def write_config(con, outdir):
# write config.json
# default offsets are 0 and 10
clims = get_climates(con)
clims.append("NoClimate")
data = {
"climate_scenarios": clims,
"site_classes": SITE_CLASSES,
"offsets": [0, 10]
}
with open(os.path.join(outdir, 'config.json'), 'w') as fh:
fh.write(json.dumps(data, indent=2))
#------------------------------------------------------------------------------#
def main(batch):
print "Starting to prepare GYB batch (%s)" % batch
outdir = "./%s/cond" % batch
if os.path.exists(outdir):
import shutil
shutil.rmtree(outdir)
os.makedirs(outdir)
print "Writing to %s" % outdir
conn = sqlite3.connect('/home/mperry/projects/BLM_climate/Batch1/master.sqlite')
conn.row_factory = sqlite3.Row
for stand in stand_iter(batch, conn):
try:
make_climatefile(stand, outdir, conn)
make_stdinfofile(stand, outdir, conn)
make_sitefile(stand, outdir)
variant, _ = make_rxfile(stand, outdir)
make_fvsfile(stand, outdir, conn, variant)
# print "\t", stand['standid'], "complete"
except GYBError as exc:
print "\t", exc.message
# clean up and just skip it
for path in glob.glob(os.path.join(outdir, "%s*" % stand['standid'])):
os.remove(path)
print "Writing config"
write_config(conn, os.path.join(outdir, '..'))
print "DONE"
batch = sys.argv[1]
main(batch)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
SunGrid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
import os
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
import pickle
import luigi
from luigi.contrib.hadoop import create_packages_archive
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
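# Hedged example (not part of the module): a fabricated two-line qstat listing,
# used only to show what _parse_qstat_state() extracts for a given job id.
def _demo_parse_qstat_state():
    fake_qstat = (
        "job-ID  prior   name       user     state submit/start at\n"
        "-------------------------------------------------------------\n"
        "  12345 0.55500 TestJob    alice    r     01/01/2020 12:00:00\n")
    return _parse_qstat_state(fake_qstat, 12345)  # -> 'r'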
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
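# Hedged illustration (not part of the module): shows the shape of the shell
# command _build_qsub_command() produces; all paths and names are hypothetical.
def _demo_build_qsub_command():
    return _build_qsub_command(
        cmd='python sge_runner.py "/home/tmp-dir" "/work"',
        job_name='TestJobTask',
        outfile='/home/tmp-dir/job.out',
        errfile='/home/tmp-dir/job.err',
        pe='orte',
        n_cpu=2)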
class SGEJobTask(luigi.Task):
"""Base class for executing a job on SunGrid Engine
Override ``work()`` (rather than ``run()``) with your job code.
Parameters:
- n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
value is passed as ``qsub -pe {pe} {n_cpu}``
- parallel_env: SGE parallel environment name. The default is "orte",
the parallel environment installed with MIT StarCluster. If you
are using a different cluster environment, check with your
sysadmin for the right pe to use. This value is passed as {pe}
to the qsub command above.
- shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
Task classes and dependencies are pickled to a temporary folder on
this drive. The default is ``/home``, the NFS share location setup
by StarCluster
- job_name_format: String that can be passed in to customize the job name
string passed to qsub; e.g. "Task123_{task_family}_{n_cpu}...".
- job_name: Exact job name to pass to qsub.
- run_locally: Run locally instead of on the cluster.
- poll_time: the length of time to wait in order to poll qstat
- dont_remove_tmp_dir: Instead of deleting the temporary directory, keep it.
- no_tarball: Don't create a tarball of the luigi project directory. Can be
useful to reduce I/O requirements when the luigi directory is accessible
from cluster nodes already.
"""
n_cpu = luigi.IntParameter(default=2, significant=False)
shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
parallel_env = luigi.Parameter(default='orte', significant=False)
job_name_format = luigi.Parameter(
significant=False, default=None, description="A string that can be "
"formatted with class variables to name the job with qsub.")
job_name = luigi.Parameter(
significant=False, default=None,
description="Explicit job name given via qsub.")
run_locally = luigi.BoolParameter(
significant=False,
description="run locally instead of on the cluster")
poll_time = luigi.IntParameter(
significant=False, default=POLL_TIME,
description="specify the wait time to poll qstat for the job status")
dont_remove_tmp_dir = luigi.BoolParameter(
significant=False,
description="don't delete the temporary directory used (for debugging)")
no_tarball = luigi.BoolParameter(
significant=False,
description="don't tarball (and extract) the luigi project files")
def __init__(self, *args, **kwargs):
super(SGEJobTask, self).__init__(*args, **kwargs)
if self.job_name:
# use explicitly provided job name
pass
elif self.job_name_format:
# define the job name with the provided format
self.job_name = self.job_name_format.format(
task_family=self.task_family, **self.__dict__)
else:
# default to the task family
self.job_name = self.task_family
def _fetch_task_failures(self):
if not os.path.exists(self.errfile):
logger.info('No error file')
return []
with open(self.errfile, "r") as f:
errors = f.readlines()
if errors == []:
return errors
if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
errors.pop(0)
return errors
def _init_local(self):
# Set up temp folder in shared directory (trim to max filename length)
base_tmp_dir = self.shared_tmp_dir
random_id = '%016x' % random.getrandbits(64)
folder_name = self.task_id + '-' + random_id
self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
max_filename_length = os.fstatvfs(0).f_namemax
self.tmp_dir = self.tmp_dir[:max_filename_length]
logger.info("Tmp dir: %s", self.tmp_dir)
os.makedirs(self.tmp_dir)
# Dump the code to be run into a pickle file
logging.debug("Dumping pickled class")
self._dump(self.tmp_dir)
if not self.no_tarball:
# Make sure that all the class's dependencies are tarred and available
# This is not necessary if luigi is importable from the cluster node
logging.debug("Tarballing dependencies")
# Grab luigi and the module containing the code to be run
packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
def run(self):
if self.run_locally:
self.work()
else:
self._init_local()
self._run_job()
# The procedure:
# - Pickle the class
# - Tarball the dependencies
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner class untars the dependencies
# - Runner function hits the button on the class's work() method
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass
def _dump(self, out_dir=''):
"""Dump instance to file."""
with self.no_unpicklable_properties():
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
with open(self.job_file, "w") as f:
f.write(d)
else:
with open(self.job_file, "wb") as f:
pickle.dump(self, f)
def _run_job(self):
# Build a qsub argument that will run sge_runner.py on the directory we've specified
runner_path = sge_runner.__file__
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
job_str = 'python {0} "{1}" "{2}"'.format(
runner_path, self.tmp_dir, os.getcwd()) # enclose tmp_dir in quotes to protect from special escape chars
if self.no_tarball:
job_str += ' "--no-tarball"'
# Build qsub submit command
self.outfile = os.path.join(self.tmp_dir, 'job.out')
self.errfile = os.path.join(self.tmp_dir, 'job.err')
submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
self.errfile, self.parallel_env, self.n_cpu)
logger.debug('qsub command: \n' + submit_cmd)
# Submit the job and grab job ID
output = subprocess.check_output(submit_cmd, shell=True)
self.job_id = _parse_qsub_job_id(output)
logger.debug("Submitted job to qsub with response:\n" + output)
self._track_job()
# Now delete the temporaries, if they're there.
if (self.tmp_dir and os.path.exists(self.tmp_dir) and not self.dont_remove_tmp_dir):
logger.info('Removing temporary directory %s' % self.tmp_dir)
subprocess.call(["rm", "-rf", self.tmp_dir])
def _track_job(self):
while True:
# Sleep for a little bit
time.sleep(self.poll_time)
# See what the job's up to
# ASSUMPTION: once the job no longer appears in qstat output, it has either finished or failed
qstat_out = subprocess.check_output(['qstat'])
sge_status = _parse_qstat_state(qstat_out, self.job_id)
if sge_status == 'r':
logger.info('Job is running...')
elif sge_status == 'qw':
logger.info('Job is pending...')
elif 'E' in sge_status:
logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
break
elif sge_status == 't' or sge_status == 'u':
# Then the job could either be failed or done.
errors = self._fetch_task_failures()
if not errors:
logger.info('Job is done')
else:
logger.error('Job has FAILED:\n' + '\n'.join(errors))
break
else:
logger.info('Job status is UNKNOWN!')
logger.info('Status is : %s' % sge_status)
raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
"""A local version of SGEJobTask, for easier debugging.
This version skips the ``qsub`` steps and simply runs ``work()``
on the local node, so you don't need to be on an SGE cluster to
use your Task in a test workflow.
"""
def run(self):
self.work()
|
|
"""Logic expressions handling
NOTE
----
at present this is mainly needed for facts.py , feel free however to improve
this stuff for general purpose.
"""
from __future__ import print_function, division
from sympy.core.compatibility import range
def _fuzzy_group(args, quick_exit=False):
"""Return True if all args are True, None if there is any None else False
unless ``quick_exit`` is True (then return None as soon as a second False
is seen).
``_fuzzy_group`` is like ``fuzzy_and`` except that it is more
conservative in returning a False, waiting to make sure that all
arguments are True or False and returning None if any arguments are
None. It also has the capability of permitting only a single False and
returning None if more than one is seen. For example, the presence of a
single transcendental amongst rationals would indicate that the group is
no longer rational; but a second transcendental in the group would make the
determination impossible.
Examples
========
>>> from sympy.core.logic import _fuzzy_group
By default, multiple Falses mean the group is broken:
>>> _fuzzy_group([False, False, True])
False
If multiple Falses mean the group status is unknown then set
`quick_exit` to True so None can be returned when the 2nd False is seen:
>>> _fuzzy_group([False, False, True], quick_exit=True)
But if only a single False is seen then the group is known to
be broken:
>>> _fuzzy_group([False, True, True], quick_exit=True)
False
"""
saw_other = False
for a in args:
if a is True:
continue
if a is None:
return
if quick_exit and saw_other:
return
saw_other = True
return not saw_other
def fuzzy_bool(x):
"""Return True, False or None according to x.
Whereas bool(x) returns True or False, fuzzy_bool allows
for the None value; anything that is not True, False or None also becomes None.
Examples
========
>>> from sympy.core.logic import fuzzy_bool
>>> from sympy.abc import x
>>> fuzzy_bool(x), fuzzy_bool(None)
(None, None)
>>> bool(x), bool(None)
(True, False)
"""
if x is None:
return None
if x in (True, False):
return bool(x)
def fuzzy_and(args):
"""Return True (all True), False (any False) or None.
Examples
========
>>> from sympy.core.logic import fuzzy_and
>>> from sympy import Dummy
If you had a list of objects to test the commutativity of
and you want the fuzzy_and logic applied, passing an
iterator will allow the commutativity to only be computed
as many times as necessary. With this list, False can be
returned after analyzing the first symbol:
>>> syms = [Dummy(commutative=False), Dummy()]
>>> fuzzy_and(s.is_commutative for s in syms)
False
That False would require less work than if a list of pre-computed
items was sent:
>>> fuzzy_and([s.is_commutative for s in syms])
False
"""
rv = True
for ai in args:
ai = fuzzy_bool(ai)
if ai is False:
return False
if rv: # this will stop updating if a None is ever trapped
rv = ai
return rv
def fuzzy_not(v):
"""
Not in fuzzy logic
Return None if `v` is None else `not v`.
Examples
========
>>> from sympy.core.logic import fuzzy_not
>>> fuzzy_not(True)
False
>>> fuzzy_not(None)
>>> fuzzy_not(False)
True
"""
if v is None:
return v
else:
return not v
def fuzzy_or(args):
"""
Or in fuzzy logic. Returns True (any True), False (all False), or None
See the docstrings of fuzzy_and and fuzzy_not for more info. fuzzy_or is
related to the two by the standard De Morgan's law.
>>> from sympy.core.logic import fuzzy_or
>>> fuzzy_or([True, False])
True
>>> fuzzy_or([True, None])
True
>>> fuzzy_or([False, False])
False
>>> print(fuzzy_or([False, None]))
None
"""
return fuzzy_not(fuzzy_and(fuzzy_not(i) for i in args))
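# Hedged check (not part of sympy): fuzzy_or above is defined through De
# Morgan's law; this hypothetical helper just spells the identity out.
def _demo_de_morgan(values):
    # e.g. _demo_de_morgan([True, None]) and _demo_de_morgan([False, None])
    # both return True
    return fuzzy_or(values) == fuzzy_not(fuzzy_and(fuzzy_not(v) for v in values))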
class Logic(object):
"""Logical expression"""
# {} 'op' -> LogicClass
op_2class = {}
def __new__(cls, *args):
obj = object.__new__(cls)
obj.args = args
return obj
def __getnewargs__(self):
return self.args
def __hash__(self):
return hash( (type(self).__name__,) + tuple(self.args) )
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __lt__(self, other):
if self.__cmp__(other) == -1:
return True
return False
def __cmp__(self, other):
if type(self) is not type(other):
a = str(type(self))
b = str(type(other))
else:
a = self.args
b = other.args
return (a > b) - (a < b)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string with space around & and | but none after !.
e.g.
!a & b | c
"""
lexpr = None # current logical expression
schedop = None # scheduled operation
for term in text.split():
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError(
'double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError(
'%s cannot be in the beginning of expression' % term)
schedop = term
continue
if '&' in term or '|' in term:
raise ValueError('& and | must have space around them')
if term[0] == '!':
if len(term) == 1:
raise ValueError('do not include space after "!"')
term = Not(term[1:])
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop](lexpr, term)
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError(
'missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
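# Illustrative sketch of the grammar accepted by Logic.fromstring (the symbol
# names here are arbitrary): '&' and '|' must be surrounded by spaces, '!' must
# be glued to its atom, and parsing is strictly left-to-right with no precedence:
#     Logic.fromstring('!a & b | c')   # equivalent to Or(And(Not('a'), 'b'), 'c')
#     Logic.fromstring('a&b')          # ValueError: & and | must have space around them
#     Logic.fromstring('! a')          # ValueError: do not include space after "!"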
class AndOr_Base(Logic):
def __new__(cls, *args):
bargs = []
for a in args:
if a == cls.op_x_notx:
return a
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = sorted(set(cls.flatten(bargs)), key=hash)
for a in args:
if Not(a) in args:
return cls.op_x_notx
if len(args) == 1:
return args.pop()
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, *args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if isinstance(arg, cls):
args_queue.extend(arg.args)
continue
res.append(arg)
args = tuple(res)
return args
class And(AndOr_Base):
op_x_notx = False
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or( *[Not(a) for a in self.args] )
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i in range(len(self.args)):
arg = self.args[i]
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i + 1:]
orterms = [And( *(arest + (a,)) ) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
else:
return self
class Or(AndOr_Base):
op_x_notx = True
def _eval_propagate_not(self):
# !(a|b|c ...) == !a & !b & !c ...
return And( *[Not(a) for a in self.args] )
class Not(Logic):
def __new__(cls, arg):
if isinstance(arg, str):
return Logic.__new__(cls, arg)
elif isinstance(arg, bool):
return not arg
elif isinstance(arg, Not):
return arg.args[0]
elif isinstance(arg, Logic):
# XXX this is a hack to expand right from the beginning
arg = arg._eval_propagate_not()
return arg
else:
raise ValueError('Not: unknown argument %r' % (arg,))
@property
def arg(self):
return self.args[0]
Logic.op_2class['&'] = And
Logic.op_2class['|'] = Or
Logic.op_2class['!'] = Not
|
|
#!/usr/bin/env python
# Copyright (c) 2014, Illumina
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pysam
import math
import collections
import os
import sys
import numpy as np
from intervalNode import IntervalNode
def update_progress(progress):
barLength = 10 # Modify this to change the length of the progress bar
status = ""
if progress >= 1:
progress = 1
status = "Done...\r\n"
progress = round(progress,1)
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
def normpdf(xl, mu=0, sigma=1):
l = len(xl)
yl = np.zeros(l)
for i in range(0,l):
u = float((xl[i]-mu) / abs(sigma))
yl[i] = math.exp(-u*u/2) / (math.sqrt(2*math.pi) * abs(sigma))
return yl
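# Worked check for normpdf (an illustrative note, not part of the original
# script): for a standard normal (mu=0, sigma=1) the density at x=0 is
# 1/sqrt(2*pi) ~= 0.3989 and at x=1 it is ~0.2420, so
#     normpdf([0.0, 1.0])  ->  approximately [0.3989, 0.2420]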
def meansd(frq):
"""
Function to calculate mean and standard deviation from a dictionary of frequencies.
    Return a tuple of (mean, spread), where the spread is a weighted mean absolute deviation used as a robust stand-in for the standard deviation.
Arguments:
frq: a dictionary of frequencies: key = insert size, value = frequency
"""
keys = frq.keys()
keys.sort()
w = np.empty(len(keys),np.float)
for i,k in enumerate(keys):
w[i] = frq[k]
x = np.abs(keys)
xbar = np.average(x,weights=w)
    xsd = np.average(np.sqrt(np.power((x - xbar),2)),weights=w)  # weighted mean absolute deviation of x about xbar
return (xbar,xsd)
def MAD(frq):
"""
Function to calculate median and median absolute deviation (MAD) from a dictionary of frequencies.
    Return a tuple of (median, MAD).
Arguments:
frq: a dictionary of frequencies: key = insert size, value = frequency
"""
all_lengths = []
for k, v in frq.iteritems():
new_vals = [k] * int(v)
all_lengths.extend(new_vals)
all_lengths = np.array(sorted(all_lengths))
mid = len(all_lengths)/2
median = all_lengths[mid]
residuals = sorted(abs(all_lengths - median)) # difference between val and median
MAD = residuals[mid] # median of residuals
#print MAD
return median, MAD
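# Illustrative example for MAD (hypothetical numbers): for the frequency table
# {100: 2, 110: 2, 400: 1} the expanded lengths are [100, 100, 110, 110, 400],
# the median is 110 and the sorted residuals are [0, 0, 10, 10, 290], so
#     MAD({100: 2, 110: 2, 400: 1})  ->  (110, 10)
# The caller later multiplies the MAD by 1.4826, the usual consistency factor
# that turns a MAD into a robust estimate of a Gaussian standard deviation.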
def find_intersections(tree, s, e):
"""
Function to find inserts that bridge a region of a contig.
Arguments:
tree: interval tree of start/end positions of mate pair inserts
s: start position of interval on contig
e: end position of interval on contig
Return a list of nodes from tree whose start and end positions span the interval given by s and e.
"""
# find all reads that bridge a gap
intersections = []
tree.intersect(s, e, lambda x: intersections.append(x)) # see interval node for implementation
return intersections
def get_insertlengths(reads):
"""
Function to calculate interval sizes of a set of mate pairs.
Arguments:
    reads: a list of mate pairs as interval tree node objects
Return two numpy arrays: an array of insert sizes (integers) and an array of strand alignments (boolean)
"""
distances = []
strands = []
for read in reads:
distances.append(read.end - read.start) # insert length
strands.append(read.other[1]) # boolean: correct alignment
return np.array(distances), np.array(strands)
def probability_of_readlength(read_length, mu, sigma, pi1, L):
"""
Function to calculate the probability that mate pair insert sizes are not anomalous.
    Return an array of probabilities.
Arguments:
read_length: a numpy array of insert sizes
mu: mean insert size (float)
sigma: insert size standard deviation (float)
pi1: prior probability of being anomalous (float)
L: length of contig to which the reads in read_length are aligned
"""
p_0 = pi1 * (1 / float(L)) # anomaly
# probability of drawing from a gaussian with mean mu and std sigma
p_1 = (1 - pi1) * normpdf(read_length,mu,sigma)
p_total = p_1 / (p_0 + p_1)
return p_total
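# Worked example for probability_of_readlength (hypothetical numbers): with
# pi1=0.01, L=100000, mu=3000 and sigma=300, the anomaly component contributes
# p_0 = 0.01/100000 = 1e-7 regardless of the observed size. An insert at the
# mean has p_1 ~= 0.99 * 0.00133 ~= 1.3e-3, giving a posterior support of
# ~0.9999, while an insert 6 sigma away has p_1 ~= 2e-11 and the posterior
# collapses to ~2e-4, i.e. it is treated as almost certainly anomalous.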
class aligned_assembly:
"""
Class to hold a set of mate pair or paired end reads aligned to the scaffolded genome assembly
"""
def __init__(self, bamfile, fastafile, min_size, threshold, step, window, minmapq, maxinsert, fraction, prior):
"""
Initialiser function for the aligned assembly class.
Arguments:
bamfile: a sorted bam file of reads aligned to the scaffolded assembly
fastafile: the scaffolded assembly in fasta format
min_size: the minimum size contig to consider for analysis (integer)
threshold: threshold in Z score below which a misassembly is called (float)
step: step size to walk contigs (integer)
window: width of window around each position from which mate pair insert sizes are fetched (integer)
minmapq: the minimum mapq value for which paired reads are evaluated (float)
maxinsert: the maximum insert size for which genome population statistics are calculated
        fraction: minimum fraction of read pairs with correct orientation to call support for the assembly.
        prior: prior probability that a mate pair insert size is anomalous (float)
        """
# initialising user parameters
self.minmapq = minmapq
self.maxinsert = maxinsert
self.threshold = threshold
self.step = step
self.window = window
self.fraction = fraction
self.prior = prior
self.sam = pysam.Samfile(bamfile, "rb" )
self.fasta = pysam.Fastafile(fastafile)
self.min_size = min_size
# getting reads from bamfile
self.all_reads = self.sam.fetch()
self.references = self.sam.references
self.lengths = self.sam.lengths
# refdict: key=contig, val=contig length
# read_stock: key=contig, val=aligned reads
self.refdict = {}
self.read_stock = {}
for k,v in zip(self.references,self.lengths):
self.refdict[k]=v
self.read_stock[k] = self.get_reads(k, 0, v)
self.sizes = self.get_read_size_distribution()
self.isize_median, self.isize_MAD = MAD(self.sizes)
self.isize_mean, _ = meansd(self.sizes)
self.isize_sd = 1.4826 * self.isize_MAD
#print self.isize_sd, self.isize_MAD, 1.4826 * self.isize_MAD
def get_read_size_distribution(self):
"""
Function to calculate global insert size distribution across the whole assembly
Return a frequency table of insert sizes as a dictionary with key = insert size, value = frequency
"""
frq = collections.defaultdict(int) # dictionary of insert sizes
found = {}
for read in self.all_reads:
            # accept read based on mapq, contig alignment and insert size
if (read.mapq > self.minmapq) and (read.rnext == read.tid) and (abs(read.tlen) < self.maxinsert):
if read.qname in found and found[read.qname][0]==read.tid:
mate = found[read.qname]
isize = abs(max( mate[1]+mate[2]-read.pos,read.pos+read.rlen-mate[1] ))
frq[isize] += 1
else:
found[read.qname] = (read.tid,read.pos,read.rlen)
return frq
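    # Note on the insert-size bookkeeping above (a clarifying sketch): the first
    # read of a pair is cached in 'found' keyed by query name; when its mate is
    # seen on the same contig, the insert is taken as the larger of the two
    # possible outer spans, max(mate_end - read_start, read_end - mate_start),
    # which is robust to either read of the pair appearing first in the sorted BAM.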
def get_reads(self, ref, start, end):
"""
Function to fetch reads aligned to a specific part of the assembled genome and return a list of aligned reads, where each list entry is a tuple:
(read start position, read end position, read name, strand alignment) and strand alignment is a boolean indicating whether the two reads of a read pair align correctly to opposite strands.
Reads are fetched that align to contig "ref" between positions "start" and "end".
Arguments:
ref: the name of the contig from which aligned reads are to be fetched.
start: the position on the contig from which to start fetching aligned reads
end: the position on the contig from which to end fetching aligned reads
"""
# fetch all reads within a region
# insert size: gap between end of one mate and start of next
reads = self.sam.fetch(ref, start, end)
read_stock = []
found = {}
for read in reads:
if (read.rnext == read.tid):
if read.qname in found and found[read.qname][0]==read.tid: # if mate maps to same contig
mate = found[read.qname] # fetch mate
# correctly ordering mates
if mate[1] > read.pos:
start_pos = read.pos + read.rlen
end_pos = mate[1]
else:
start_pos = mate[1] + mate[2]
end_pos = read.pos
# add mates to list of mates on that contig
# include strand orientation info
correct_strands = ((read.is_reverse) and not (read.mate_is_reverse)) or ((read.mate_is_reverse) and not (read.is_reverse))
read_stock.append((start_pos, end_pos, read.qname, correct_strands))
else:
found[read.qname] = (read.tid,read.pos,read.rlen) # haven't reached mate yet
return read_stock
#@profile
def make_tree(self, ref):
"""
Function to construct an interval tree from reads aligning to a contig and return the interval tree.
        The interval tree stores nodes with properties start (start position of interval), end (end position of interval) and other,
which is a tuple of the mate pair name (string) and the strand alignment of the two paired reads (boolean).
Arguments:
ref: Reference ID of the contig for which the interval tree is to be constructed
"""
bridges = self.read_stock[ref]
# check if contig has any alignments
if not bridges:
return None
# insert first interval into tree
s1, e1, name, correct_strands = bridges[0]
tree = IntervalNode(s1, e1, other=(name, correct_strands))
# insert the rest of the intervals
for (start, end, name, correct_strands) in bridges[1:]:
tree = tree.insert(start, end, other=(name, correct_strands))
return tree
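    # Sketch of how the tree is used downstream (illustrative; 'assembly' is a
    # hypothetical aligned_assembly instance):
    #     tree = assembly.make_tree('scaffold_1')
    #     bridging = find_intersections(tree, 950, 1050)
    # returns every cached mate-pair interval that spans positions 950-1050 of
    # 'scaffold_1'; each node carries other=(read name, correct_strands).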
def get_read_mappings(self, ref):
"""
        Function to calculate the fraction of read pairs within a contig that align correctly to opposite strands.
Return five arrays: the positions at which strand alignment was evaluated, the fraction correctly aligned, the fraction incorrectly aligned to the same strand, the unmapped
fraction and the fraction that have some other alignment issue.
Arguments:
        ref: the reference id of the contig to be evaluated
"""
dump_val = self.step
positions = []
same_strand = 0
opp_strand = 0
unmapped = 0
other = 0
# arrays of read mapping behaviour
good_ratio = []
unmapped_ratio = []
bad_ratio = []
other_ratio = []
mini_pos = []
reads = self.sam.fetch(reference = ref)
# note that iterating in this manner works because the bam file is sorted.
        # create arrays containing the fraction of correctly / incorrectly aligned reads
for i, r in enumerate(reads):
mini_pos.append(r.pos)
if r.mate_is_unmapped:
unmapped += 1
elif ((r.is_reverse) and not (r.mate_is_reverse)) or ((r.mate_is_reverse) and not (r.is_reverse)):
same_strand += 1
elif((r.is_reverse) and (r.mate_is_reverse)) or (not (r.mate_is_reverse) and not (r.is_reverse)):
opp_strand += 1
else:
other += 1
if (i+1) % dump_val == 0:
total = same_strand + opp_strand + unmapped + other
good_ratio.append(float(same_strand) / total)
bad_ratio.append(float(opp_strand) / total)
unmapped_ratio.append(float(unmapped) / total)
other_ratio.append(float(other) / total)
same_strand = 0
opp_strand = 0
unmapped = 0
other = 0
positions.append(np.mean(mini_pos))
mini_pos = []
return np.array(positions), np.array(good_ratio), np.array(bad_ratio), np.array(unmapped_ratio), np.array(other_ratio)
def get_mapping_anomalies(self):
"""
Function to determine the frequency of strand mapping anomalies across the entire genome assembly.
Calls get_read_mappings for each contig larger than the aligned_assembly.min_size and returns:
1) a dictionary with keys = contig reference IDs; values = list of positions and strand alignment ratios as described in get_read_mappings
        2) a dictionary of anomalies with keys = contig reference IDs, values = [list of positions for which the ratio of correctly aligned strands < the 'fraction' parameter (default 0.75), corresponding ratio of correctly aligned strands]
"""
mapping_ratios = {} # key=contig, val=list of arrays of mapping behaviours
anomalies = {}
for w, (ref, length) in enumerate(self.refdict.iteritems()):
if length > self.min_size: # consider only big contigs
positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio = self.get_read_mappings(ref)
map_criterion = good_ratio < self.fraction
pos_anomalies = positions[map_criterion]
map_anomalies = good_ratio[map_criterion]
mapping_ratios[ref] = [positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio]
anomalies[ref] = [pos_anomalies, map_anomalies]
return mapping_ratios, anomalies
def get_size_anomalies(self):
"""
Function to determine the frequency of insert size anomalies across the entire genome assembly.
Calls probability_of_readlength for each contig larger than the aligned_assembly.min_size and returns:
1) a dictionary with keys = contig reference IDs; values = array of Zscores as described in probability_of_readlength
        2) a dictionary of anomalies with keys = contig reference IDs, values = [list of positions whose Z score falls below the configured threshold, corresponding z-score values]
"""
anomalies = {}
zscores = {}
all_probabilities = []
stock_probabilities = {}
for w, (ref, length) in enumerate(self.refdict.iteritems()):
if length > self.min_size:
tree = self.make_tree(ref) # build tree from all reads aligning to a contig
if not tree:
continue
positions = np.arange(self.step, length - self.window, self.step)
probabilities = []
print "\nProcessing ",ref
npos = float(len(positions))
for idx,pos in enumerate(positions):
# update progress bar
update_progress(idx/npos)
bridges = np.array(find_intersections(tree, pos-self.window, pos+self.window)) # fetch reads in windows across contig
bridge_lengths, strand_alignment = get_insertlengths(bridges) # get insert sizes and mapping behaviour
prob_lengths = probability_of_readlength(bridge_lengths, self.isize_mean, self.isize_sd, self.prior, length) # get prob. insert sizes from null
condition = strand_alignment == 1
D = np.sum(prob_lengths[condition]) # D is total assembly support
probabilities.append(D)
all_probabilities.append(D)
stock_probabilities[ref] = [positions, np.array(probabilities)]
p_mean = np.mean(np.array(all_probabilities)) # get contig mean and variance
p_std = np.std(np.array(all_probabilities))
if p_std == 0:
p_std = 0.01
for ref, [positions, probs] in stock_probabilities.iteritems():
zscore = (probs - p_mean) / p_std # calculate position z score from contig mean, std
# anomalies have Zscore < Threshold.
# Note: threshold should be negative
z_criterion = (zscore < self.threshold)
z_anomalies = zscore[z_criterion]
#print ref, z_anomalies
pos_anomalies = positions[z_criterion]
zscores[ref] = [positions, zscore]
            anomalies[ref] = [pos_anomalies, z_anomalies] # list of anomaly locations and scores
return zscores, anomalies, tree
def get_anomalies(self, outfile, trim, img_name=None):
"""
Function to determine the frequency of anomalous mate pair behaviour across the entire genome assembly and return a dictionary where:
key = contig reference IDs,
value = list of postions within that contig where an assembly error is identified and the contig should be broken.
Calls get_size_anomalies and get_mapping_anomalies for each contig larger than the aligned_assembly.min_size; makes a .csv file listing for each contig the positions of identified misassemblies and their corresponding anomalous scores.
Arguments:
outfile: name of file (including filepath) to store the list of contig misassemblies.
Keyword Arguments:
img_name: name of file (including filepath, not including filetype) to store plots of alignment quality
"""
#print "Anomaly detection"
# get anomaly positions
zscores, size_anomalies, tree = self.get_size_anomalies()
map_ratios, map_anomalies = self.get_mapping_anomalies()
break_points = {}
# # make a wiggle file
# print "Writing wiggle file"
# wig_file = "%s.wig" % ("/media/rmurphy/sandbox/bash_scripts/test_TB")
# with open(wig_file, "w") as wig:
# #wig.write("track type=wiggle_0 graphType=line color=0,0,255 altColor=255,0,0 name='Zscore' graphType=heatmap midRange=35:65 midColor=0,255,0\n")
# for ref, [positions, probs] in zscores.iteritems():
# print ref
# wig.write("fixedStep chrom=%s start=%s step=%s span=%s\n" % (ref, 1, self.step, 1))
# #print zscores[ref]
# #for vals in zscores[ref]:
# # positions = vals[0]
# # probs = vals[1]
# for prob in probs:
# wig.write("%s\n" % (prob))
# wig.write("\n")
for w, (ref, [positions, probs]) in enumerate(zscores.iteritems()):
# write all Z scores to a csv file
#print "Writing Z scores to file (%s)" % ref
for pos, prob in zip(positions, probs):
outfile.write("%s %s %s\n" %(ref, pos, prob))
z_pos, z_anomalies = size_anomalies[ref]
#print "z_anomalies:", z_anomalies
#print ref, z_pos
map_positions, good_ratio, bad_ratio, unmapped_ratio, other_ratio = map_ratios[ref]
pos_anomalies, map_anom = map_anomalies[ref]
anomaly_positions = sorted(z_pos.tolist())
#print ref, anomaly_positions
# make list of positions to break this contig
break_points[ref] = []
if len(anomaly_positions) != 0:
current = []
for p in range(len(anomaly_positions)):
anom_pos = anomaly_positions[p]
if current == []:
current.append(anom_pos)
else:
                        if anom_pos - current[-1] <= trim:
                            # this anomaly is close to the current cluster, so extend it
                            current.append(anom_pos)
                        else:
                            # well separated: flush the mean of the current cluster and start a new one
                            break_points[ref].append(np.mean(current))
                            current = [anom_pos]
if current != []:
break_points[ref].append(np.mean(current))
#print "Breakpoints for ",ref, break_points[ref]
if img_name != None:
# plot zscores and anomalies
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
plt.subplots_adjust(bottom=0.15)
ax1.set_xlim([0, max(map_positions)])
ax1.set_xlabel("Position",size=24)
ax1.set_ylabel("Assembly Support", size=24)
plt.tick_params(axis='both', which='major', labelsize=20)
lns1 = ax1.plot(positions, probs, c="k", label="Support")
#plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
anomalies_to_plot = sorted(break_points[ref])
anomalies_y = [-10] * len(anomalies_to_plot)
ax1.scatter(anomalies_to_plot, anomalies_y, c="r", marker = "o")
#print "anomalies", anomalies_to_plot
# if name given, save image as .pdf and .png
name = img_name + "_%s.pdf" % ref
plt.savefig(name)
name = img_name + "_%s.png" % ref
plt.savefig(name)
plt.cla()
return break_points
def breakContigs_double(self,outfile, breakpoints, trim):
"""
        Function to break contigs at positions identified as assembly errors and write a new fasta file containing all contigs (both altered and unaltered).
        Makes a two-point break at each identified misassembly position, splitting 'trim' bases upstream and downstream of the misassembly and (currently) excluding the misassembled region.
Arguments:
outfile: name of the new fasta file (including filepath)
breakpoints: dictionary of misassemblies. key = contig reference ID, value = list of misassembly positions within the contig
        trim: distance, in bases, to trim from each edge of a breakpoint to remove the misassembly (integer)
"""
#for k, v in breakpoints.iteritems():
# print breakpoints
#print k, v
newcontigs = []
for contig, length in self.refdict.iteritems():
#dna = self.fasta[contig] # sequence of contig
dna = self.fasta.fetch(reference=contig) # sequence of contig
if len(dna) <= 0:
print >> sys.stderr, "Cannot find BAM contig",contig," in Fasta file. Aborting."
sys.exit()
if contig in breakpoints:
splits = breakpoints[contig]
splits.sort()
prev = 0
for s in splits: # iterate through breakpoints
#print s
if (s - prev > trim) and ((length - s) > trim):
newcontigs.append((contig,dna[int(prev):int(s-trim)])) # trim and append section before break
print "Breaking",contig,"at",prev
prev = s + trim # trim other end of break
newcontigs.append((contig,dna[int(prev):]))
else:
newcontigs.append((contig,dna))
# write new contigs to file
        newcontigs.sort(key=lambda tup: len(tup[1]), reverse=True)  # sort by sequence length, longest first
for count, tup in enumerate(newcontigs):
name = ">CONTIG_%d_length_%d_%s"%(count,len(tup[1]),tup[0])
#print name
outfile.write(name)
outfile.write("\n")
outfile.write(tup[1])
outfile.write("\n")
def main():
# read command line arguments
import argparse
parser = argparse.ArgumentParser(description='Routine to identify and correct large-scale misassemblies in de novo assemblies')
parser.add_argument('bam', metavar='bam', type=str, help='bam')
parser.add_argument('fasta', metavar='fasta', type=str, help='scaffold fasta')
parser.add_argument('outfile', metavar='outfile', type=str, help='Output file name')
parser.add_argument('newfasta', metavar='newfasta', type=str, help='Fasta file for new contigs, including filepath')
parser.add_argument('-min_size', metavar='min_size', type=int, default=10000, help='Minimum contig size to analyse')
parser.add_argument('-img_name', metavar ='img_name', type=str, default=None, help='Name under which to save (optional) graphs of alignment quality. Default value: None (no graphs produced)')
    parser.add_argument('-trim', metavar ='trim', type=int, default=4000, help='Number of bases to trim from each side of an identified misassembly. Default value: 4000')
parser.add_argument('-T', metavar ='T', type=float, default= -4.0, help='Threshold in Z score below which a misassembly is called. Default value: -4.0')
parser.add_argument('-step_size', metavar ='step_size', type=int, default=1000, help='Step-size in bases to traverse contigs. Default value: 1000')
parser.add_argument('-window', metavar ='window', type=int, default=200, help='Window size across which bridging mate pairs are evaluated. Default value: 200')
parser.add_argument('-minmapq', metavar ='minmapq', type=int, default=40, help='Minimum MapQ value, above which a read pair is included in calculating population statistics. Default value: 40')
parser.add_argument('-maxinsert', metavar ='maxinsert', type=int, default=30000, help='Maximum insert size, below which a read pair is included in calculating population statistics. Default value: 30000')
    parser.add_argument('-fraction', metavar ='fraction', type=float, default=0.75, help='Minimum fraction of read pairs with correct orientation to call support for the assembly. Default value: 0.75')
parser.add_argument('-prior', metavar ='prior', type=float, default=0.01, help='Prior probablility that the insert size is anomalous. Default value: 0.01')
args = parser.parse_args()
# make assembly object
f = aligned_assembly(args.bam, args.fasta, args.min_size, args.T, args.step_size, args.window, args.minmapq, args.maxinsert, args.fraction, args.prior)
print "Search for anomalous alignments"
# find anomalies
with open(args.outfile, "w") as of:
bps = f.get_anomalies(of, args.trim, args.img_name)
# break contig at identified anomalies
print "\nBreaking contigs"
with open(args.newfasta, "w") as outfasta:
f.breakContigs_double(outfasta, bps, args.trim)
if __name__ == "__main__":
main()
|
|
#
# File : building.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
# 2015-07-25 Bernard Add LOCAL_CCFLAGS/LOCAL_CPPPATH/LOCAL_CPPDEFINES for
# group definition.
#
import os
import sys
import string
import utils
import operator
from SCons.Script import *
from utils import _make_path_relative
from mkdist import do_copy_file
BuildOptions = {}
Projects = []
Rtt_Root = ''
Env = None
# SCons PreProcessor patch
def start_handling_includes(self, t=None):
"""
Causes the PreProcessor object to start processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates True, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated
False.
"""
d = self.dispatch_table
p = self.stack[-1] if self.stack else self.default_table
for k in ('import', 'include', 'include_next', 'define'):
d[k] = p[k]
def stop_handling_includes(self, t=None):
"""
Causes the PreProcessor object to stop processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates False, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated True.
"""
d = self.dispatch_table
d['import'] = self.do_nothing
d['include'] = self.do_nothing
d['include_next'] = self.do_nothing
d['define'] = self.do_nothing
PatchedPreProcessor = SCons.cpp.PreProcessor
PatchedPreProcessor.start_handling_includes = start_handling_includes
PatchedPreProcessor.stop_handling_includes = stop_handling_includes
class Win32Spawn:
def spawn(self, sh, escape, cmd, args, env):
# deal with the cmd build-in commands which cannot be used in
# subprocess.Popen
if cmd == 'del':
for f in args[1:]:
try:
os.remove(f)
except Exception as e:
                    print('Error removing file: ' + str(e))
return -1
return 0
import subprocess
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
# Make sure the env is constructed by strings
_e = dict([(k, str(v)) for k, v in env.items()])
# Windows(tm) CreateProcess does not use the env passed to it to find
# the executables. So we have to modify our own PATH to make Popen
# work.
old_path = os.environ['PATH']
os.environ['PATH'] = _e['PATH']
try:
proc = subprocess.Popen(cmdline, env=_e, shell=False)
except Exception as e:
print('Error in calling command:' + cmdline.split(' ')[0])
print('Exception: ' + os.strerror(e.errno))
if (os.strerror(e.errno) == "No such file or directory"):
print ("\nPlease check Toolchains PATH setting.\n")
return e.errno
finally:
os.environ['PATH'] = old_path
return proc.wait()
# generate cconfig.h file
def GenCconfigFile(env, BuildOptions):
import rtconfig
if rtconfig.PLATFORM == 'gcc':
contents = ''
if not os.path.isfile('cconfig.h'):
import gcc
gcc.GenerateGCCConfig(rtconfig)
# try again
if os.path.isfile('cconfig.h'):
f = open('cconfig.h', 'r')
if f:
contents = f.read()
f.close()
prep = PatchedPreProcessor()
prep.process_contents(contents)
options = prep.cpp_namespace
BuildOptions.update(options)
# add HAVE_CCONFIG_H definition
env.AppendUnique(CPPDEFINES = ['HAVE_CCONFIG_H'])
def PrepareBuilding(env, root_directory, has_libcpu=False, remove_components = []):
import rtconfig
global BuildOptions
global Projects
global Env
global Rtt_Root
# ===== Add option to SCons =====
AddOption('--dist',
dest = 'make-dist',
action = 'store_true',
default = False,
help = 'make distribution')
AddOption('--dist-strip',
dest = 'make-dist-strip',
action = 'store_true',
default = False,
help = 'make distribution and strip useless files')
AddOption('--dist-ide',
dest = 'make-dist-ide',
action = 'store_true',
default = False,
help = 'make distribution for RT-Thread Studio IDE')
AddOption('--project-path',
dest = 'project-path',
type = 'string',
default = None,
help = 'set dist-ide project output path')
AddOption('--project-name',
dest = 'project-name',
type = 'string',
default = None,
help = 'set project name')
AddOption('--reset-project-config',
dest = 'reset-project-config',
action = 'store_true',
default = False,
help = 'reset the project configurations to default')
AddOption('--cscope',
dest = 'cscope',
action = 'store_true',
default = False,
help = 'Build Cscope cross reference database. Requires cscope installed.')
AddOption('--clang-analyzer',
dest = 'clang-analyzer',
action = 'store_true',
default = False,
help = 'Perform static analyze with Clang-analyzer. ' + \
'Requires Clang installed.\n' + \
'It is recommended to use with scan-build like this:\n' + \
'`scan-build scons --clang-analyzer`\n' + \
              'If things go well, scan-build will instruct you to invoke scan-view.')
AddOption('--buildlib',
dest = 'buildlib',
type = 'string',
help = 'building library of a component')
AddOption('--cleanlib',
dest = 'cleanlib',
action = 'store_true',
default = False,
help = 'clean up the library by --buildlib')
AddOption('--target',
dest = 'target',
type = 'string',
help = 'set target project: mdk/mdk4/mdk5/iar/vs/vsc/ua/cdk/ses/makefile/eclipse/codelite/cmake')
AddOption('--stackanalysis',
dest = 'stackanalysis',
action = 'store_true',
default = False,
help = 'thread stack static analysis')
AddOption('--genconfig',
dest = 'genconfig',
action = 'store_true',
default = False,
help = 'Generate .config from rtconfig.h')
AddOption('--useconfig',
dest = 'useconfig',
type = 'string',
help = 'make rtconfig.h from config file.')
AddOption('--verbose',
dest = 'verbose',
action = 'store_true',
default = False,
help = 'print verbose information during build')
Env = env
Rtt_Root = os.path.abspath(root_directory)
# make an absolute root directory
RTT_ROOT = Rtt_Root
Export('RTT_ROOT')
# set RTT_ROOT in ENV
Env['RTT_ROOT'] = Rtt_Root
# set BSP_ROOT in ENV
Env['BSP_ROOT'] = Dir('#').abspath
sys.path = sys.path + [os.path.join(Rtt_Root, 'tools')]
# {target_name:(CROSS_TOOL, PLATFORM)}
tgt_dict = {'mdk':('keil', 'armcc'),
'mdk4':('keil', 'armcc'),
'mdk5':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'vsc' : ('gcc', 'gcc'),
'cb':('keil', 'armcc'),
'ua':('gcc', 'gcc'),
'cdk':('gcc', 'gcc'),
'makefile':('gcc', 'gcc'),
'eclipse':('gcc', 'gcc'),
'ses' : ('gcc', 'gcc'),
'cmake':('gcc', 'gcc'),
'cmake-armclang':('keil', 'armclang'),
'codelite' : ('gcc', 'gcc')}
tgt_name = GetOption('target')
if tgt_name:
# --target will change the toolchain settings which clang-analyzer is
# depend on
if GetOption('clang-analyzer'):
print ('--clang-analyzer cannot be used with --target')
sys.exit(1)
SetOption('no_exec', 1)
try:
rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name]
# replace the 'RTT_CC' to 'CROSS_TOOL'
os.environ['RTT_CC'] = rtconfig.CROSS_TOOL
utils.ReloadModule(rtconfig)
except KeyError:
            print('Unknown target: ' + tgt_name + '. Available targets: ' + ', '.join(tgt_dict.keys()))
sys.exit(1)
# auto change the 'RTT_EXEC_PATH' when 'rtconfig.EXEC_PATH' get failed
if not os.path.exists(rtconfig.EXEC_PATH):
if 'RTT_EXEC_PATH' in os.environ:
# del the 'RTT_EXEC_PATH' and using the 'EXEC_PATH' setting on rtconfig.py
del os.environ['RTT_EXEC_PATH']
utils.ReloadModule(rtconfig)
    # add compatibility with Keil MDK 4.6 which changes the directory of armcc.exe
if rtconfig.PLATFORM == 'armcc' or rtconfig.PLATFORM == 'armclang':
if rtconfig.PLATFORM == 'armcc' and not os.path.isfile(os.path.join(rtconfig.EXEC_PATH, 'armcc.exe')):
if rtconfig.EXEC_PATH.find('bin40') > 0:
rtconfig.EXEC_PATH = rtconfig.EXEC_PATH.replace('bin40', 'armcc/bin')
Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('RV31', 'armcc')
# reset AR command flags
env['ARCOM'] = '$AR --create $TARGET $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['LIBLINKPREFIX'] = ''
env['LIBLINKSUFFIX'] = '.lib'
env['LIBDIRPREFIX'] = '--userlibpath '
elif rtconfig.PLATFORM == 'iar':
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.a'
env['LIBLINKPREFIX'] = ''
env['LIBLINKSUFFIX'] = '.a'
env['LIBDIRPREFIX'] = '--search '
# patch for win32 spawn
if env['PLATFORM'] == 'win32':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
if env['PLATFORM'] == 'win32':
os.environ['PATH'] = rtconfig.EXEC_PATH + ";" + os.environ['PATH']
else:
os.environ['PATH'] = rtconfig.EXEC_PATH + ":" + os.environ['PATH']
# add program path
env.PrependENVPath('PATH', os.environ['PATH'])
# add rtconfig.h/BSP path into Kernel group
DefineGroup("Kernel", [], [], CPPPATH=[str(Dir('#').abspath)])
# add library build action
act = SCons.Action.Action(BuildLibInstallAction, 'Install compiled library... $TARGET')
bld = Builder(action = act)
Env.Append(BUILDERS = {'BuildLib': bld})
# parse rtconfig.h to get used component
PreProcessor = PatchedPreProcessor()
f = open('rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
if GetOption('clang-analyzer'):
# perform what scan-build does
env.Replace(
CC = 'ccc-analyzer',
CXX = 'c++-analyzer',
# skip as and link
LINK = 'true',
AS = 'true',)
env["ENV"].update(x for x in os.environ.items() if x[0].startswith("CCC_"))
        # only check, don't compile. ccc-analyzer uses CCC_CC as the CC.
# fsyntax-only will give us some additional warning messages
env['ENV']['CCC_CC'] = 'clang'
env.Append(CFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
env['ENV']['CCC_CXX'] = 'clang++'
env.Append(CXXFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
        # remove the POST_ACTION as it will cause meaningless errors (file not
# found or something like that).
rtconfig.POST_ACTION = ''
# generate cconfig.h file
GenCconfigFile(env, BuildOptions)
# auto append '_REENT_SMALL' when using newlib 'nano.specs' option
if rtconfig.PLATFORM == 'gcc' and str(env['LINKFLAGS']).find('nano.specs') != -1:
env.AppendUnique(CPPDEFINES = ['_REENT_SMALL'])
if GetOption('genconfig'):
from genconf import genconfig
genconfig()
exit(0)
if GetOption('stackanalysis'):
from WCS import ThreadStackStaticAnalysis
ThreadStackStaticAnalysis(Env)
exit(0)
if env['PLATFORM'] != 'win32':
AddOption('--menuconfig',
dest = 'menuconfig',
action = 'store_true',
default = False,
help = 'make menuconfig for RT-Thread BSP')
if GetOption('menuconfig'):
from menuconfig import menuconfig
menuconfig(Rtt_Root)
exit(0)
AddOption('--pyconfig',
dest = 'pyconfig',
action = 'store_true',
default = False,
help = 'Python GUI menuconfig for RT-Thread BSP')
AddOption('--pyconfig-silent',
dest = 'pyconfig_silent',
action = 'store_true',
default = False,
                  help = "Don't show the pyconfig window")
if GetOption('pyconfig_silent'):
from menuconfig import guiconfig_silent
guiconfig_silent(Rtt_Root)
exit(0)
elif GetOption('pyconfig'):
from menuconfig import guiconfig
guiconfig(Rtt_Root)
exit(0)
configfn = GetOption('useconfig')
if configfn:
from menuconfig import mk_rtconfig
mk_rtconfig(configfn)
exit(0)
if not GetOption('verbose'):
# override the default verbose command string
env.Replace(
ARCOMSTR = 'AR $TARGET',
ASCOMSTR = 'AS $TARGET',
ASPPCOMSTR = 'AS $TARGET',
CCCOMSTR = 'CC $TARGET',
CXXCOMSTR = 'CXX $TARGET',
LINKCOMSTR = 'LINK $TARGET'
)
# fix the linker for C++
if GetDepend('RT_USING_CPLUSPLUS'):
if env['LINK'].find('gcc') != -1:
env['LINK'] = env['LINK'].replace('gcc', 'g++')
    # we need to separate the variant_dir for BSPs and the kernels. BSPs could
# have their own components etc. If they point to the same folder, SCons
# would find the wrong source code to compile.
bsp_vdir = 'build'
kernel_vdir = 'build/kernel'
# board build script
objs = SConscript('SConscript', variant_dir=bsp_vdir, duplicate=0)
# include kernel
objs.extend(SConscript(Rtt_Root + '/src/SConscript', variant_dir=kernel_vdir + '/src', duplicate=0))
# include libcpu
if not has_libcpu:
objs.extend(SConscript(Rtt_Root + '/libcpu/SConscript',
variant_dir=kernel_vdir + '/libcpu', duplicate=0))
# include components
objs.extend(SConscript(Rtt_Root + '/components/SConscript',
variant_dir=kernel_vdir + '/components',
duplicate=0,
exports='remove_components'))
# include testcases
if os.path.isfile(os.path.join(Rtt_Root, 'examples/utest/testcases/SConscript')):
objs.extend(SConscript(Rtt_Root + '/examples/utest/testcases/SConscript',
variant_dir=kernel_vdir + '/examples/utest/testcases',
duplicate=0))
return objs
def PrepareModuleBuilding(env, root_directory, bsp_directory):
import rtconfig
global BuildOptions
global Env
global Rtt_Root
# patch for win32 spawn
if env['PLATFORM'] == 'win32':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
Env = env
Rtt_Root = root_directory
# parse bsp rtconfig.h to get used component
PreProcessor = PatchedPreProcessor()
f = open(bsp_directory + '/rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
# add build/clean library option for library checking
AddOption('--buildlib',
dest='buildlib',
type='string',
help='building library of a component')
AddOption('--cleanlib',
dest='cleanlib',
action='store_true',
default=False,
help='clean up the library by --buildlib')
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
def GetConfigValue(name):
assert type(name) == str, 'GetConfigValue: only string parameter is valid'
try:
return BuildOptions[name]
except:
return ''
def GetDepend(depend):
building = True
if type(depend) == type('str'):
if not depend in BuildOptions or BuildOptions[depend] == 0:
building = False
elif BuildOptions[depend] != '':
return BuildOptions[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not item in BuildOptions or BuildOptions[item] == 0:
building = False
return building
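# Illustrative use of GetDepend (the macro names are examples only): after
# PrepareBuilding() has parsed rtconfig.h, a line such as
#     #define RT_USING_SERIAL
# makes GetDepend('RT_USING_SERIAL') truthy, while
#     GetDepend(['RT_USING_SERIAL', 'RT_USING_DMA'])
# is True only if every listed macro is defined and non-zero.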
def LocalOptions(config_filename):
from SCons.Script import SCons
# parse wiced_config.h to get used component
PreProcessor = SCons.cpp.PreProcessor()
f = open(config_filename, 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
local_options = PreProcessor.cpp_namespace
return local_options
def GetLocalDepend(options, depend):
building = True
if type(depend) == type('str'):
if not depend in options or options[depend] == 0:
building = False
elif options[depend] != '':
return options[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not item in options or options[item] == 0:
building = False
return building
def AddDepend(option):
BuildOptions[option] = 1
def MergeGroup(src_group, group):
src_group['src'] = src_group['src'] + group['src']
if 'CCFLAGS' in group:
if 'CCFLAGS' in src_group:
src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS']
else:
src_group['CCFLAGS'] = group['CCFLAGS']
if 'CPPPATH' in group:
if 'CPPPATH' in src_group:
src_group['CPPPATH'] = src_group['CPPPATH'] + group['CPPPATH']
else:
src_group['CPPPATH'] = group['CPPPATH']
if 'CPPDEFINES' in group:
if 'CPPDEFINES' in src_group:
src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES']
else:
src_group['CPPDEFINES'] = group['CPPDEFINES']
if 'ASFLAGS' in group:
if 'ASFLAGS' in src_group:
src_group['ASFLAGS'] = src_group['ASFLAGS'] + group['ASFLAGS']
else:
src_group['ASFLAGS'] = group['ASFLAGS']
# for local CCFLAGS/CPPPATH/CPPDEFINES
if 'LOCAL_CCFLAGS' in group:
if 'LOCAL_CCFLAGS' in src_group:
src_group['LOCAL_CCFLAGS'] = src_group['LOCAL_CCFLAGS'] + group['LOCAL_CCFLAGS']
else:
src_group['LOCAL_CCFLAGS'] = group['LOCAL_CCFLAGS']
if 'LOCAL_CPPPATH' in group:
if 'LOCAL_CPPPATH' in src_group:
src_group['LOCAL_CPPPATH'] = src_group['LOCAL_CPPPATH'] + group['LOCAL_CPPPATH']
else:
src_group['LOCAL_CPPPATH'] = group['LOCAL_CPPPATH']
if 'LOCAL_CPPDEFINES' in group:
if 'LOCAL_CPPDEFINES' in src_group:
src_group['LOCAL_CPPDEFINES'] = src_group['LOCAL_CPPDEFINES'] + group['LOCAL_CPPDEFINES']
else:
src_group['LOCAL_CPPDEFINES'] = group['LOCAL_CPPDEFINES']
if 'LINKFLAGS' in group:
if 'LINKFLAGS' in src_group:
src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS']
else:
src_group['LINKFLAGS'] = group['LINKFLAGS']
if 'LIBS' in group:
if 'LIBS' in src_group:
src_group['LIBS'] = src_group['LIBS'] + group['LIBS']
else:
src_group['LIBS'] = group['LIBS']
if 'LIBPATH' in group:
if 'LIBPATH' in src_group:
src_group['LIBPATH'] = src_group['LIBPATH'] + group['LIBPATH']
else:
src_group['LIBPATH'] = group['LIBPATH']
if 'LOCAL_ASFLAGS' in group:
if 'LOCAL_ASFLAGS' in src_group:
src_group['LOCAL_ASFLAGS'] = src_group['LOCAL_ASFLAGS'] + group['LOCAL_ASFLAGS']
else:
src_group['LOCAL_ASFLAGS'] = group['LOCAL_ASFLAGS']
def _PretreatListParameters(target_list):
while '' in target_list: # remove null strings
target_list.remove('')
while ' ' in target_list: # remove ' '
target_list.remove(' ')
if(len(target_list) == 0):
return False # ignore this list, don't add this list to the parameter
return True # permit to add this list to the parameter
def DefineGroup(name, src, depend, **parameters):
global Env
if not GetDepend(depend):
return []
# find exist group and get path of group
group_path = ''
for g in Projects:
if g['name'] == name:
group_path = g['path']
if group_path == '':
group_path = GetCurrentDir()
group = parameters
group['name'] = name
group['path'] = group_path
if type(src) == type([]):
# remove duplicate elements from list
src = list(set(src))
group['src'] = File(src)
else:
group['src'] = src
if 'CCFLAGS' in group:
target = group['CCFLAGS']
if len(target) > 0:
Env.AppendUnique(CCFLAGS = target)
if 'CPPPATH' in group:
target = group['CPPPATH']
if _PretreatListParameters(target) == True:
paths = []
for item in target:
paths.append(os.path.abspath(item))
target = paths
Env.AppendUnique(CPPPATH = target)
if 'CPPDEFINES' in group:
target = group['CPPDEFINES']
if _PretreatListParameters(target) == True:
Env.AppendUnique(CPPDEFINES = target)
if 'LINKFLAGS' in group:
target = group['LINKFLAGS']
if len(target) > 0:
Env.AppendUnique(LINKFLAGS = target)
if 'ASFLAGS' in group:
target = group['ASFLAGS']
if len(target) > 0:
Env.AppendUnique(ASFLAGS = target)
if 'LOCAL_CPPPATH' in group:
paths = []
for item in group['LOCAL_CPPPATH']:
paths.append(os.path.abspath(item))
group['LOCAL_CPPPATH'] = paths
import rtconfig
if rtconfig.PLATFORM == 'gcc':
if 'CCFLAGS' in group:
group['CCFLAGS'] = utils.GCCC99Patch(group['CCFLAGS'])
if 'LOCAL_CCFLAGS' in group:
group['LOCAL_CCFLAGS'] = utils.GCCC99Patch(group['LOCAL_CCFLAGS'])
# check whether to clean up library
if GetOption('cleanlib') and os.path.exists(os.path.join(group['path'], GroupLibFullName(name, Env))):
if group['src'] != []:
print('Remove library:'+ GroupLibFullName(name, Env))
fn = os.path.join(group['path'], GroupLibFullName(name, Env))
if os.path.exists(fn):
os.unlink(fn)
if 'LIBS' in group:
target = group['LIBS']
if _PretreatListParameters(target) == True:
Env.AppendUnique(LIBS = target)
if 'LIBPATH' in group:
target = group['LIBPATH']
if _PretreatListParameters(target) == True:
Env.AppendUnique(LIBPATH = target)
# check whether to build group library
if 'LIBRARY' in group:
objs = Env.Library(name, group['src'])
else:
# only add source
objs = group['src']
# merge group
for g in Projects:
if g['name'] == name:
# merge to this group
MergeGroup(g, group)
return objs
def PriorityInsertGroup(groups, group):
length = len(groups)
for i in range(0, length):
if operator.gt(groups[i]['name'].lower(), group['name'].lower()):
groups.insert(i, group)
return
groups.append(group)
# add a new group
PriorityInsertGroup(Projects, group)
return objs
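# Typical SConscript usage of DefineGroup (an illustrative sketch following the
# usual RT-Thread conventions; the group name, macro and paths are examples):
#
#     from building import *
#     cwd = GetCurrentDir()
#     src = Glob('*.c')
#     CPPPATH = [cwd]
#     group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_SERIAL'], CPPPATH = CPPPATH)
#     Return('group')
#
# DefineGroup returns the object (or library) nodes for the group and records the
# group in Projects so that later project generation (--target=...) can see it.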
def GetCurrentDir():
conscript = File('SConscript')
fn = conscript.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
return path
PREBUILDING = []
def RegisterPreBuildingAction(act):
global PREBUILDING
    assert callable(act), 'Only callable objects can be registered; %s received' % repr(act)
PREBUILDING.append(act)
def PreBuilding():
global PREBUILDING
for a in PREBUILDING:
a()
def GroupLibName(name, env):
import rtconfig
if rtconfig.PLATFORM == 'armcc':
return name + '_rvds'
elif rtconfig.PLATFORM == 'gcc':
return name + '_gcc'
return name
def GroupLibFullName(name, env):
return env['LIBPREFIX'] + GroupLibName(name, env) + env['LIBSUFFIX']
def BuildLibInstallAction(target, source, env):
lib_name = GetOption('buildlib')
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibFullName(Group['name'], env)
dst_name = os.path.join(Group['path'], lib_name)
print('Copy '+lib_name+' => ' + dst_name)
do_copy_file(lib_name, dst_name)
break
def DoBuilding(target, objects):
# merge all objects into one list
def one_list(l):
lst = []
for item in l:
if type(item) == type([]):
lst += one_list(item)
else:
lst.append(item)
return lst
# handle local group
def local_group(group, objects):
if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group or 'LOCAL_ASFLAGS' in group:
CCFLAGS = Env.get('CCFLAGS', '') + group.get('LOCAL_CCFLAGS', '')
CPPPATH = Env.get('CPPPATH', ['']) + group.get('LOCAL_CPPPATH', [''])
CPPDEFINES = Env.get('CPPDEFINES', ['']) + group.get('LOCAL_CPPDEFINES', [''])
ASFLAGS = Env.get('ASFLAGS', '') + group.get('LOCAL_ASFLAGS', '')
for source in group['src']:
objects.append(Env.Object(source, CCFLAGS = CCFLAGS, ASFLAGS = ASFLAGS,
CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES))
return True
return False
objects = one_list(objects)
program = None
# check whether special buildlib option
lib_name = GetOption('buildlib')
if lib_name:
objects = [] # remove all of objects
# build library with special component
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibName(Group['name'], Env)
if not local_group(Group, objects):
objects = Env.Object(Group['src'])
program = Env.Library(lib_name, objects)
# add library copy action
Env.BuildLib(lib_name, program)
break
else:
# remove source files with local flags setting
for group in Projects:
if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group:
for source in group['src']:
for obj in objects:
if source.abspath == obj.abspath or (len(obj.sources) > 0 and source.abspath == obj.sources[0].abspath):
objects.remove(obj)
# re-add the source files to the objects
for group in Projects:
local_group(group, objects)
program = Env.Program(target, objects)
EndBuilding(target, program)
def GenTargetProject(program = None):
if GetOption('target') == 'mdk':
from keil import MDKProject
from keil import MDK4Project
from keil import MDK5Project
template = os.path.isfile('template.Uv2')
if template:
MDKProject('project.Uv2', Projects)
else:
template = os.path.isfile('template.uvproj')
if template:
MDK4Project('project.uvproj', Projects)
else:
template = os.path.isfile('template.uvprojx')
if template:
MDK5Project('project.uvprojx', Projects)
else:
print ('No template project file found.')
if GetOption('target') == 'mdk4':
from keil import MDK4Project
MDK4Project('project.uvproj', Projects)
if GetOption('target') == 'mdk5':
from keil import MDK5Project
MDK5Project('project.uvprojx', Projects)
if GetOption('target') == 'iar':
from iar import IARProject
IARProject('project.ewp', Projects)
if GetOption('target') == 'vs':
from vs import VSProject
VSProject('project.vcproj', Projects, program)
if GetOption('target') == 'vs2012':
from vs2012 import VS2012Project
VS2012Project('project.vcxproj', Projects, program)
if GetOption('target') == 'cb':
from codeblocks import CBProject
CBProject('project.cbp', Projects, program)
if GetOption('target') == 'ua':
from ua import PrepareUA
PrepareUA(Projects, Rtt_Root, str(Dir('#')))
if GetOption('target') == 'vsc':
from vsc import GenerateVSCode
GenerateVSCode(Env)
if GetOption('target') == 'cdk':
from cdk import CDKProject
CDKProject('project.cdkproj', Projects)
if GetOption('target') == 'ses':
from ses import SESProject
SESProject(Env)
if GetOption('target') == 'makefile':
from makefile import TargetMakefile
TargetMakefile(Env)
if GetOption('target') == 'eclipse':
from eclipse import TargetEclipse
TargetEclipse(Env, GetOption('reset-project-config'), GetOption('project-name'))
if GetOption('target') == 'codelite':
from codelite import TargetCodelite
TargetCodelite(Projects, program)
if GetOption('target') == 'cmake' or GetOption('target') == 'cmake-armclang':
from cmake import CMakeProject
CMakeProject(Env,Projects)
def EndBuilding(target, program = None):
import rtconfig
need_exit = False
Env['target'] = program
Env['project'] = Projects
if hasattr(rtconfig, 'BSP_LIBRARY_TYPE'):
Env['bsp_lib_type'] = rtconfig.BSP_LIBRARY_TYPE
if hasattr(rtconfig, 'dist_handle'):
Env['dist_handle'] = rtconfig.dist_handle
Env.AddPostAction(target, rtconfig.POST_ACTION)
# Add addition clean files
Clean(target, 'cconfig.h')
Clean(target, 'rtua.py')
Clean(target, 'rtua.pyc')
if GetOption('target'):
GenTargetProject(program)
BSP_ROOT = Dir('#').abspath
if GetOption('make-dist') and program != None:
from mkdist import MkDist
MkDist(program, BSP_ROOT, Rtt_Root, Env)
if GetOption('make-dist-strip') and program != None:
from mkdist import MkDist_Strip
MkDist_Strip(program, BSP_ROOT, Rtt_Root, Env)
need_exit = True
if GetOption('make-dist-ide') and program != None:
from mkdist import MkDist
project_path = GetOption('project-path')
project_name = GetOption('project-name')
if not isinstance(project_path, str) or len(project_path) == 0 :
project_path = os.path.join(BSP_ROOT, 'dist_ide_project')
print("\nwarning : --project-path not specified, use default path: {0}.".format(project_path))
if not isinstance(project_name, str) or len(project_name) == 0:
project_name = "dist_ide_project"
print("\nwarning : --project-name not specified, use default project name: {0}.".format(project_name))
rtt_ide = {'project_path' : project_path, 'project_name' : project_name}
MkDist(program, BSP_ROOT, Rtt_Root, Env, rtt_ide)
need_exit = True
if GetOption('cscope'):
from cscope import CscopeDatabase
CscopeDatabase(Projects)
if not GetOption('help') and not GetOption('target'):
if not os.path.exists(rtconfig.EXEC_PATH):
print ("Error: the toolchain path (" + rtconfig.EXEC_PATH + ") is not exist, please check 'EXEC_PATH' in path or rtconfig.py.")
need_exit = True
if need_exit:
exit(0)
def SrcRemove(src, remove):
if not src:
return
src_bak = src[:]
if type(remove) == type('str'):
if os.path.isabs(remove):
remove = os.path.relpath(remove, GetCurrentDir())
remove = os.path.normpath(remove)
for item in src_bak:
if type(item) == type('str'):
item_str = item
else:
item_str = item.rstr()
if os.path.isabs(item_str):
item_str = os.path.relpath(item_str, GetCurrentDir())
item_str = os.path.normpath(item_str)
if item_str == remove:
src.remove(item)
else:
for remove_item in remove:
remove_str = str(remove_item)
if os.path.isabs(remove_str):
remove_str = os.path.relpath(remove_str, GetCurrentDir())
remove_str = os.path.normpath(remove_str)
for item in src_bak:
if type(item) == type('str'):
item_str = item
else:
item_str = item.rstr()
if os.path.isabs(item_str):
item_str = os.path.relpath(item_str, GetCurrentDir())
item_str = os.path.normpath(item_str)
if item_str == remove_str:
src.remove(item)
def GetVersion():
import SCons.cpp
import string
rtdef = os.path.join(Rtt_Root, 'include', 'rtdef.h')
# parse rtdef.h to get RT-Thread version
prepcessor = PatchedPreProcessor()
f = open(rtdef, 'r')
contents = f.read()
f.close()
prepcessor.process_contents(contents)
def_ns = prepcessor.cpp_namespace
    # join the filtered characters before calling int(); int() of a list would raise TypeError
    version = int(''.join(ch for ch in def_ns['RT_VERSION'] if ch in '0123456789.'))
    subversion = int(''.join(ch for ch in def_ns['RT_SUBVERSION'] if ch in '0123456789.'))
    if 'RT_REVISION' in def_ns:
        revision = int(''.join(ch for ch in def_ns['RT_REVISION'] if ch in '0123456789.'))
return '%d.%d.%d' % (version, subversion, revision)
return '0.%d.%d' % (version, subversion)
def GlobSubDir(sub_dir, ext_name):
import os
import glob
def glob_source(sub_dir, ext_name):
list = os.listdir(sub_dir)
src = glob.glob(os.path.join(sub_dir, ext_name))
for item in list:
full_subdir = os.path.join(sub_dir, item)
if os.path.isdir(full_subdir):
src += glob_source(full_subdir, ext_name)
return src
dst = []
src = glob_source(sub_dir, ext_name)
for item in src:
dst.append(os.path.relpath(item, sub_dir))
return dst
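# Example of GlobSubDir (illustrative; the paths are hypothetical): given the
# files drivers/uart.c and drivers/spi/spi.c,
#     GlobSubDir('drivers', '*.c')  ->  ['uart.c', 'spi/spi.c']
# i.e. it matches files in all nested sub-directories and returns their paths
# relative to the given root.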
def PackageSConscript(package):
from package import BuildPackage
return BuildPackage(package)
|
|
import unittest
import maya.OpenMaya as om
import maya.standalone
maya.standalone.initialize()
import maya.cmds as cmds
import os
import sys
sys.path.append(os.path.dirname(__file__))
cmds.loadPlugin("rbfblender")
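# Unit tests for the rbfblender node. These are meant to run standalone through
# mayapy, hence the maya.standalone.initialize() call above.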
class TestRbfBlender(unittest.TestCase):
def setUp(self):
cmds.file(new=True, f=True)
def create_rbfblender(self):
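        # Helper (not a test): create an rbfblender node named "blender" and report success.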
try:
node = cmds.createNode("rbfblender", n="blender")
success = True
except:
success = False
return success
def test_node_creation(self):
success = self.create_rbfblender()
try:
rbf_nodes = cmds.ls(type="rbfblender")
if rbf_nodes:
success = True
else:
success = False
except:
success = False
self.assert_(success, "Failed to create the node.")
def test_input_attribute(self):
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
rbf_nodes = cmds.ls(type="rbfblender")
rbf_node = rbf_nodes[0]
self.assert_(cmds.attributeQuery("input", n=rbf_node, ex=True), "Failed to get the input attribute")
self.assert_(cmds.attributeQuery("output", n=rbf_node, ex=True), "Failed to get the output attribute")
self.assert_(cmds.attributeQuery("poses", n=rbf_node, ex=True), "Failed to get the poses attribute")
try:
cmds.setAttr("blender.poses[0].poseInputs[0]", 1.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 1.0)
success = True
except:
success = False
self.assert_(success, "Failed to set the poses attribute")
def test_output_attribute(self):
print "###################################################"
print "TEST OUTPUT ATTRIBUTE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.setAttr("blender.input[0]", .4)
cmds.setAttr("blender.poses[0].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 0.5)
cmds.setAttr("blender.poses[1].poseInputs[0]", 1.0)
cmds.setAttr("blender.poses[1].poseName", "test", type="string")
cmds.setAttr("blender.poses[1].poseValues[0]", 1.0)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
cmds.getAttr("blender.output[0]")
cmds.setAttr("blender.input[0]", .0)
cmds.getAttr("blender.output[0]")
cmds.setAttr("blender.input[0]", 1)
cmds.getAttr("blender.output[0]")
cmds.setAttr("blender.input[0]", .5)
cmds.getAttr("blender.output[0]")
def test_passive_output(self):
print "###################################################"
print "TEST PASSIVE_OUTPUT ATTRIBUTE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.setAttr("blender.input[0]", .4)
cmds.setAttr("blender.poses[0].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 0.5)
cmds.setAttr("blender.poses[1].poseInputs[0]", 1.0)
cmds.setAttr("blender.poses[1].poseName", "test", type="string")
cmds.setAttr("blender.poses[1].poseValues[0]", 1.0)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
target_locator = cmds.spaceLocator()[0]
cmds.connectAttr("blender.output[0]", target_locator + ".translateX")
cmds.setAttr("blender.input[0]", 1.0)
self.assert_(abs(cmds.getAttr(target_locator + ".translateX") - 1.0) < .0004, "Bad value of connected attribute " + str(cmds.getAttr(target_locator + ".translateX")))
cmds.setAttr(target_locator + ".translateX", 2.0)
self.assert_(cmds.getAttr(target_locator + ".translateX") == 2.0, "Bad value of passive attribute " + str(cmds.getAttr(target_locator + ".translateX")))
def test_single_pose(self):
print "###################################################"
print "TEST SINGLE POSE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.setAttr("blender.input[0]", .4)
cmds.setAttr("blender.poses[0].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 0.5)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
cmds.getAttr("blender.output[0]")
def test_duplicated_pose(self):
print "###################################################"
print "TEST DUPLICATED POSE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.setAttr("blender.input[0]", .4)
cmds.setAttr("blender.poses[0].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 0.5)
cmds.setAttr("blender.poses[1].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[1].poseName", "test", type="string")
cmds.setAttr("blender.poses[1].poseValues[0]", 0.5)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
cmds.getAttr("blender.output[0]")
def test_non_square_attribute(self):
print "###################################################"
print "TEST NON SQUARE ATTRIBUTE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.setAttr("blender.input[0]", 1.0)
cmds.setAttr("blender.input[1]", 0.0)
cmds.setAttr("blender.poses[0].poseInputs[0]", 1.0)
cmds.setAttr("blender.poses[0].poseInputs[1]", 0.0)
cmds.setAttr("blender.poses[0].poseInputs[2]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 1.0)
cmds.setAttr("blender.poses[0].poseValues[1]", 0.5)
cmds.setAttr("blender.poses[1].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[1].poseInputs[1]", 1.0)
cmds.setAttr("blender.poses[1].poseInputs[2]", 0.0)
cmds.setAttr("blender.poses[1].poseName", "test", type="string")
cmds.setAttr("blender.poses[1].poseValues[0]", 0.0)
cmds.setAttr("blender.poses[1].poseValues[1]", 0.6)
cmds.setAttr("blender.poses[2].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[2].poseInputs[1]", 0.0)
cmds.setAttr("blender.poses[2].poseInputs[2]", 1.0)
cmds.setAttr("blender.poses[2].poseName", "test", type="string")
cmds.setAttr("blender.poses[2].poseValues[0]", 0.0)
cmds.setAttr("blender.poses[2].poseValues[1]", 0.7)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
cmds.getAttr("blender.output[0]")
cmds.getAttr("blender.output[1]")
def test_multi_output_attribute(self):
print "###################################################"
print "TEST MULTI OUTPUT ATTRIBUTE"
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
cmds.setAttr("blender.output[0]", 0.0)
cmds.setAttr("blender.output[1]", 0.0)
cmds.setAttr("blender.output[2]", 0.0)
try:
cmds.setAttr("blender.input[0]", 1.0)
cmds.setAttr("blender.input[1]", 0.0)
cmds.setAttr("blender.input[2]", 0.0)
cmds.setAttr("blender.poses[0].poseInputs[0]", 1.0)
cmds.setAttr("blender.poses[0].poseInputs[1]", 0.0)
cmds.setAttr("blender.poses[0].poseInputs[2]", 0.0)
cmds.setAttr("blender.poses[0].poseName", "test", type="string")
cmds.setAttr("blender.poses[0].poseValues[0]", 1.0)
cmds.setAttr("blender.poses[0].poseValues[1]", 0.5)
cmds.setAttr("blender.poses[0].poseValues[2]", 0.0)
cmds.setAttr("blender.poses[1].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[1].poseInputs[1]", 1.0)
cmds.setAttr("blender.poses[1].poseInputs[2]", 0.0)
cmds.setAttr("blender.poses[1].poseName", "test", type="string")
cmds.setAttr("blender.poses[1].poseValues[0]", 0.0)
cmds.setAttr("blender.poses[1].poseValues[1]", 0.6)
cmds.setAttr("blender.poses[1].poseValues[2]", 0.0)
cmds.setAttr("blender.poses[2].poseInputs[0]", 0.0)
cmds.setAttr("blender.poses[2].poseInputs[1]", 0.0)
cmds.setAttr("blender.poses[2].poseInputs[2]", 1.0)
cmds.setAttr("blender.poses[2].poseName", "test", type="string")
cmds.setAttr("blender.poses[2].poseValues[0]", 0.0)
cmds.setAttr("blender.poses[2].poseValues[1]", 0.7)
cmds.setAttr("blender.poses[2].poseValues[2]", 1.0)
success = True
except Exception, e:
success = False
print e
self.assert_(success, "Failed to set the poses attribute")
cmds.getAttr("blender.output[0]")
cmds.getAttr("blender.output[1]")
cmds.getAttr("blender.output[2]")
def test_cubes(self):
print "###################################################"
print "TEST CUBES"
cube_test = cmds.polyCube()[0]
cube_a = cmds.polyCube()[0]
cube_b = cmds.polyCube()[0]
cube_c = cmds.polyCube()[0]
cmds.setAttr(cube_a + ".translate", -1, 0, 0)
cmds.setAttr(cube_b + ".translate", 0, 0, 1)
cmds.setAttr(cube_c + ".translate", 1, 0, 0)
cmds.setAttr(cube_a + ".scale", 2, 1, 1)
cmds.setAttr(cube_b + ".scale", 1, 2, 1)
cmds.setAttr(cube_c + ".scale", 1, 1, 2)
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.connectAttr(cube_test + ".translateX", "blender.input[0]")
cmds.connectAttr(cube_test + ".translateZ", "blender.input[1]")
cmds.connectAttr("blender.output[0]", cube_test + ".scaleX")
cmds.connectAttr("blender.output[1]", cube_test + ".scaleY")
cmds.connectAttr("blender.output[2]", cube_test + ".scaleZ")
print "!! Creating pose 0"
cmds.connectAttr(cube_a + ".translateX", "blender.poses[0].poseInputs[0]")
cmds.connectAttr(cube_a + ".translateZ", "blender.poses[0].poseInputs[1]")
cmds.connectAttr(cube_a + ".scaleX", "blender.poses[0].poseValues[0]")
cmds.connectAttr(cube_a + ".scaleY", "blender.poses[0].poseValues[1]")
cmds.connectAttr(cube_a + ".scaleZ", "blender.poses[0].poseValues[2]")
print "!! Creating pose 1"
cmds.connectAttr(cube_b + ".translateX", "blender.poses[1].poseInputs[0]")
cmds.connectAttr(cube_b + ".translateZ", "blender.poses[1].poseInputs[1]")
cmds.connectAttr(cube_b + ".scaleX", "blender.poses[1].poseValues[0]")
cmds.connectAttr(cube_b + ".scaleY", "blender.poses[1].poseValues[1]")
cmds.connectAttr(cube_b + ".scaleZ", "blender.poses[1].poseValues[2]")
print "!! Creating pose 2"
cmds.connectAttr(cube_c + ".translateX", "blender.poses[2].poseInputs[0]")
cmds.connectAttr(cube_c + ".translateZ", "blender.poses[2].poseInputs[1]")
cmds.connectAttr(cube_c + ".scaleX", "blender.poses[2].poseValues[0]")
cmds.connectAttr(cube_c + ".scaleY", "blender.poses[2].poseValues[1]")
cmds.connectAttr(cube_c + ".scaleZ", "blender.poses[2].poseValues[2]")
success = True
except Exception, e:
success = False
print e
cmds.setAttr(cube_test + ".translate", -1, 0, 0)
self.assert_( cmds.getAttr(cube_test + ".scale") == [(2, 1, 1)], "Bad result" + str(cmds.getAttr(cube_test + ".scale") ))
cmds.setAttr(cube_test + ".translate", 0, 0, 1)
self.assert_( cmds.getAttr(cube_test + ".scale") == [(1, 2, 1)], "Bad result")
cmds.setAttr(cube_test + ".translate", 1, 0, 0)
self.assert_( cmds.getAttr(cube_test + ".scale") == [(1, 1, 2)], "Bad result" + str(cmds.getAttr(cube_test + ".scale")))
def test_cubes_multiquadratic(self):
print "###################################################"
print "TEST CUBES MULTIQUADRATIC"
cube_test = cmds.polyCube()[0]
cube_a = cmds.polyCube()[0]
cube_b = cmds.polyCube()[0]
cube_c = cmds.polyCube()[0]
cmds.setAttr(cube_a + ".translate", -1, 0, 0)
cmds.setAttr(cube_b + ".translate", 0, 0, 1)
cmds.setAttr(cube_c + ".translate", 1, 0, 0)
cmds.setAttr(cube_a + ".scale", 2, 1, 1)
cmds.setAttr(cube_b + ".scale", 1, 2, 1)
cmds.setAttr(cube_c + ".scale", 1, 1, 2)
success = self.create_rbfblender()
cmds.setAttr("blender.rbfKernel", 1)
cmds.setAttr("blender.blurParameter", 2.0)
self.assert_(success, "Failed to create the node.")
try:
cmds.connectAttr(cube_test + ".translateX", "blender.input[0]")
cmds.connectAttr(cube_test + ".translateZ", "blender.input[1]")
cmds.connectAttr("blender.output[0]", cube_test + ".scaleX")
cmds.connectAttr("blender.output[1]", cube_test + ".scaleY")
cmds.connectAttr("blender.output[2]", cube_test + ".scaleZ")
print "!! Creating pose 0"
cmds.connectAttr(cube_a + ".translateX", "blender.poses[0].poseInputs[0]")
cmds.connectAttr(cube_a + ".translateZ", "blender.poses[0].poseInputs[1]")
cmds.connectAttr(cube_a + ".scaleX", "blender.poses[0].poseValues[0]")
cmds.connectAttr(cube_a + ".scaleY", "blender.poses[0].poseValues[1]")
cmds.connectAttr(cube_a + ".scaleZ", "blender.poses[0].poseValues[2]")
print "!! Creating pose 1"
cmds.connectAttr(cube_b + ".translateX", "blender.poses[1].poseInputs[0]")
cmds.connectAttr(cube_b + ".translateZ", "blender.poses[1].poseInputs[1]")
cmds.connectAttr(cube_b + ".scaleX", "blender.poses[1].poseValues[0]")
cmds.connectAttr(cube_b + ".scaleY", "blender.poses[1].poseValues[1]")
cmds.connectAttr(cube_b + ".scaleZ", "blender.poses[1].poseValues[2]")
print "!! Creating pose 2"
cmds.connectAttr(cube_c + ".translateX", "blender.poses[2].poseInputs[0]")
cmds.connectAttr(cube_c + ".translateZ", "blender.poses[2].poseInputs[1]")
cmds.connectAttr(cube_c + ".scaleX", "blender.poses[2].poseValues[0]")
cmds.connectAttr(cube_c + ".scaleY", "blender.poses[2].poseValues[1]")
cmds.connectAttr(cube_c + ".scaleZ", "blender.poses[2].poseValues[2]")
success = True
except Exception, e:
success = False
print e
cmds.setAttr(cube_test + ".translate", -1, 0, 0)
result = cmds.getAttr(cube_test + ".scale")[0]
        self.assertAlmostEquals( result[0], 2.0 , 4, "Bad result for multiquadratic" + str(cmds.getAttr(cube_test + ".scale") ))
        self.assertAlmostEquals( result[1], 1.0 ,4, "Bad result for multiquadratic" + str(cmds.getAttr(cube_test + ".scale") ))
        self.assertAlmostEquals( result[2], 1.0 ,4, "Bad result for multiquadratic" + str(cmds.getAttr(cube_test + ".scale") ))
def test_cubes_thin_plate(self):
print "###################################################"
print "TEST CUBES THIN PLATE KERNEL"
cube_test = cmds.polyCube()[0]
cube_a = cmds.polyCube()[0]
cube_b = cmds.polyCube()[0]
cube_c = cmds.polyCube()[0]
cmds.setAttr(cube_a + ".translate", -1, 0, 0)
cmds.setAttr(cube_b + ".translate", 0, 0, 1)
cmds.setAttr(cube_c + ".translate", 1, 0, 0)
cmds.setAttr(cube_a + ".scale", 2, 1, 1)
cmds.setAttr(cube_b + ".scale", 1, 2, 1)
cmds.setAttr(cube_c + ".scale", 1, 1, 2)
success = self.create_rbfblender()
cmds.setAttr("blender.rbfKernel", 4)
cmds.setAttr("blender.blurParameter", 2.0)
self.assert_(success, "Failed to create the node.")
try:
cmds.connectAttr(cube_test + ".translateX", "blender.input[0]")
cmds.connectAttr(cube_test + ".translateZ", "blender.input[1]")
cmds.connectAttr("blender.output[0]", cube_test + ".scaleX")
cmds.connectAttr("blender.output[1]", cube_test + ".scaleY")
cmds.connectAttr("blender.output[2]", cube_test + ".scaleZ")
print "!! Creating pose 0"
cmds.connectAttr(cube_a + ".translateX", "blender.poses[0].poseInputs[0]")
cmds.connectAttr(cube_a + ".translateZ", "blender.poses[0].poseInputs[1]")
cmds.connectAttr(cube_a + ".scaleX", "blender.poses[0].poseValues[0]")
cmds.connectAttr(cube_a + ".scaleY", "blender.poses[0].poseValues[1]")
cmds.connectAttr(cube_a + ".scaleZ", "blender.poses[0].poseValues[2]")
print "!! Creating pose 1"
cmds.connectAttr(cube_b + ".translateX", "blender.poses[1].poseInputs[0]")
cmds.connectAttr(cube_b + ".translateZ", "blender.poses[1].poseInputs[1]")
cmds.connectAttr(cube_b + ".scaleX", "blender.poses[1].poseValues[0]")
cmds.connectAttr(cube_b + ".scaleY", "blender.poses[1].poseValues[1]")
cmds.connectAttr(cube_b + ".scaleZ", "blender.poses[1].poseValues[2]")
print "!! Creating pose 2"
cmds.connectAttr(cube_c + ".translateX", "blender.poses[2].poseInputs[0]")
cmds.connectAttr(cube_c + ".translateZ", "blender.poses[2].poseInputs[1]")
cmds.connectAttr(cube_c + ".scaleX", "blender.poses[2].poseValues[0]")
cmds.connectAttr(cube_c + ".scaleY", "blender.poses[2].poseValues[1]")
cmds.connectAttr(cube_c + ".scaleZ", "blender.poses[2].poseValues[2]")
success = True
except Exception, e:
success = False
print e
cmds.setAttr(cube_test + ".translate", -1, 0, 0)
result = cmds.getAttr(cube_test + ".scale")[0]
self.assertAlmostEquals( result[0], 2.0 , 4, "Bad result for thin plate" + str(cmds.getAttr(cube_test + ".scale") ))
self.assertAlmostEquals( result[1], 1.0 ,4, "Bad result for thin plate" + str(cmds.getAttr(cube_test + ".scale") ))
self.assertAlmostEquals( result[2], 1.0 ,4, "Bad result for thin plate" + str(cmds.getAttr(cube_test + ".scale") ))
def test_current_pose_index(self):
print "###################################################"
print "TEST CURRENT_POSE_INDEX"
cube_test = cmds.polyCube()[0]
cube_a = cmds.polyCube()[0]
cube_b = cmds.polyCube()[0]
cube_c = cmds.polyCube()[0]
cmds.setAttr(cube_a + ".translate", -1, 0, 0)
cmds.setAttr(cube_b + ".translate", 0, 0, 1)
cmds.setAttr(cube_c + ".translate", 1, 0, 0)
cmds.setAttr(cube_a + ".scale", 2, 1, 1)
cmds.setAttr(cube_b + ".scale", 1, 2, 1)
cmds.setAttr(cube_c + ".scale", 1, 1, 2)
success = self.create_rbfblender()
self.assert_(success, "Failed to create the node.")
try:
cmds.connectAttr(cube_test + ".translateX", "blender.input[0]")
cmds.connectAttr(cube_test + ".translateZ", "blender.input[1]")
cmds.connectAttr("blender.output[0]", cube_test + ".scaleX")
cmds.connectAttr("blender.output[1]", cube_test + ".scaleY")
cmds.connectAttr("blender.output[2]", cube_test + ".scaleZ")
print "!! Creating pose 0"
cmds.connectAttr(cube_a + ".translateX", "blender.poses[0].poseInputs[0]")
cmds.connectAttr(cube_a + ".translateZ", "blender.poses[0].poseInputs[1]")
cmds.connectAttr(cube_a + ".scaleX", "blender.poses[0].poseValues[0]")
cmds.connectAttr(cube_a + ".scaleY", "blender.poses[0].poseValues[1]")
cmds.connectAttr(cube_a + ".scaleZ", "blender.poses[0].poseValues[2]")
print "!! Creating pose 1"
cmds.connectAttr(cube_b + ".translateX", "blender.poses[1].poseInputs[0]")
cmds.connectAttr(cube_b + ".translateZ", "blender.poses[1].poseInputs[1]")
cmds.connectAttr(cube_b + ".scaleX", "blender.poses[1].poseValues[0]")
cmds.connectAttr(cube_b + ".scaleY", "blender.poses[1].poseValues[1]")
cmds.connectAttr(cube_b + ".scaleZ", "blender.poses[1].poseValues[2]")
print "!! Creating pose 2"
cmds.connectAttr(cube_c + ".translateX", "blender.poses[2].poseInputs[0]")
cmds.connectAttr(cube_c + ".translateZ", "blender.poses[2].poseInputs[1]")
cmds.connectAttr(cube_c + ".scaleX", "blender.poses[2].poseValues[0]")
cmds.connectAttr(cube_c + ".scaleY", "blender.poses[2].poseValues[1]")
cmds.connectAttr(cube_c + ".scaleZ", "blender.poses[2].poseValues[2]")
success = True
except Exception, e:
success = False
print e
cmds.setAttr(cube_test + ".translate", -1, 0, 0)
self.assert_( cmds.getAttr("blender.currentPoseIndex") == 0, "Bad result - expected 0 got " + str(cmds.getAttr("blender.currentPoseIndex") ))
cmds.setAttr(cube_test + ".translate", 0, 0, 1)
self.assert_( cmds.getAttr("blender.currentPoseIndex") == 1, "Bad result - expected 1 got " + str(cmds.getAttr("blender.currentPoseIndex") ))
cmds.setAttr(cube_test + ".translate", 1, 0, 0)
self.assert_( cmds.getAttr("blender.currentPoseIndex") == 2, "Bad result - expected 1 got " + str(cmds.getAttr("blender.currentPoseIndex") ))
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden, HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST, require_GET
from pytz import timezone as timezone_pytz
from courses.models import Course
from groups.models import Group
from mail.common import EmailRenderer
from mail.models import Message
from users.model_user_status import get_statuses
from users.models import UserProfile
MONTH = {
1: _(u"january"),
2: _(u"february"),
3: _(u"march"),
4: _(u"april"),
5: _(u"may"),
6: _(u"june"),
7: _(u"july"),
8: _(u"august"),
9: _(u"september"),
10: _(u"october"),
11: _(u"november"),
12: _(u"december")
}
@require_GET
@login_required
def mail_page(request):
user = request.user
user_profile = user.profile
users_from_staff_len = {}
if user.is_staff and 'from_staff' in request.GET and 'user_ids_send_mail_counter' in request.session:
key = 'user_ids_send_mail_' + request.GET['from_staff']
if key in request.session:
users_from_staff_len = {
'index': request.GET['from_staff'],
'length': len(request.session[key]),
}
if user.is_staff:
courses_teacher = Course.objects.filter(is_active=True)
else:
courses_teacher = Course.objects.filter(teachers=user, is_active=True)
context = {
"user": user,
"user_profile": user_profile,
"courses_teacher": courses_teacher,
'user_statuses': get_statuses(),
"users_from_staff_len": users_from_staff_len,
"snow_alert_message_fulltext": hasattr(settings, 'SEND_MESSAGE_FULLTEXT') and settings.SEND_MESSAGE_FULLTEXT,
}
return render(request, 'mail.html', context)
@require_GET
@login_required
def ajax_get_mailbox(request):
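    # Backend for the mailbox DataTables widget: applies any bulk read/unread/
    # (un)delete actions passed in the query string, then returns one page of the
    # requested inbox/sent/trash listing plus unread counters as JSON.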
response = dict()
user = request.user
user_profile = user.profile
datatable_data = dict(request.GET)
if "draw" not in datatable_data:
return HttpResponseForbidden()
if "make_read[]" in datatable_data:
if datatable_data["make_read[]"][0] == "all":
user_profile.unread_messages.clear()
user_profile.send_notify_messages.clear()
else:
user_profile.unread_messages = list(
user_profile.unread_messages
.exclude(id__in=datatable_data["make_read[]"])
.values_list("id", flat=True)
)
user_profile.send_notify_messages = list(
user_profile.send_notify_messages
.exclude(id__in=datatable_data["make_read[]"])
.values_list("id", flat=True)
)
if "make_unread[]" in datatable_data:
user_profile.unread_messages.add(*Message.objects.filter(id__in=datatable_data["make_unread[]"]))
if "make_delete[]" in datatable_data:
user_profile.deleted_messages.add(*Message.objects.filter(id__in=datatable_data["make_delete[]"]))
if "make_undelete[]" in datatable_data:
user_profile.deleted_messages = list(
user_profile.deleted_messages
.exclude(id__in=datatable_data["make_undelete[]"])
.values_list("id", flat=True)
)
messages = Message.objects.none()
messages_deleted = user_profile.deleted_messages.all()
type_msg = datatable_data['type'][0]
if type_msg == "inbox":
messages = Message.objects.filter(recipients=user).exclude(id__in=messages_deleted)
elif type_msg == "sent":
messages = Message.objects.filter(sender=user).exclude(id__in=messages_deleted)
elif type_msg == "trash":
messages = messages_deleted
data = list()
start = int(datatable_data['start'][0])
end = start + int(datatable_data['length'][0])
unread = user_profile.unread_messages.all()
for msg in messages[start:end]:
data.append({
"0": "",
"1": u'%s %s' % (msg.sender.last_name, msg.sender.first_name),
"2": msg.title,
"3": format_date(msg.create_time.astimezone(timezone_pytz(user_profile.time_zone))),
"DT_RowClass": "unread" if msg in unread else "",
"DT_RowId": "row_msg_" + type_msg + "_" + str(msg.id),
"DT_RowData": {
"id": msg.id
},
})
response['draw'] = datatable_data['draw']
response['recordsTotal'] = messages.count()
response['recordsFiltered'] = messages.count()
response['data'] = data
response['unread_count'] = user_profile.get_unread_count()
response['type'] = type_msg
return HttpResponse(json.dumps(response),
content_type="application/json")
def format_date(date):
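    # Render a message timestamp relative to now: time of day for today,
    # "<day> <month>" within the current year, "dd.mm.yy" otherwise.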
date_str = ""
now = timezone.now()
if now.year == date.year:
if now.day == date.day and now.month == date.month:
date_str = date.strftime("%H:%M")
else:
date_str = unicode(date.day) + u" " + MONTH[date.month]
else:
date_str = date.strftime("%d.%m.%y")
return date_str
@require_GET
@login_required
def ajax_get_message(request):
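    # Return a single message as JSON, marking it read for the requesting user.
    # For hidden-copy mail, only the requesting user is exposed as a recipient.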
response = dict()
user = request.user
user_profile = user.profile
if "msg_id" not in request.GET:
return HttpResponseForbidden()
msg_id = int(request.GET["msg_id"])
message = Message.objects.get(id=msg_id)
if message.sender != user and user not in message.recipients.all():
return HttpResponseForbidden()
unread_count = int(request.GET["unread_count"])
if message in user_profile.unread_messages.all():
message.read_message(user)
unread_count -= 1
recipients_user = []
recipients_group = []
recipients_course = []
recipients_status = []
if message.hidden_copy and message.sender != user:
recipients_user.append({
"id": user.id,
"fullname": u'%s %s' % (user.last_name, user.first_name),
"url": user.get_absolute_url()
})
else:
for recipient in message.recipients_user.all():
recipients_user.append({
"id": recipient.id,
"fullname": u'%s %s' % (recipient.last_name, recipient.first_name),
"url": recipient.get_absolute_url()
})
for group in message.recipients_group.all():
recipients_group.append({
"id": group.id,
"name": group.name
})
for course in message.recipients_course.all():
recipients_course.append({
"id": course.id,
"name": course.name,
"url": course.get_absolute_url(),
})
for status in message.recipients_status.all():
recipients_status.append({
"id": status.id,
"name": status.name
})
if message.sender != user or request.GET["mailbox"] == 'inbox':
text = EmailRenderer.fill_name(message, user)
else:
text = message.text
response['sender'] = {
"id": message.sender.id,
"fullname": u'%s %s' % (message.sender.last_name, message.sender.first_name),
"url": message.sender.get_absolute_url(),
"avatar": message.sender.profile.avatar.url if message.sender.profile.avatar else "",
}
response['recipients_user'] = recipients_user
response['recipients_group'] = recipients_group
response['recipients_course'] = recipients_course
response['recipients_status'] = recipients_status
response['date'] = message.create_time.astimezone(timezone_pytz(user_profile.time_zone))\
.strftime("%d.%m.%y %H:%M:%S")
response['text'] = text
response['unread_count'] = unread_count
return HttpResponse(json.dumps(response),
content_type="application/json")
@require_POST
@login_required
def ajax_send_message(request):
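    # Create a Message from the compose form and expand users, groups, courses and
    # user statuses into a flat set of recipient ids.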
user = request.user
data = dict(request.POST)
hidden_copy = False
if 'hidden_copy' in data and data['hidden_copy'][0]:
hidden_copy = True
variable = False
if 'variable' in data and data['variable'][0]:
variable = True
message = Message()
message.sender = user
message.title = data['new_title'][0]
message.text = data['new_text'][0]
message.hidden_copy = hidden_copy
message.variable = variable
message.save()
recipients_ids = set()
if "new_recipients_user[]" in data or "new_recipients_preinit[]" in data:
users = data.get("new_recipients_user[]", [])
if "new_recipients_preinit[]" in data:
users += request.session.get('user_ids_send_mail_' + data["new_recipients_preinit[]"][0], [])
message.recipients_user = users
recipients_ids.update(message.recipients_user.values_list('id', flat=True))
group_ids = []
if "new_recipients_group[]" in data:
message.recipients_group = data["new_recipients_group[]"]
for group in Group.objects.filter(id__in=data["new_recipients_group[]"]):
recipients_ids.update(group.students.exclude(id=user.id).values_list('id', flat=True))
group_ids.append(group.id)
if "new_recipients_course[]" in data:
message.recipients_course = data["new_recipients_course[]"]
for course in Course.objects.filter(id__in=data["new_recipients_course[]"]):
for group in course.groups.exclude(id__in=group_ids).distinct():
recipients_ids.update(group.students.exclude(id=user.id).values_list('id', flat=True))
if "new_recipients_status[]" in data:
message.recipients_status = data["new_recipients_status[]"]
recipients_ids.update(UserProfile.objects.filter(user_status__in=data["new_recipients_status[]"])
.values_list('user__id', flat=True))
message.recipients = list(recipients_ids)
return HttpResponse("OK")
|
|
"""
Python USBTMC driver
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import usb.core
import usb.util
import struct
import time
import os
import re
import sys
# constants
USBTMC_bInterfaceClass = 0xFE
USBTMC_bInterfaceSubClass = 3
USBTMC_bInterfaceProtocol = 0
USB488_bInterfaceProtocol = 1
USBTMC_MSGID_DEV_DEP_MSG_OUT = 1
USBTMC_MSGID_REQUEST_DEV_DEP_MSG_IN = 2
USBTMC_MSGID_DEV_DEP_MSG_IN = 2
USBTMC_MSGID_VENDOR_SPECIFIC_OUT = 126
USBTMC_MSGID_REQUEST_VENDOR_SPECIFIC_IN = 127
USBTMC_MSGID_VENDOR_SPECIFIC_IN = 127
USB488_MSGID_TRIGGER = 128
USBTMC_STATUS_SUCCESS = 0x01
USBTMC_STATUS_PENDING = 0x02
USBTMC_STATUS_FAILED = 0x80
USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS = 0x81
USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS = 0x82
USBTMC_STATUS_SPLIT_IN_PROGRESS = 0x83
USB488_STATUS_INTERRUPT_IN_BUSY = 0x20
USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT = 1
USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS = 2
USBTMC_REQUEST_INITIATE_ABORT_BULK_IN = 3
USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS = 4
USBTMC_REQUEST_INITIATE_CLEAR = 5
USBTMC_REQUEST_CHECK_CLEAR_STATUS = 6
USBTMC_REQUEST_GET_CAPABILITIES = 7
USBTMC_REQUEST_INDICATOR_PULSE = 64
USB488_READ_STATUS_BYTE = 128
USB488_REN_CONTROL = 160
USB488_GOTO_LOCAL = 161
USB488_LOCAL_LOCKOUT = 162
USBTMC_HEADER_SIZE = 12
RIGOL_QUIRK_PIDS = [0x04ce, 0x0588]
def parse_visa_resource_string(resource_string):
# valid resource strings:
# USB::1234::5678::INSTR
# USB::1234::5678::SERIAL::INSTR
# USB0::0x1234::0x5678::INSTR
# USB0::0x1234::0x5678::SERIAL::INSTR
m = re.match('^(?P<prefix>(?P<type>USB)\d*)(::(?P<arg1>[^\s:]+))'
'(::(?P<arg2>[^\s:]+(\[.+\])?))(::(?P<arg3>[^\s:]+))?'
'(::(?P<suffix>INSTR))$', resource_string, re.I)
if m is not None:
return dict(
type=m.group('type').upper(),
prefix=m.group('prefix'),
arg1=m.group('arg1'),
arg2=m.group('arg2'),
arg3=m.group('arg3'),
suffix=m.group('suffix')
)
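# Illustrative example (not part of the original module): for
# "USB0::0x1234::0x5678::INSTR" the pattern above yields
#   {'type': 'USB', 'prefix': 'USB0', 'arg1': '0x1234',
#    'arg2': '0x5678', 'arg3': None, 'suffix': 'INSTR'}
# and the function implicitly returns None for strings that do not match.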
# Exceptions
class UsbtmcException(Exception):
em = {0: "No error"}
def __init__(self, err=None, note=None):
self.err = err
self.note = note
self.msg = ''
if err is None:
self.msg = note
else:
if type(err) is int:
if err in self.em:
self.msg = "%d: %s" % (err, self.em[err])
else:
self.msg = "%d: Unknown error" % err
else:
self.msg = err
if note is not None:
self.msg = "%s [%s]" % (self.msg, note)
def __str__(self):
return self.msg
def list_devices():
"List all connected USBTMC devices"
def is_usbtmc_device(dev):
for cfg in dev:
d = usb.util.find_descriptor(cfg, bInterfaceClass=USBTMC_bInterfaceClass,
bInterfaceSubClass=USBTMC_bInterfaceSubClass)
if d is not None:
return True
if dev.idVendor == 0x1334:
# Advantest
return True
if dev.idVendor == 0x0957:
# Agilent
if dev.idProduct in [0x2818, 0x4218, 0x4418]:
# Agilent U27xx modular devices in firmware update mode
# 0x2818 for U2701A/U2702A (firmware update mode on power up)
# 0x4218 for U2722A (firmware update mode on power up)
# 0x4418 for U2723A (firmware update mode on power up)
return True
return False
return list(usb.core.find(find_all=True, custom_match=is_usbtmc_device))
def list_resources():
"List resource strings for all connected USBTMC devices"
res = []
for dev in list_devices():
idVendor = dev.idVendor
idProduct = dev.idProduct
# "fix" IDs for devices in firmware update mode
if idVendor == 0x0957 and idProduct == 0x2818:
# Agilent U2701A/U2702A firmware update mode
idProduct = 0x2918
if idVendor == 0x0957 and idProduct == 0x4218:
# Agilent U2722A firmware update mode
idProduct = 0x4118
if idVendor == 0x0957 and idProduct == 0x4418:
# Agilent U2723A firmware update mode
idProduct = 0x4318
# attempt to read serial number
iSerial = None
try:
iSerial = dev.serial_number
except:
pass
# append formatted resource string to list
if iSerial is None:
res.append("USB::%d::%d::INSTR" % (idVendor, idProduct))
else:
res.append("USB::%d::%d::%s::INSTR" % (idVendor, idProduct, iSerial))
return res
def find_device(idVendor=None, idProduct=None, iSerial=None):
"Find USBTMC instrument"
devs = list_devices()
if len(devs) == 0:
return None
for dev in devs:
# match VID and PID
found = dev.idVendor == idVendor and dev.idProduct == idProduct
if idVendor == 0x0957 and idProduct == 0x2918:
# Agilent U2701A/U2702A firmware update mode
if dev.idVendor == idVendor and dev.idProduct == 0x2818:
found = True
if idVendor == 0x0957 and idProduct == 0x4118:
# Agilent U2722A firmware update mode
if dev.idVendor == idVendor and dev.idProduct == 0x4218:
found = True
if idVendor == 0x0957 and idProduct == 0x4318:
# Agilent U2723A firmware update mode
if dev.idVendor == idVendor and dev.idProduct == 0x4418:
found = True
if not found:
continue
if iSerial is None:
return dev
else:
s = ''
# try reading serial number
try:
s = dev.serial_number
except:
pass
if iSerial == s:
return dev
return None
class Instrument(object):
"USBTMC instrument interface client"
def __init__(self, *args, **kwargs):
"Create new USBTMC instrument object"
self.idVendor = 0
self.idProduct = 0
self.iSerial = None
self.device = None
self.cfg = None
self.iface = None
self.term_char = None
self.bcdUSBTMC = 0
self.support_pulse = False
self.support_talk_only = False
self.support_listen_only = False
self.support_term_char = False
self.bcdUSB488 = 0
self.support_USB4882 = False
self.support_remote_local = False
self.support_trigger = False
self.support_scpi = False
self.support_SR = False
self.support_RL = False
self.support_DT = False
self.max_transfer_size = 1024*1024
self.timeout = 5.0
self.bulk_in_ep = None
self.bulk_out_ep = None
self.interrupt_in_ep = None
self.last_btag = 0
self.last_rstb_btag = 0
self.connected = False
self.reattach = []
self.old_cfg = None
# quirks
self.advantest_quirk = False
self.advantest_locked = False
self.rigol_quirk = False
self.rigol_quirk_ieee_block = False
resource = None
# process arguments
if len(args) == 1:
if type(args[0]) == str:
resource = args[0]
else:
self.device = args[0]
if len(args) >= 2:
self.idVendor = args[0]
self.idProduct = args[1]
if len(args) >= 3:
self.iSerial = args[2]
for op in kwargs:
val = kwargs[op]
if op == 'idVendor':
self.idVendor = val
elif op == 'idProduct':
self.idProduct = val
elif op == 'iSerial':
self.iSerial = val
elif op == 'device':
self.device = val
elif op == 'dev':
self.device = val
elif op == 'term_char':
self.term_char = val
elif op == 'resource':
resource = val
if resource is not None:
res = parse_visa_resource_string(resource)
if res is None:
raise UsbtmcException("Invalid resource string", 'init')
if res['arg1'] is None and res['arg2'] is None:
raise UsbtmcException("Invalid resource string", 'init')
self.idVendor = int(res['arg1'], 0)
self.idProduct = int(res['arg2'], 0)
self.iSerial = res['arg3']
# find device
if self.device is None:
if self.idVendor is None or self.idProduct is None:
raise UsbtmcException("No device specified", 'init')
else:
self.device = find_device(self.idVendor, self.idProduct, self.iSerial)
if self.device is None:
raise UsbtmcException("Device not found", 'init')
def __del__(self):
if self.connected:
self.close()
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, val):
self._timeout = val
self._timeout_ms = int(val * 1000)
def open(self):
if self.connected:
return
# initialize device
if self.device.idVendor == 0x0957 and self.device.idProduct in [0x2818, 0x4218, 0x4418]:
# Agilent U27xx modular devices
# U2701A/U2702A, U2722A/U2723A
# These devices require a short initialization sequence, presumably
# to take them out of 'firmware update' mode after confirming
# that the firmware version is correct. This is required once
# on every power-on before the device can be used.
# Note that the device will reset and the product ID will change.
# U2701A/U2702A boot 0x2818, usbtmc 0x2918
# U2722A boot 0x4218, usbtmc 0x4118
# U2723A boot 0x4418, usbtmc 0x4318
serial = self.device.serial_number
new_id = 0
if self.device.idProduct == 0x2818:
# U2701A/U2702A
new_id = 0x2918
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047E, data_or_wLength=0x0001)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047D, data_or_wLength=0x0006)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x0484, data_or_wLength=0x0005)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x0472, data_or_wLength=0x000C)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047A, data_or_wLength=0x0001)
self.device.ctrl_transfer(bmRequestType=0x40, bRequest=0x0C, wValue=0x0000, wIndex=0x0475, data_or_wLength=b'\x00\x00\x01\x01\x00\x00\x08\x01')
if self.device.idProduct in [0x4218, 0x4418]:
# U2722A/U2723A
if self.device.idProduct == 0x4218:
# U2722A
new_id = 0x4118
elif self.device.idProduct == 0x4418:
# U2723A
new_id = 0x4318
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047E, data_or_wLength=0x0001)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047D, data_or_wLength=0x0006)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x0487, data_or_wLength=0x0005)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x0472, data_or_wLength=0x000C)
self.device.ctrl_transfer(bmRequestType=0xC0, bRequest=0x0C, wValue=0x0000, wIndex=0x047A, data_or_wLength=0x0001)
self.device.ctrl_transfer(bmRequestType=0x40, bRequest=0x0C, wValue=0x0000, wIndex=0x0475, data_or_wLength=b'\x00\x00\x01\x01\x00\x00\x08\x01')
usb.util.dispose_resources(self.device)
self.device = None
for i in range(40):
self.device = find_device(0x0957, new_id, serial)
if self.device is not None:
break
time.sleep(0.5)
if self.device is None:
print("Agilent U27xx modular device initialization failed")
# find first USBTMC interface
for cfg in self.device:
for iface in cfg:
if (iface.bInterfaceClass == USBTMC_bInterfaceClass and
iface.bInterfaceSubClass == USBTMC_bInterfaceSubClass):
# USBTMC device
self.cfg = cfg
self.iface = iface
break
elif (self.device.idVendor == 0x1334):
# Advantest
self.cfg = cfg
self.iface = iface
break
else:
continue
break
if self.iface is None:
raise UsbtmcException("Not a USBTMC device", 'init')
try:
self.old_cfg = self.device.get_active_configuration()
except usb.core.USBError:
# ignore exception if configuration is not set
pass
if self.old_cfg is not None and self.old_cfg.bConfigurationValue == self.cfg.bConfigurationValue:
# already set to correct configuration
# release kernel driver on USBTMC interface
self._release_kernel_driver(self.iface.bInterfaceNumber)
else:
# wrong configuration or configuration not set
# release all kernel drivers
if self.old_cfg is not None:
for iface in self.old_cfg:
self._release_kernel_driver(iface.bInterfaceNumber)
# set proper configuration
self.device.set_configuration(self.cfg)
# claim interface
usb.util.claim_interface(self.device, self.iface)
# don't need to set altsetting - USBTMC devices have 1 altsetting as per the spec
# find endpoints
for ep in self.iface:
ep_dir = usb.util.endpoint_direction(ep.bEndpointAddress)
ep_type = usb.util.endpoint_type(ep.bmAttributes)
if (ep_type == usb.util.ENDPOINT_TYPE_BULK):
if (ep_dir == usb.util.ENDPOINT_IN):
self.bulk_in_ep = ep
elif (ep_dir == usb.util.ENDPOINT_OUT):
self.bulk_out_ep = ep
elif (ep_type == usb.util.ENDPOINT_TYPE_INTR):
if (ep_dir == usb.util.ENDPOINT_IN):
self.interrupt_in_ep = ep
if self.bulk_in_ep is None or self.bulk_out_ep is None:
raise UsbtmcException("Invalid endpoint configuration", 'init')
# set quirk flags if necessary
if self.device.idVendor == 0x1334:
# Advantest/ADCMT devices have a very odd USBTMC implementation
# which requires max 63 byte reads and never signals EOI on read
self.max_transfer_size = 63
self.advantest_quirk = True
if self.device.idVendor == 0x1ab1 and self.device.idProduct in RIGOL_QUIRK_PIDS:
self.rigol_quirk = True
if self.device.idProduct == 0x04ce:
self.rigol_quirk_ieee_block = True
self.connected = True
self.clear()
self.get_capabilities()
def close(self):
if not self.connected:
return
usb.util.dispose_resources(self.device)
try:
# reset configuration
if self.cfg.bConfigurationValue != self.old_cfg.bConfigurationValue:
self.device.set_configuration(self.old_cfg)
# try to reattach kernel driver
for iface in self.reattach:
try:
self.device.attach_kernel_driver(iface)
except:
pass
except:
pass
self.reattach = []
self.connected = False
def is_usb488(self):
return self.iface.bInterfaceProtocol == USB488_bInterfaceProtocol
def get_capabilities(self):
if not self.connected:
self.open()
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_GET_CAPABILITIES,
0x0000,
self.iface.index,
0x0018,
timeout=self._timeout_ms)
if (b[0] == USBTMC_STATUS_SUCCESS):
self.bcdUSBTMC = (b[3] << 8) + b[2]
self.support_pulse = b[4] & 4 != 0
self.support_talk_only = b[4] & 2 != 0
self.support_listen_only = b[4] & 1 != 0
self.support_term_char = b[5] & 1 != 0
if self.is_usb488():
self.bcdUSB488 = (b[13] << 8) + b[12]
self.support_USB4882 = b[4] & 4 != 0
self.support_remote_local = b[4] & 2 != 0
self.support_trigger = b[4] & 1 != 0
self.support_scpi = b[4] & 8 != 0
self.support_SR = b[4] & 4 != 0
self.support_RL = b[4] & 2 != 0
self.support_DT = b[4] & 1 != 0
else:
raise UsbtmcException("Get capabilities failed", 'get_capabilities')
def pulse(self):
"""
        Send a pulse indicator request; this should blink a light
        for 500-1000 ms and then turn it off again (only if supported).
"""
if not self.connected:
self.open()
if self.support_pulse:
b = self.device.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
USBTMC_REQUEST_INDICATOR_PULSE,
0x0000,
self.iface.index,
0x0001,
timeout=self._timeout_ms)
if (b[0] != USBTMC_STATUS_SUCCESS):
raise UsbtmcException("Pulse failed", 'pulse')
# message header management
def pack_bulk_out_header(self, msgid):
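        # bTag cycles through 1..255; the header also carries its bitwise inverse
        # (bTagInverse) so the device can validate the transfer identifier.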
self.last_btag = btag = (self.last_btag % 255) + 1
return struct.pack('BBBx', msgid, btag, ~btag & 0xFF)
def pack_dev_dep_msg_out_header(self, transfer_size, eom = True):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_DEV_DEP_MSG_OUT)
return hdr+struct.pack("<LBxxx", transfer_size, eom)
def pack_dev_dep_msg_in_header(self, transfer_size, term_char = None):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_DEV_DEP_MSG_IN)
transfer_attributes = 0
if term_char is None:
term_char = 0
else:
transfer_attributes = 2
term_char = self.term_char
return hdr+struct.pack("<LBBxx", transfer_size, transfer_attributes, term_char)
def pack_vendor_specific_out_header(self, transfer_size):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_VENDOR_SPECIFIC_OUT)
return hdr+struct.pack("<Lxxxx", transfer_size)
def pack_vendor_specific_in_header(self, transfer_size):
hdr = self.pack_bulk_out_header(USBTMC_MSGID_VENDOR_SPECIFIC_IN)
return hdr+struct.pack("<Lxxxx", transfer_size)
def pack_usb488_trigger(self):
hdr = self.pack_bulk_out_header(USB488_MSGID_TRIGGER)
return hdr+b'\x00'*8
def unpack_bulk_in_header(self, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
return (msgid, btag, btaginverse)
def unpack_dev_dep_resp_header(self, data):
msgid, btag, btaginverse = self.unpack_bulk_in_header(data)
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
data = data[USBTMC_HEADER_SIZE:transfer_size+USBTMC_HEADER_SIZE]
return (msgid, btag, btaginverse, transfer_size, transfer_attributes, data)
def write_raw(self, data):
"Write binary data to instrument"
if not self.connected:
self.open()
eom = False
num = len(data)
offset = 0
try:
while num > 0:
if num <= self.max_transfer_size:
eom = True
block = data[offset:offset+self.max_transfer_size]
size = len(block)
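                # Pad the payload with zeros to a 4-byte boundary as required for
                # USBTMC Bulk-OUT transfers; eom is set only on the final chunk.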
req = self.pack_dev_dep_msg_out_header(size, eom) + block + b'\0'*((4 - (size % 4)) % 4)
self.bulk_out_ep.write(req, timeout=self._timeout_ms)
offset += size
num -= size
except usb.core.USBError:
exc = sys.exc_info()[1]
if exc.errno == 110:
# timeout, abort transfer
self._abort_bulk_out()
raise
def read_raw(self, num=-1):
"Read binary data from instrument"
if not self.connected:
self.open()
read_len = self.max_transfer_size
if 0 < num < read_len:
read_len = num
eom = False
term_char = None
if self.term_char is not None:
term_char = self.term_char
read_data = b''
try:
while not eom:
if not self.rigol_quirk or read_data == b'':
# if the rigol sees this again, it will restart the transfer
# so only send it the first time
req = self.pack_dev_dep_msg_in_header(read_len, term_char)
self.bulk_out_ep.write(req, timeout=self._timeout_ms)
resp = self.bulk_in_ep.read(read_len+USBTMC_HEADER_SIZE+3, timeout=self._timeout_ms)
if sys.version_info >= (3, 2):
resp = resp.tobytes()
else:
resp = resp.tostring()
if self.rigol_quirk and read_data:
pass # do nothing, the packet has no header if it isn't the first
else:
msgid, btag, btaginverse, transfer_size, transfer_attributes, data = self.unpack_dev_dep_resp_header(resp)
if self.rigol_quirk:
# rigol devices only send the header in the first packet, and they lie about whether the transaction is complete
if read_data:
read_data += resp
else:
if self.rigol_quirk_ieee_block and data.startswith(b"#"):
# ieee block incoming, the transfer_size usbtmc header is lying about the transaction size
l = int(chr(data[1]))
n = int(data[2:l+2])
transfer_size = n + (l+2) # account for ieee header
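                            # Worked example: a block starting with b"#3500" has l = 3 length
                            # digits and n = 500 payload bytes, so transfer_size = 505
                            # (payload plus the 5-byte "#3500" prefix).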
read_data += data
if len(read_data) >= transfer_size:
read_data = read_data[:transfer_size] # as per usbtmc spec section 3.2 note 2
eom = True
else:
eom = False
else:
eom = transfer_attributes & 1
read_data += data
# Advantest devices never signal EOI and may only send one read packet
if self.advantest_quirk:
break
if num > 0:
num = num - len(data)
if num <= 0:
break
if num < read_len:
read_len = num
except usb.core.USBError:
exc = sys.exc_info()[1]
if exc.errno == 110:
# timeout, abort transfer
self._abort_bulk_in()
raise
return read_data
def ask_raw(self, data, num=-1):
"Write then read binary data"
# Advantest/ADCMT hardware won't respond to a command unless it's in Local Lockout mode
was_locked = self.advantest_locked
try:
if self.advantest_quirk and not was_locked:
self.lock()
self.write_raw(data)
return self.read_raw(num)
finally:
if self.advantest_quirk and not was_locked:
self.unlock()
def write(self, message, encoding='utf-8'):
"Write string to instrument"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
for message_i in message:
self.write(message_i, encoding)
return
self.write_raw(str(message).encode(encoding))
def read(self, num=-1, encoding='utf-8'):
"Read string from instrument"
return self.read_raw(num).decode(encoding).rstrip('\r\n')
def ask(self, message, num=-1, encoding='utf-8'):
"Write then read string"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
val = list()
for message_i in message:
val.append(self.ask(message_i, num, encoding))
return val
# Advantest/ADCMT hardware won't respond to a command unless it's in Local Lockout mode
was_locked = self.advantest_locked
try:
if self.advantest_quirk and not was_locked:
self.lock()
self.write(message, encoding)
return self.read(num, encoding)
finally:
if self.advantest_quirk and not was_locked:
self.unlock()
def read_stb(self):
"Read status byte"
if not self.connected:
self.open()
if self.is_usb488():
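            # rotate the bTag used for READ_STATUS_BYTE and keep it at 2 or above so the
            # response (and, if present, the interrupt-in packet) can be matched against it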
rstb_btag = (self.last_rstb_btag % 128) + 1
if rstb_btag < 2:
rstb_btag = 2
self.last_rstb_btag = rstb_btag
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
bRequest=USB488_READ_STATUS_BYTE,
wValue=rstb_btag,
wIndex=self.iface.index,
data_or_wLength=0x0003,
timeout=self._timeout_ms
)
if (b[0] == USBTMC_STATUS_SUCCESS):
# check btag
if rstb_btag != b[1]:
raise UsbtmcException("Read status byte btag mismatch", 'read_stb')
if self.interrupt_in_ep is None:
# no interrupt channel, value is here
return b[2]
else:
# read response from interrupt channel
resp = self.interrupt_in_ep.read(2, timeout=self._timeout_ms)
if resp[0] != rstb_btag + 128:
raise UsbtmcException("Read status byte btag mismatch", 'read_stb')
else:
return resp[1]
else:
raise UsbtmcException("Read status failed", 'read_stb')
else:
return int(self.ask("*STB?"))
def trigger(self):
"Send trigger command"
if not self.connected:
self.open()
if self.support_trigger:
data = self.pack_usb488_trigger()
self.bulk_out_ep.write(data, timeout=self._timeout_ms)
else:
self.write("*TRG")
def clear(self):
"Send clear command"
if not self.connected:
self.open()
# Send INITIATE_CLEAR
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
bRequest=USBTMC_REQUEST_INITIATE_CLEAR,
wValue=0x0000,
wIndex=self.iface.index,
data_or_wLength=0x0001,
timeout=self._timeout_ms
)
if (b[0] == USBTMC_STATUS_SUCCESS):
# Initiate clear succeeded, wait for completion
while True:
# Check status
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_INTERFACE),
bRequest=USBTMC_REQUEST_CHECK_CLEAR_STATUS,
wValue=0x0000,
wIndex=self.iface.index,
data_or_wLength=0x0002,
timeout=self._timeout_ms
)
time.sleep(0.1)
if (b[0] != USBTMC_STATUS_PENDING):
break
# Clear halt condition
self.bulk_out_ep.clear_halt()
else:
raise UsbtmcException("Clear failed", 'clear')
def _abort_bulk_out(self, btag=None):
"Abort bulk out"
if not self.connected:
return
if btag is None:
btag = self.last_btag
# Send INITIATE_ABORT_BULK_OUT
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_ENDPOINT),
bRequest=USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
wValue=btag,
wIndex=self.bulk_out_ep.bEndpointAddress,
data_or_wLength=0x0002,
timeout=self._timeout_ms
)
if (b[0] == USBTMC_STATUS_SUCCESS):
# Initiate abort bulk out succeeded, wait for completion
while True:
# Check status
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_ENDPOINT),
bRequest=USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
wValue=0x0000,
wIndex=self.bulk_out_ep.bEndpointAddress,
data_or_wLength=0x0008,
timeout=self._timeout_ms
)
time.sleep(0.1)
if (b[0] != USBTMC_STATUS_PENDING):
break
else:
# no transfer in progress; nothing to do
pass
def _abort_bulk_in(self, btag=None):
"Abort bulk in"
if not self.connected:
return
if btag is None:
btag = self.last_btag
# Send INITIATE_ABORT_BULK_IN
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_ENDPOINT),
bRequest=USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
wValue=btag,
wIndex=self.bulk_in_ep.bEndpointAddress,
data_or_wLength=0x0002,
timeout=self._timeout_ms
)
if (b[0] == USBTMC_STATUS_SUCCESS):
# Initiate abort bulk in succeeded, wait for completion
while True:
# Check status
b = self.device.ctrl_transfer(
bmRequestType=usb.util.build_request_type(usb.util.CTRL_IN, usb.util.CTRL_TYPE_CLASS, usb.util.CTRL_RECIPIENT_ENDPOINT),
bRequest=USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
wValue=0x0000,
wIndex=self.bulk_in_ep.bEndpointAddress,
data_or_wLength=0x0008,
timeout=self._timeout_ms
)
time.sleep(0.1)
if (b[0] != USBTMC_STATUS_PENDING):
break
else:
# no transfer in progress; nothing to do
pass
def remote(self):
"Send remote command"
raise NotImplementedError()
def local(self):
"Send local command"
raise NotImplementedError()
def lock(self):
"Send lock command"
if not self.connected:
self.open()
if self.advantest_quirk:
# This Advantest/ADCMT vendor-specific control command enables remote control and must be sent before any commands are exchanged
# (otherwise READ commands will only retrieve the latest measurement)
self.advantest_locked = True
self.device.ctrl_transfer(bmRequestType=0xA1, bRequest=0xA0, wValue=0x0001, wIndex=0x0000, data_or_wLength=1)
else:
raise NotImplementedError()
def unlock(self):
"Send unlock command"
if not self.connected:
self.open()
if self.advantest_quirk:
            # This Advantest/ADCMT vendor-specific control command releases remote control
            # (the counterpart of the control transfer sent by lock())
self.advantest_locked = False
self.device.ctrl_transfer(bmRequestType=0xA1, bRequest=0xA0, wValue=0x0000, wIndex=0x0000, data_or_wLength=1)
else:
raise NotImplementedError()
    def advantest_read_myid(self):
        "Read MyID value from Advantest and ADCMT devices"
        if not self.connected:
            self.open()
if self.advantest_quirk:
# This Advantest/ADCMT vendor-specific control command reads the "MyID" identifier
try:
return int(self.device.ctrl_transfer(bmRequestType=0xC1, bRequest=0xF5, wValue=0x0000, wIndex=0x0000, data_or_wLength=1)[0])
except:
return None
else:
raise NotImplementedError()
def _release_kernel_driver(self, interface_number):
if os.name == 'posix':
if self.device.is_kernel_driver_active(interface_number):
self.reattach.append(interface_number)
try:
self.device.detach_kernel_driver(interface_number)
except usb.core.USBError as e:
sys.exit(
"Could not detach kernel driver from interface({0}): {1}".format(interface_number,
str(e)))
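
# Hedged usage sketch, assuming this is python-usbtmc's Instrument class and the
# module is importable as `usbtmc`; the vendor/product IDs below are placeholders.
if __name__ == "__main__":
    import usbtmc
    instr = usbtmc.Instrument(0x0957, 0x1796)  # hypothetical VID/PID
    print(instr.ask("*IDN?"))                  # write the query, then read the reply
    instr.close()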
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions import stack_select
from resource_management.libraries.resources import HdfsResource
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_stack_version import get_stack_version
import os
import status_params
def get_port_from_url(address):
if not is_empty(address):
return address.split(':')[-1]
else:
return address
# config object that holds the configurations declared in the -site.xml file
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
stack_name = default("/hostLevelParams/stack_name", None)
retryAble = default("/commandParams/command_retry_enabled", False)
version = default("/commandParams/version", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_version = default("/commandParams/version", None)
sudo = AMBARI_SUDO_BINARY
security_enabled = status_params.security_enabled
fs_root = config['configurations']['core-site']['fs.defaultFS']
solr_conf = "/etc/solr/conf"
solr_port = status_params.solr_port
solr_piddir = status_params.solr_piddir
solr_pidfile = status_params.solr_pidfile
user_group = config['configurations']['cluster-env']['user_group']
fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
# shared configs
java64_home = config['hostLevelParams']['java_home']
zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts_list.sort()
# get comma separated list of zookeeper hosts from clusterHostInfo
zookeeper_hosts = ",".join(zookeeper_hosts_list)
#####################################
# Solr configs
#####################################
# Only supporting SolrCloud mode - so hardcode those options
solr_cloudmode = 'true'
solr_dir = '/usr/iop/current/solr-server'
solr_client_dir = '/usr/iop/current/solr-client'
solr_bindir = solr_dir + '/bin'
cloud_scripts = solr_dir + '/server/scripts/cloud-scripts'
if "solr-env" in config['configurations']:
solr_hosts = config['clusterHostInfo']['solr_hosts']
solr_znode = default('/configurations/solr-env/solr_znode', '/solr')
solr_min_mem = default('/configurations/solr-env/solr_minmem', 1024)
solr_max_mem = default('/configurations/solr-env/solr_maxmem', 2048)
solr_instance_count = len(config['clusterHostInfo']['solr_hosts'])
solr_datadir = default('/configurations/solr-env/solr_datadir', '/opt/solr/data')
solr_data_resources_dir = os.path.join(solr_datadir, 'resources')
solr_jmx_port = default('/configurations/solr-env/solr_jmx_port', 18983)
solr_ssl_enabled = default('configurations/solr-env/solr_ssl_enabled', False)
solr_keystore_location = config['configurations']['solr-env']['solr_keystore_location']
solr_keystore_password = config['configurations']['solr-env']['solr_keystore_password']
solr_keystore_type = config['configurations']['solr-env']['solr_keystore_type']
solr_truststore_location = config['configurations']['solr-env']['solr_truststore_location']
solr_truststore_password = config['configurations']['solr-env']['solr_truststore_password']
solr_truststore_type = config['configurations']['solr-env']['solr_truststore_type']
solr_user = config['configurations']['solr-env']['solr_user']
solr_log_dir = config['configurations']['solr-env']['solr_log_dir']
solr_log = format("{solr_log_dir}/solr-install.log")
solr_env_content = config['configurations']['solr-env']['content']
solr_hdfs_home_dir = config['configurations']['solr-env']['solr_hdfs_home_dir']
if upgrade_direction is not None and upgrade_direction == Direction.UPGRADE:
old_lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
# get comma separated list of zookeeper hosts from clusterHostInfo
index = 0
zookeeper_quorum = ""
for host in config['clusterHostInfo']['zookeeper_hosts']:
zookeeper_quorum += host + ":" + str(zookeeper_port)
index += 1
if index < len(config['clusterHostInfo']['zookeeper_hosts']):
zookeeper_quorum += ","
solr_jaas_file = None
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
solr_jaas_file = solr_conf + '/solr_jaas.conf'
solr_kerberos_keytab = default('/configurations/solr-env/solr_kerberos_keytab', None)
if not solr_kerberos_keytab: #Maybe against older configurations during a downgrade operation. Look for the old property
solr_keytab=config['configurations']['solr-site']['solr.hdfs.security.kerberos.keytabfile']
solr_kerberos_keytab = solr_keytab
solr_kerberos_principal = default('/configurations/solr-env/solr_kerberos_principal', None)
if solr_kerberos_principal:
solr_kerberos_principal = solr_kerberos_principal.replace('_HOST',_hostname_lowercase)
else: #Maybe against older configurations during a downgrade operation. Look for the old property
solr_site = dict(config['configurations']['solr-site'])
solr_principal = solr_site['solr.hdfs.security.kerberos.principal']
solr_principal = solr_principal.replace('_HOST', _hostname_lowercase)
solr_site['solr.hdfs.security.kerberos.principal']=solr_principal
solr_kerberos_principal = solr_principal
solr_web_kerberos_keytab = config['configurations']['solr-env']['solr_web_kerberos_keytab']
solr_web_kerberos_principal = default('/configurations/solr-env/solr_web_kerberos_principal', None)
if solr_web_kerberos_principal:
solr_web_kerberos_principal = solr_web_kerberos_principal.replace('_HOST',_hostname_lowercase)
solr_kerberos_name_rules = config['configurations']['solr-env']['solr_kerberos_name_rules']
solr_xml_content = default('configurations/solr-xml/content', None)
solr_log4j_content = default('configurations/solr-log4j/content', None)
solr_client_custom_log4j = "solr-client-log4j" in config['configurations']
restart_during_downgrade = False
upgrade_direction = default("/commandParams/upgrade_direction", None)
restart_during_downgrade = (upgrade_direction == Direction.DOWNGRADE)
# *********************** RANGER PLUGIN CHANGES ***********************
# ranger host
# **********************************************************************
# get the correct version to use for checking stack features
version_for_stack_feature_checks = get_stack_feature_version(config)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
# need to set the default to False to satisfy downgrades from 4.2.5 to 4.2 or 4.1
is_supported_solr_ranger = default('/configurations/solr-env/is_supported_solr_ranger', False)
#ranger solr properties
if has_ranger_admin and is_supported_solr_ranger:
enable_ranger_solr = config['configurations']['ranger-solr-plugin-properties']['ranger-solr-plugin-enabled']
enable_ranger_solr = not is_empty(enable_ranger_solr) and enable_ranger_solr.lower() == 'yes'
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
xa_audit_db_flavor = xa_audit_db_flavor.lower() if xa_audit_db_flavor else None
xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
xa_audit_db_password = ''
if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_solr'
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-solr-plugin-properties']
ranger_solr_audit = config['configurations']['ranger-solr-audit']
ranger_solr_audit_attrs = config['configuration_attributes']['ranger-solr-audit']
ranger_solr_security = config['configurations']['ranger-solr-security']
ranger_solr_security_attrs = config['configuration_attributes']['ranger-solr-security']
ranger_solr_policymgr_ssl = config['configurations']['ranger-solr-policymgr-ssl']
ranger_solr_policymgr_ssl_attrs = config['configuration_attributes']['ranger-solr-policymgr-ssl']
policy_user = config['configurations']['ranger-solr-plugin-properties']['policy_user']
ranger_plugin_config = {
'username' : config['configurations']['ranger-solr-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-solr-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'solr.url' : config['configurations']['ranger-solr-plugin-properties']['solr.url'],
'commonNameForCertificate' : config['configurations']['ranger-solr-plugin-properties']['common.name.for.certificate']
}
solr_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': ranger_plugin_config,
'description': 'solr repo',
'name': repo_name,
'repositoryType': 'solr',
'type': 'solr',
'assetType': '1'
}
if stack_supports_ranger_kerberos and security_enabled:
ranger_plugin_config['policy.download.auth.users'] = solr_user
ranger_plugin_config['tag.download.auth.users'] = solr_user
ranger_plugin_config['ambari.service.check.user'] = policy_user
#For curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
previous_jdbc_jar_name = None
if stack_supports_ranger_audit_db:
if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
colon_count = xa_db_host.count(':')
if colon_count == 2 or colon_count == 0:
audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
else:
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
driver_curl_target = format("{solr_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
previous_jdbc_jar = format("{solr_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
xa_audit_db_is_enabled = False
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
if xml_configurations_supported and stack_supports_ranger_audit_db:
xa_audit_db_is_enabled = config['configurations']['ranger-solr-audit']['xasecure.audit.destination.db']
xa_audit_hdfs_is_enabled = default('/configurations/ranger-solr-audit/xasecure.audit.destination.hdfs', False)
ssl_keystore_password = unicode(config['configurations']['ranger-solr-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-solr-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
stack_version = get_stack_version('solr-server')
setup_ranger_env_sh_source = format('{stack_root}/{stack_version}/ranger-solr-plugin/install/conf.templates/enable/solr-ranger-env.sh')
setup_ranger_env_sh_target = format("{solr_conf}/solr-ranger-env.sh")
#For SQLA explicitly disable audit to DB for Ranger
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
namenode_hosts = default("/clusterHostInfo/namenode_host", [])
has_namenode = not len(namenode_hosts) == 0
# *********************** end RANGER PLUGIN CHANGES ****************
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = status_params.kinit_path_local
if 'ranger-env' in config['configurations']:
stack_root = Script.get_stack_root()
ranger_home = format('{stack_root}/current/ranger-admin')
audit_solr_enabled = default('/configurations/ranger-env/xasecure.audit.destination.solr', False)
ranger_solr_config_set = config['configurations']['ranger-env']['ranger_solr_config_set']
ranger_solr_collection_name = config['configurations']['ranger-env']['ranger_solr_collection_name']
ranger_solr_shards = config['configurations']['ranger-env']['ranger_solr_shards']
replication_factor = config['configurations']['ranger-env']['ranger_solr_replication_factor']
ranger_solr_conf = format('{solr_dir}/server/solr/configsets/ranger_audit_configs/conf')
is_solrCloud_enabled = default('/configurations/ranger-env/is_solrCloud_enabled', False)
is_external_solrCloud_enabled = default('/configurations/ranger-env/is_external_solrCloud_enabled', False)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs
)
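# Hedged usage sketch (comments only, since params.py is imported for its variables):
# elsewhere in the service scripts this partial is typically invoked as, e.g.,
#   params.HdfsResource(params.solr_hdfs_home_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=params.solr_user)
#   params.HdfsResource(None, action="execute")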
|
|
import os
import sys
import lib.io
import lib.geo
from lib.exif import EXIF, verify_exif
from collections import OrderedDict
import datetime
'''
Sequence class for organizing/cleaning up photos in a folder
- split to sequences based on time intervals
- split to sequences based on gps distances
- remove duplicate images (e.g. waiting for red light, in traffic etc) @simonmikkelsen
'''
MAXIMUM_SEQUENCE_LENGTH = 1000
class Sequence(object):
def __init__(self, filepath, skip_folders=[], skip_subfolders=False, check_exif=True):
self.filepath = filepath
self._skip_folders = skip_folders
self._skip_subfolders = skip_subfolders
self.file_list = self.get_file_list(filepath, check_exif)
self.num_images = len(self.file_list)
def _is_skip(self, filepath):
'''
Skip photos in specified folders
- filepath/duplicates: it stores potential duplicate photos
detected by method 'remove_duplicates'
        - filepath/success: it stores photos that have been successfully uploaded
'''
_is_skip = False
for folder in self._skip_folders:
if folder in filepath:
_is_skip = True
if self._skip_subfolders and filepath != self.filepath:
_is_skip = True
return _is_skip
def _read_capture_time(self, filename):
'''
Use EXIF class to parse capture time from EXIF.
'''
exif = EXIF(filename)
return exif.extract_capture_time()
def _read_lat_lon(self, filename):
'''
Use EXIF class to parse latitude and longitude from EXIF.
'''
exif = EXIF(filename)
lon, lat = exif.extract_lon_lat()
return lat, lon
def _read_direction(self, filename):
'''
Use EXIF class to parse compass direction from EXIF.
'''
exif = EXIF(filename)
direction = exif.extract_direction()
return direction
def get_file_list(self, filepath, check_exif=True):
'''
Get the list of JPEGs in the folder (nested folders)
'''
if filepath.lower().endswith(".jpg"):
# single file
file_list = [filepath]
else:
file_list = []
for root, sub_folders, files in os.walk(self.filepath):
if not self._is_skip(root):
image_files = [os.path.join(root, filename) for filename in files if (filename.lower().endswith(".jpg"))]
if check_exif:
image_files = [f for f in image_files if verify_exif(f)]
file_list += image_files
return file_list
def sort_file_list(self, file_list):
'''
Read capture times and sort files in time order.
'''
if len(file_list) == 0:
return [], []
capture_times = [self._read_capture_time(filepath) for filepath in file_list]
sorted_times_files = zip(capture_times, file_list)
sorted_times_files.sort()
return zip(*sorted_times_files)
def move_groups(self, groups, sub_path=''):
'''
Move the files in the groups to new folders.
'''
for i,group in enumerate(groups):
new_dir = os.path.join(self.filepath, sub_path, str(i))
lib.io.mkdir_p(new_dir)
for filepath in group:
os.rename(filepath, os.path.join(new_dir, os.path.basename(filepath)))
print("Moved {0} photos to {1}".format(len(group), new_dir))
def set_skip_folders(self, folders):
'''
Set folders to skip when iterating through the path
'''
self._skip_folders = folders
def set_file_list(self, file_list):
'''
Set file list for the sequence
'''
self.file_list = file_list
def split(self, cutoff_distance=500., cutoff_time=None, max_sequence_length=MAXIMUM_SEQUENCE_LENGTH, move_files=True, verbose=False, skip_cutoff=False):
'''
Split photos into sequences in case of large distance gap or large time interval
@params cutoff_distance: maximum distance gap in meters
@params cutoff_time: maximum time interval in seconds (if None, use 1.5 x median time interval in the sequence)
'''
file_list = self.file_list
groups = []
if len(file_list) >= 1:
# sort based on EXIF capture time
capture_times, file_list = self.sort_file_list(file_list)
# diff in capture time
capture_deltas = [t2-t1 for t1,t2 in zip(capture_times, capture_times[1:])]
# read gps for ordered files
latlons = [self._read_lat_lon(filepath) for filepath in file_list]
# distance between consecutive images
distances = [lib.geo.gps_distance(ll1, ll2) for ll1, ll2 in zip(latlons, latlons[1:])]
# if cutoff time is given use that, else assume cutoff is 1.5x median time delta
if cutoff_time is None:
if verbose:
print "Cut-off time is None"
median = sorted(capture_deltas)[len(capture_deltas)//2]
if type(median) is not int:
median = median.total_seconds()
cutoff_time = 1.5*median
# extract groups by cutting using cutoff time
group = [file_list[0]]
cut = 0
for i,filepath in enumerate(file_list[1:]):
cut_time = capture_deltas[i].total_seconds() > cutoff_time
cut_distance = distances[i] > cutoff_distance
cut_sequence_length = len(group) > max_sequence_length
if cut_time or cut_distance or cut_sequence_length:
cut += 1
# delta too big, save current group, start new
groups.append(group)
group = [filepath]
if verbose:
if cut_distance:
                            print('Cut {}: Delta in distance {} meters is bigger than cutoff_distance {} meters at {}'.format(cut, distances[i], cutoff_distance, file_list[i+1]))
                        elif cut_time:
                            print('Cut {}: Delta in time {} seconds is bigger than cutoff_time {} seconds at {}'.format(cut, capture_deltas[i].total_seconds(), cutoff_time, file_list[i+1]))
                        elif cut_sequence_length:
                            print('Cut {}: Maximum sequence length {} reached at {}'.format(cut, max_sequence_length, file_list[i+1]))
else:
group.append(filepath)
groups.append(group)
# move groups to subfolders
if move_files:
self.move_groups(groups)
print("Done split photos in {} into {} sequences".format(self.filepath, len(groups)))
return groups
def interpolate_direction(self, offset=0):
'''
Interpolate bearing of photos in a sequence with an offset
@author: mprins
'''
bearings = {}
file_list = self.file_list
num_file = len(file_list)
if num_file > 1:
# sort based on EXIF capture time
capture_times, file_list = self.sort_file_list(file_list)
# read gps for ordered files
latlons = [self._read_lat_lon(filepath) for filepath in file_list]
if len(file_list) > 1:
# bearing between consecutive images
bearings = [lib.geo.compute_bearing(ll1[0], ll1[1], ll2[0], ll2[1])
for ll1, ll2 in zip(latlons, latlons[1:])]
bearings.append(bearings[-1])
bearings = {file_list[i]: lib.geo.offset_bearing(b, offset) for i, b in enumerate(bearings)}
elif num_file == 1:
#if there is only one file in the list, just write the direction 0 and offset
bearings = {file_list[0]: lib.geo.offset_bearing(0.0, offset)}
return bearings
def interpolate_timestamp(self):
'''
Interpolate time stamps in case of identical timestamps within a sequence
'''
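        # Photos that share a capture time are spread evenly over the interval to the
        # next distinct timestamp, tracked via the count/pointer/interval entries below.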
timestamps = []
file_list = self.file_list
num_file = len(file_list)
time_dict = OrderedDict()
capture_times, file_list = self.sort_file_list(file_list)
if num_file < 2:
return capture_times, file_list
# trace identical timestamps (always assume capture_times is sorted)
time_dict = OrderedDict()
for i, t in enumerate(capture_times):
if t not in time_dict:
time_dict[t] = {
"count": 0,
"pointer": 0
}
interval = 0
if i != 0:
interval = (t - capture_times[i-1]).total_seconds()
time_dict[capture_times[i-1]]["interval"] = interval
time_dict[t]["count"] += 1
if len(time_dict) >= 2:
# set time interval as the last available time interval
time_dict[time_dict.keys()[-1]]["interval"] = time_dict[time_dict.keys()[-2]]["interval"]
else:
# set time interval assuming capture interval is 1 second
time_dict[time_dict.keys()[0]]["interval"] = time_dict[time_dict.keys()[0]]["count"] * 1.
        # interpolate timestamps
for f, t in zip(file_list, capture_times):
d = time_dict[t]
s = datetime.timedelta(seconds=d["pointer"] * d["interval"] / float(d["count"]))
updated_time = t + s
time_dict[t]["pointer"] += 1
timestamps.append(updated_time)
return timestamps, file_list
def remove_duplicates(self, min_distance=1e-5, min_angle=5):
'''
        Detect duplicate photos in a folder
@source: a less general version of @simonmikkelsen's duplicate remover
'''
file_list = self.file_list
# ordered list by time
capture_times, file_list = self.sort_file_list(file_list)
# read gps for ordered files
latlons = [self._read_lat_lon(filepath) for filepath in file_list]
# read bearing for ordered files
bearings = [self._read_direction(filepath) for filepath in file_list]
        # interpolated bearings
interpolated_bearings = [lib.geo.compute_bearing(ll1[0], ll1[1], ll2[0], ll2[1])
for ll1, ll2 in zip(latlons, latlons[1:])]
interpolated_bearings.append(bearings[-1])
        # use interpolated bearings if bearing is not available in EXIF
for i, b in enumerate(bearings):
bearings[i] = b if b is not None else interpolated_bearings[i]
is_duplicate = False
prev_unique = file_list[0]
prev_latlon = latlons[0]
prev_bearing = bearings[0]
groups = []
group = []
for i, filename in enumerate(file_list[1:]):
k = i+1
distance = lib.geo.gps_distance(latlons[k], prev_latlon)
if bearings[k] is not None and prev_bearing is not None:
bearing_diff = lib.geo.diff_bearing(bearings[k], prev_bearing)
else:
                # Do not use bearing difference if no bearings are available
bearing_diff = 360
if distance < min_distance and bearing_diff < min_angle:
is_duplicate = True
else:
prev_latlon = latlons[k]
prev_bearing = bearings[k]
if is_duplicate:
group.append(filename)
else:
if group:
groups.append(group)
group = []
is_duplicate = False
groups.append(group)
# move to filepath/duplicates/group_id (TODO: uploader should skip the duplicate folder)
self.move_groups(groups, 'duplicates')
print("Done remove duplicate photos in {} into {} groups".format(self.filepath, len(groups)))
return groups
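
# Hedged usage sketch: the folder path below is a placeholder; with move_files=True
# (the default) split() would also move the photos into numbered sub-folders.
if __name__ == "__main__":
    seq = Sequence("/path/to/photos", skip_folders=["duplicates", "success"])
    groups = seq.split(cutoff_distance=500.0, cutoff_time=300, move_files=False)
    print("Found {} sequences".format(len(groups)))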
|
|
"""
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
from datetime import datetime
import unittest
import mock
from werkzeug.test import Client as HttpClient
from resource_api.errors import ValidationError, DoesNotExist, Forbidden
from resource_api.schema import DateTimeField, IntegerField
from resource_api_http.http import Application
from werkzeug.wrappers import Response
from resource_api_http_client.client import (Client, RootResourceCollection, ResourceInstance, ResourceCollection,
LinkHolder, LinkToOne, RootLinkCollection, LinkCollection, LinkInstance)
from resource_api_http_client.transport import JsonClient
from .base_test import BaseTest
from .simulators import TestService, TestResource, TestLink
class JsonTest(unittest.TestCase):
def _validate_exception(self, exception_class, status_code):
resp = mock.Mock()
resp.data = "666"
resp.status_code = status_code
client = mock.Mock()
client.open.return_value = resp
cli = JsonClient(client)
self.assertRaises(exception_class, cli.open, "some_url")
def test_item_does_not_exist(self):
self._validate_exception(DoesNotExist, 404)
def test_unknown_error(self):
self._validate_exception(Exception, 500)
self._validate_exception(Exception, 430)
def test_ok(self):
resp = mock.Mock()
resp.data = "666"
resp.status_code = 200
client = mock.Mock()
client.open.return_value = resp
cli = JsonClient(client)
self.assertEqual(cli.open("foo"), 666)
def test_validation_error(self):
self._validate_exception(ValidationError, 400)
def test_not_implemented_error(self):
self._validate_exception(NotImplementedError, 501)
def test_not_allowed(self):
self._validate_exception(Forbidden, 405)
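    # Status-code mapping exercised above: 400 -> ValidationError, 404 -> DoesNotExist,
    # 405 -> Forbidden, 501 -> NotImplementedError, anything else -> a plain Exception.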
class BaseClientTest(BaseTest):
def setUp(self):
super(BaseClientTest, self).setUp()
self.client = Client("/", JsonClient(HttpClient(Application(self.srv), Response)))
class ResourceTest(BaseClientTest):
def test_get_schema(self):
self.assertEqual(self.client.schema, {"foo.Target": mock.ANY, "foo.Source": mock.ANY})
def test_get_root_resource_collection(self):
collection = self.client.get_resource_by_name("foo.Source")
self.assertIsInstance(collection, RootResourceCollection)
def test_get_resource(self):
collection = self.client.get_resource_by_name("foo.Source")
item = collection.get(1)
self.assertIsInstance(item, ResourceInstance)
        self.assertEqual(item.pk, 1)
self.assertEqual({"pk": 1, "more_data": "bla", "extra": "foo"}, item.data)
def test_get_count(self):
count = self.client.get_resource_by_name("foo.Source").count()
self.assertEqual(count, 2)
length = len(self.client.get_resource_by_name("foo.Source"))
self.assertEqual(length, 2)
def test_filter(self):
collection = self.client.get_resource_by_name("foo.Source").filter(params={"foo": "bar"})
self.assertNotIsInstance(collection, RootResourceCollection)
self.assertIsInstance(collection, ResourceCollection)
def test_iteration(self):
collection = self.client.get_resource_by_name("foo.Source")
items = list(collection)
self.assertIsInstance(items[0], ResourceInstance)
def test_access_by_index(self):
item = self.client.get_resource_by_name("foo.Source")[0]
self.assertIsInstance(item, ResourceInstance)
def test_create(self):
data = dict(pk=5, extra="Foo", more_data="Bar")
item = self.client.get_resource_by_name("foo.Source").create(data)
self.assertIsInstance(item, ResourceInstance)
self.assertEqual(item.pk, 5)
self.assertEqual(item.data, data)
def test_update(self):
item = self.client.get_resource_by_name("foo.Source")[0]
item.update(data={"extra": "Zool!!!!"})
self.assertEqual(item.data, {"extra": "Zool!!!!", "more_data": "bla", "pk": 1})
def test_delete(self):
collection = self.client.get_resource_by_name("foo.Source")
item = collection.get(1)
item.delete()
self.assertRaises(DoesNotExist, collection.get, 1)
class LinkToOneTest(BaseClientTest):
def test_get_link_holder(self):
links = self.client.get_resource_by_name("foo.Source")[0].links
self.assertIsInstance(links, LinkHolder)
def test_get_link_to_one(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
self.assertIsInstance(link, LinkToOne)
def test_get_link_to_one_target(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
self.assertEqual(link.item.target.pk, 2)
def test_get_link_to_one_data(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
self.assertEqual(link.item.data, {"extra": "foo", "more_data": "bla"})
def test_update(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
link.item.update({"extra": "Baga fel"})
self.assertEqual(link.item.data, {"extra": "Baga fel", "more_data": "bla"})
def test_set(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
link.set({"@target": 1, "extra": "Baga fel"})
self.assertEqual(link.item.target.pk, 1)
def test_delete(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.the_target
link.item.delete()
self.assertRaises(DoesNotExist, lambda: link.item.data)
class LinkToManytest(BaseClientTest):
def test_get_root_link_collection(self):
links = self.client.get_resource_by_name("foo.Source")[0].links.targets
self.assertIsInstance(links, RootLinkCollection)
def test_filter(self):
links = self.client.get_resource_by_name("foo.Source")[0].links.targets.filter()
self.assertNotIsInstance(links, RootLinkCollection)
self.assertIsInstance(links, LinkCollection)
def test_iteration(self):
links = list(self.client.get_resource_by_name("foo.Source")[0].links.targets)
link = links[0]
self.assertIsInstance(link, LinkInstance)
def test_access_by_index(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.targets[0]
self.assertIsInstance(link, LinkInstance)
def test_get_count(self):
links = self.client.get_resource_by_name("foo.Source")[0].links.targets
count = links.count()
self.assertEqual(count, 1)
length = len(links)
self.assertEqual(length, 1)
def test_update(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.targets[0]
link.update({"extra": "Baga fel"})
self.assertEqual(link.data, {"extra": "Baga fel", "more_data": "bla"})
def test_delete(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.targets[0]
link.delete()
self.assertRaises(DoesNotExist, lambda: link.data)
def test_create(self):
links = self.client.get_resource_by_name("foo.Source")[0].links.targets
link = links.create({"@target": 2})
self.assertIsInstance(link, LinkInstance)
self.assertEqual(links.count(), 2)
def test_get(self):
link = self.client.get_resource_by_name("foo.Source")[0].links.targets.get(1)
self.assertEqual(link.target.pk, 1)
class SerializationTest(unittest.TestCase):
def setUp(self):
class Source(TestResource):
class Schema:
pk = IntegerField(pk=True)
datetieme_field = DateTimeField(required=False)
class Links:
class targets(TestLink):
target = "Target"
one_way = True
class Schema:
datetieme_field = DateTimeField(required=False)
class Target(TestResource):
class Schema:
pk = IntegerField(pk=True)
self.srv = srv = TestService()
srv.register(Target, "foo.Target")
srv.register(Source, "foo.Source")
srv.setup()
def _c(model, pk):
srv.storage.set(model.get_name(), pk, {"pk": pk, "datetieme_field": datetime(1, 1, 1, 1, 1, 1)})
_c(Source, 1)
_c(Target, 1)
src = srv._resources_py[Source.get_name()]
srv.storage.set((1, src.links.targets.get_name()), 1, {"datetieme_field": datetime(1, 1, 1, 1, 1, 1)})
self.entry_point = ep = srv.get_entry_point({})
self.storage = srv.storage
self.src = ep.get_resource(Source)
self.target = ep.get_resource(Target)
self.client = Client("/", JsonClient(HttpClient(Application(srv), Response)))
def test_get_resource_datetime(self):
collection = self.client.get_resource_by_name("foo.Source")
item = collection.get(1)
self.assertEqual({"pk": 1, "datetieme_field": datetime(1, 1, 1, 1, 1, 1)}, item.data)
def test_get_link_datetime(self):
collection = self.client.get_resource_by_name("foo.Source")
item = collection.get(1)
link = item.links.targets.get(1)
self.assertEqual({"datetieme_field": datetime(1, 1, 1, 1, 1, 1)}, link.data)
|
|
#! /usr/bin/env python
"""
Module with detection algorithms.
"""
from __future__ import division
from __future__ import print_function
__author__ = 'C. Gomez @ ULg'
__all__ = ['detection',
'mask_source_centers',
'peak_coordinates']
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage.filters import correlate
from skimage import feature
from astropy.stats import sigma_clipped_stats
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm
from astropy.table import Table
from astropy.modeling.models import Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter
from skimage.feature import peak_local_max
from ..var import (mask_circle, pp_subplots, get_square, frame_center,
frame_filter_gaussian2d, fit_2dgaussian)
from ..conf import sep
from .snr import snr_ss
from .frame_analysis import frame_quick_report
# TODO: Add the option of computing and thresholding an S/N map
def detection(array, psf, bkg_sigma=1, mode='lpeaks', matched_filter=False,
mask=True, snr_thresh=5, plot=True, debug=False,
full_output=False, verbose=True, save_plot=None,
object_name = None, frame_size=None, inner_rad=None,
pca_type = None, ncomp = None, NIRC2angscale = False):
""" Finds blobs in a 2d array. The algorithm is designed for automatically
    finding planets in post-processed high contrast final frames. A blob can be
defined as a region of an image in which some properties are constant or
vary within a prescribed range of values. See <Notes> below to read about
the algorithm details.
Parameters
----------
array : array_like, 2d
Input frame.
psf : array_like
Input psf.
bkg_sigma : float, optional
        The number of standard deviations above the clipped median for setting the
background level.
mode : {'lpeaks','log','dog'}, optional
        Sets which algorithm to use. Each algorithm yields different results.
matched_filter : {True, False}, bool optional
        Whether to correlate with the psf or not.
mask : {True, False}, optional
Whether to mask the central region (circular aperture of 2*fwhm radius).
snr_thresh : float, optional
SNR threshold for deciding whether the blob is a detection or not.
    plot : {True, False}, bool optional
If True plots the frame showing the detected blobs on top.
debug : {False, True}, bool optional
Whether to print and plot additional/intermediate results.
full_output : {False, True}, bool optional
Whether to output just the coordinates of blobs that fulfill the SNR
constraint or a table with all the blobs and the peak pixels and SNR.
verbose : {True,False}, bool optional
Whether to print to stdout information about found blobs.
save_plot: string
If provided, the frames processed by blob detection are saved to that path.
object_name: string
Target name, used in the plot title
frame_size: int
Frame size of the pca, used in the plot title
inner_rad: int
Size of the mask in pca, as a unit of the FWHM, used in the plot title
pca_type: string
adi or rdi, used in the title
ncomp: int
Number of principal components used to compute the reduced frame, used in the title
NIRC2angscale: {False, True}
If True the plot axes are converted to angular scale (arcseconds,
assuming NIRC2's ~ 0.01 pixel scale)
Returns
-------
yy, xx : array_like
Two vectors with the y and x coordinates of the centers of the sources
(potential planets).
If full_output is True then a table with all the candidates that passed the
    2d Gaussian fit constraints and their SNR is returned. Also the count of
companions with SNR>5 (those with highest probability of being true
detections).
Notes
-----
The FWHM of the PSF is measured directly on the provided array. If the
parameter matched_filter==True then the PSF is used to run a matched filter
(correlation) which is equivalent to a convolution filter. Filtering the
image will smooth the noise and maximize detectability of objects with a
shape similar to the kernel.
The background level or threshold is found with sigma clipped statistics
    (5 sigma over the median) on the image/correlated image. Then 3 different
strategies can be used to detect the blobs (potential planets):
Local maxima + 2d Gaussian fit. The local peaks above the background on the
(correlated) frame are detected. A maximum filter is used for finding local
maxima. This operation dilates the original image and merges neighboring
local maxima closer than the size of the dilation. Locations where the
original image is equal to the dilated image are returned as local maxima.
The minimum separation between the peaks is 1*FWHM. A 2d Gaussian fit is
done on each of the maxima constraining the position on the subimage and the
    sigma of the fit. Finally the blobs are filtered based on their SNR.
Laplacian of Gaussian + 2d Gaussian fit. It computes the Laplacian of
Gaussian images with successively increasing standard deviation and stacks
    them up in a cube. Blobs are local maxima in this cube. LoG assumes that
    the blobs are bright on a dark background. A 2d Gaussian fit is done
on each of the candidates constraining the position on the subimage and the
    sigma of the fit. Finally the blobs are filtered based on their SNR.
    Difference of Gaussians. This is a faster approximation of the LoG approach. In
this case the image is blurred with increasing standard deviations and the
difference between two successively blurred images are stacked up in a cube.
DOG assumes that the blobs are again assumed to be bright on dark. A 2d
Gaussian fit is done on each of the candidates constraining the position on
the subimage and the sigma of the fit. Finally the blobs are filtered based
    on their SNR.
"""
def check_blobs(array_padded, coords_temp, fwhm, debug):
y_temp = coords_temp[:,0]
x_temp = coords_temp[:,1]
coords = []
# Fitting a 2d gaussian to each local maxima position
for y,x in zip(y_temp,x_temp):
subim, suby, subx = get_square(array_padded,
2*int(np.ceil(fwhm)),
y+pad, x+pad, position=True)
cy, cx = frame_center(subim)
gauss = Gaussian2D(amplitude=subim.max(),
x_mean=cx, y_mean=cy,
x_stddev=fwhm*gaussian_fwhm_to_sigma,
y_stddev=fwhm*gaussian_fwhm_to_sigma, theta=0)
sy, sx = np.indices(subim.shape)
fitter = LevMarLSQFitter()
fit = fitter(gauss, sx, sy, subim)
# checking that the amplitude is positive > 0
# checking whether the x and y centroids of the 2d gaussian fit
# coincide with the center of the subimage (within 2px error)
# checking whether the mean of the fwhm in y and x of the fit
# are close to the FWHM_PSF with a margin of 3px
fwhm_y = fit.y_stddev.value*gaussian_sigma_to_fwhm
fwhm_x = fit.x_stddev.value*gaussian_sigma_to_fwhm
mean_fwhm_fit = np.mean([np.abs(fwhm_x), np.abs(fwhm_y)])
if fit.amplitude.value>0 \
and np.allclose(fit.y_mean.value, cy, atol=2) \
and np.allclose(fit.x_mean.value, cx, atol=2) \
and np.allclose(mean_fwhm_fit, fwhm, atol=3):
coords.append((suby+fit.y_mean.value,subx+fit.x_mean.value))
if debug:
print('Coordinates (Y,X): {:.3f},{:.3f}'.format(y, x))
print('fit peak = {:.3f}'.format(fit.amplitude.value))
#print fit
msg = 'fwhm_y in px = {:.3f}, fwhm_x in px = {:.3f}'
print(msg.format(fwhm_y, fwhm_x))
print('mean fit fwhm = {:.3f}'.format(mean_fwhm_fit))
pp_subplots(subim, colorb=True)
return coords
def print_coords(coords):
print('Blobs found:', len(coords))
print(' ycen xcen')
print('------ ------')
for i in range(len(coords[:,0])):
print('{:.3f} \t {:.3f}'.format(coords[i,0], coords[i,1]))
def print_abort():
if verbose:
print(sep)
print('No potential sources found')
print(sep)
# --------------------------------------------------------------------------
if not array.ndim == 2:
raise TypeError('Input array is not a frame or 2d array')
    if not (psf.ndim == 2 and psf.shape[0] < array.shape[0]):
raise TypeError('Input psf is not a 2d array or has wrong size')
# Getting the FWHM from the PSF array
outdf = fit_2dgaussian(psf, cent=(frame_center(psf)[1],frame_center(psf)[0]),debug=debug, full_output=True)
fwhm_x, fwhm_y = outdf.at[0,'fwhm_x'],outdf.at[0,'fwhm_y']
fwhm = np.mean([fwhm_x, fwhm_y])
if verbose:
print('FWHM =', fwhm)
print()
if debug:
print('FWHM_y', fwhm_y)
print('FWHM_x', fwhm_x)
# Masking the center, 2*lambda/D is the expected IWA
if mask: array = mask_circle(array, radius=fwhm)
# Matched filter
if matched_filter:
frame_det = correlate(array, psf)
else:
frame_det = array
# Estimation of background level
_, median, stddev = sigma_clipped_stats(frame_det, sigma=5, iters=None)
bkg_level = median + (stddev * bkg_sigma)
if debug:
print('Sigma clipped median = {:.3f}'.format(median))
print('Sigma clipped stddev = {:.3f}'.format(stddev))
print('Background threshold = {:.3f}'.format(bkg_level))
print()
if mode=='lpeaks' or mode=='log' or mode=='dog':
# Padding the image with zeros to avoid errors at the edges
pad = 10
array_padded = np.lib.pad(array, pad, 'constant', constant_values=0)
if debug and plot and matched_filter:
print('Input frame after matched filtering:')
pp_subplots(frame_det, rows=2, colorb=True)
if mode=='lpeaks':
# Finding local peaks (can be done in the correlated frame)
coords_temp = peak_local_max(frame_det, threshold_abs=bkg_level,
min_distance=int(np.ceil(fwhm)),
num_peaks=20)
coords = check_blobs(array_padded, coords_temp, fwhm, debug)
coords = np.array(coords)
if verbose and coords.shape[0]>0: print_coords(coords)
elif mode=='log':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_log(frame_det.astype('float'),
threshold=bkg_level,
min_sigma=sigma-.5, max_sigma=sigma+.5)
if len(coords)==0:
print_abort()
return 0, 0
coords = coords[:,:2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0]>0 and verbose: print_coords(coords)
elif mode=='dog':
sigma = fwhm*gaussian_fwhm_to_sigma
coords = feature.blob_dog(frame_det.astype('float'),
threshold=bkg_level,
min_sigma=sigma-.5, max_sigma=sigma+.5)
if len(coords)==0:
print_abort()
return 0, 0
coords = coords[:,:2]
coords = check_blobs(array_padded, coords, fwhm, debug)
coords = np.array(coords)
if coords.shape[0]>0 and verbose: print_coords(coords)
else:
msg = 'Wrong mode. Available modes: lpeaks, log, dog.'
raise TypeError(msg)
if coords.shape[0]==0:
print_abort()
return 0, 0
yy = coords[:,0]
xx = coords[:,1]
yy_final = []
xx_final = []
yy_out = []
xx_out = []
snr_list = []
xx -= pad
yy -= pad
# Checking SNR for potential sources
for i in range(yy.shape[0]):
y = yy[i]
x = xx[i]
if verbose:
print(sep)
print('X,Y = ({:.1f},{:.1f})'.format(x,y))
subim = get_square(array, size=15, y=y, x=x)
snr = snr_ss(array, (x,y), fwhm, False, verbose=False)
snr_list.append(snr)
if snr >= snr_thresh:
#if plot:
#pp_subplots(subim)
if verbose:
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
yy_final.append(y)
xx_final.append(x)
else:
yy_out.append(y)
xx_out.append(x)
if verbose: print('S/N constraint NOT fulfilled (S/N = {:.3f})'.format(snr))
if debug:
#if plot:
#pp_subplots(subim)
_ = frame_quick_report(array, fwhm, (x,y), verbose=verbose)
if debug or full_output:
table = Table([yy.tolist(), xx.tolist(), snr_list],
names=('y','x','px_snr'))
table.sort('px_snr')
yy_final = np.array(yy_final)
xx_final = np.array(xx_final)
yy_out = np.array(yy_out)
xx_out = np.array(xx_out)
if plot:
#print
#print sep
#print 'Input frame showing all the detected blobs / potential sources:'
#print 'In RED circles those that did not pass the SNR and 2dGaussian '
#print 'fit constraints while in CYAN circles those that passed them.'
fig, ax = plt.subplots(figsize=(6,6))
im = ax.imshow(array, origin='lower', interpolation='nearest',
cmap='gray', alpha=0.8)
# Option to plot axes in angular scale
if NIRC2angscale and frame_size is not None:
from scipy.ndimage import gaussian_filter
# Converting axes from pixels to arcseconds
# Find the middle value in the odd frame sizes
center_val = int((frame_size / 2.0) + 0.5)
# Place a tick every 0.5 arcseconds
half_num_ticks = center_val // 50
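            # 0.5 arcsec corresponds to 50 px at NIRC2's ~0.01 arcsec/px plate scale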
# Calculate the pixel locations at which to put ticks
ticks = []
for i in range(half_num_ticks, -half_num_ticks-1, -1):
# Avoid ticks not showing on the last pixel
if not center_val - (i) * 50 == frame_size:
ticks.append(center_val - (i) * 50)
else:
ticks.append((center_val - (i) * 50) - 1)
#print xticks
ax.set_xticks(ticks)
ax.set_yticks(ticks)
# Calculate the corresponding distance in arcseconds, measured from the center
labels = []
for i in range(half_num_ticks, -half_num_ticks-1, -1):
labels.append(0.0 - (i) * 0.5)
#print xlabels
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.set_xlabel("arcseconds", fontsize=12)
ax.set_ylabel("arcseconds", fontsize=12)
plt.tick_params(axis='both', which='major', labelsize=10)
# Set the title of the plot
if object_name is not None and inner_rad is not None:
ax.set_title(pca_type + ' ' + object_name+' '+ str(ncomp) + 'pc ' + str(frame_size)+'+'+str(inner_rad),
fontsize=14)
array_smoothed = gaussian_filter(array, sigma=(2.3, 2.3), order=0)
plt.imshow(array_smoothed, origin='lower')
else:
colorbar_ax = fig.add_axes([0.92, 0.12, 0.03, 0.78])
fig.colorbar(im, cax=colorbar_ax)
ax.grid('off')
for i in range(yy_out.shape[0]):
y = yy_out[i]
x = xx_out[i]
circ = plt.Circle((x, y), radius=fwhm, color='red', fill=False,
linewidth=1.5, alpha=0.6)
ax.text(x, y+1.5*fwhm, (int(x), int(y)), fontsize=10, color='red',
family='monospace', ha='center', va='top', weight='bold',
alpha=0.6)
ax.add_patch(circ)
for i in range(yy_final.shape[0]):
y = yy_final[i]
x = xx_final[i]
circ = plt.Circle((x, y), radius=fwhm, color='cyan', fill=False,
linewidth=2)
ax.text(x, y+1.5*fwhm, (int(x), int(y)), fontsize=10, color='cyan',
weight='heavy', family='monospace', ha='center', va='top')
ax.add_patch(circ)
# Save the plot if output path is provided
# Don't show the plot when running pipeline (i.e. when saving figures)
if save_plot is not None:
plt.savefig(save_plot, dpi= 100, bbox_inches='tight')
else:
plt.show()
if debug: print(table)
if full_output:
return table, yy_final.shape[0]
else:
return yy_final, xx_final
def peak_coordinates(obj_tmp, fwhm, approx_peak=None, search_box=None,
channels_peak=False):
"""Find the pixel coordinates of maximum in either a frame or a cube,
after convolution with gaussian. It first applies a gaussian filter, to
lower the probability of returning a hot pixel (although it may still
happen with clumps of hot pixels, hence the need for function
"approx_stellar_position").
Parameters
----------
obj_tmp : cube_like or frame_like
Input 3d cube or image.
fwhm : float_like
Input full width half maximum value of the PSF in pixels. This will be
used as the standard deviation for Gaussian kernel of the Gaussian
filtering.
approx_peak: 2 components list or array, opt
Gives the approximate coordinates of the peak.
search_box: float or 2 components list or array, opt
Gives the half-size in pixels of a box in which the peak is searched,
around approx_peak. If float, it is assumed the same box size is wanted
in both y and x. Note that this parameter should be provided if
approx_peak is provided.
channels_peak: bool, {False, True}, opt
Whether returns the indices of the peak in each channel in addition to
the global indices of the peak in the cube. If True, it would hence also
return two 1d-arrays. (note: only available if the input is a 3d cube)
Returns
-------
zz_max, yy_max, xx_max : integers
Indices of highest throughput channel
"""
ndims = len(obj_tmp.shape)
assert ndims == 2 or ndims == 3, "Array is not two or three dimensional"
if approx_peak is not None:
assert len(approx_peak) == 2, "Approx peak is not two dimensional"
        if isinstance(search_box, (float, int)):
sbox_y = search_box
sbox_x = search_box
elif len(search_box) == 2:
sbox_y = search_box[0]
sbox_x = search_box[1]
else:
msg = "The search box does not have the right number of elements"
raise ValueError(msg)
if ndims == 3:
n_z = obj_tmp.shape[0]
sbox = np.zeros([n_z,2*sbox_y+1,2*sbox_x+1])
if ndims == 2:
gauss_filt_tmp = frame_filter_gaussian2d(obj_tmp,
fwhm/gaussian_sigma_to_fwhm)
if approx_peak is None:
ind_max = np.unravel_index(gauss_filt_tmp.argmax(),
gauss_filt_tmp.shape)
else:
sbox = gauss_filt_tmp[approx_peak[0]-sbox_y:approx_peak[0]+sbox_y+1,
approx_peak[1]-sbox_x:approx_peak[1]+sbox_x+1]
ind_max_sbox = np.unravel_index(sbox.argmax(), sbox.shape)
ind_max = (approx_peak[0]-sbox_y+ind_max_sbox[0],
approx_peak[1]-sbox_x+ind_max_sbox[1])
return ind_max
if ndims == 3:
n_z = obj_tmp.shape[0]
gauss_filt_tmp = np.zeros_like(obj_tmp)
ind_ch_max = np.zeros([n_z,2])
for zz in range(n_z):
gauss_filt_tmp[zz] = frame_filter_gaussian2d(obj_tmp[zz],
fwhm[zz]/gaussian_sigma_to_fwhm)
if approx_peak is None:
ind_ch_max[zz] = np.unravel_index(gauss_filt_tmp[zz].argmax(),
gauss_filt_tmp[zz].shape)
else:
sbox[zz] = gauss_filt_tmp[zz, approx_peak[0]-sbox_y:\
approx_peak[0]+sbox_y+1,
approx_peak[1]-sbox_x:\
approx_peak[1]+sbox_x+1]
ind_max_sbox = np.unravel_index(sbox[zz].argmax(),
sbox[zz].shape)
ind_ch_max[zz] = (approx_peak[0]-sbox_y+ind_max_sbox[0],
approx_peak[1]-sbox_x+ind_max_sbox[1])
if approx_peak is None:
ind_max = np.unravel_index(gauss_filt_tmp.argmax(),
gauss_filt_tmp.shape)
else:
ind_max_tmp = np.unravel_index(sbox.argmax(),
sbox.shape)
ind_max = (ind_max_tmp[0]+approx_peak[0]-sbox_y,
ind_max_tmp[1]+approx_peak[1]-sbox_x)
if channels_peak:
return ind_max, ind_ch_max
else:
return ind_max
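# A minimal usage sketch (the helper name below is illustrative, not part of
# the original module): it builds a synthetic frame with one bright Gaussian
# blob and recovers its position with peak_coordinates, first globally and
# then restricted to a small search box around an approximate guess.
def _example_peak_coordinates():
    fwhm = 4.0
    yy_grid, xx_grid = np.mgrid[0:101, 0:101]
    # Gaussian blob centered at (y, x) = (60, 40); fwhm/2.355 approximates sigma
    frame = np.exp(-((yy_grid - 60) ** 2 + (xx_grid - 40) ** 2) /
                   (2 * (fwhm / 2.355) ** 2))
    peak_global = peak_coordinates(frame, fwhm)
    peak_boxed = peak_coordinates(frame, fwhm, approx_peak=(58, 42),
                                  search_box=10)
    return peak_global, peak_boxed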
def mask_source_centers(array, fwhm, y, x):
""" Creates a mask of ones with the size of the input frame and zeros at
the center of the sources (planets) with coordinates x, y.
Parameters
----------
array : array_like
Input frame.
fwhm : float
Size in pixels of the FWHM.
y, x : tuples of int
Coordinates of the center of the sources.
Returns
-------
mask : array_like
Mask frame.
"""
    if array.ndim != 2:
raise TypeError('Wrong input array shape.')
frame = array.copy()
    # if no source coordinates are provided, detect them automatically
    if not y and not x:
frame = mask_circle(frame, radius=2*fwhm)
yy, xx = detection(frame, fwhm, plot=False, mode='log')
else:
yy = np.array(y); xx = np.array(x)
mask = np.ones_like(array)
# center sources become zeros
mask[yy.astype('int'), xx.astype('int')] = 0
return mask
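# A short illustrative sketch (hypothetical helper, not part of the module):
# the mask returned by mask_source_centers is typically multiplied into the
# frame so that the source centers are zeroed out before further processing.
def _example_apply_source_mask(frame, fwhm, y, x):
    mask = mask_source_centers(frame, fwhm, y, x)
    return frame * mask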
|
|
# -*- coding: utf-8 -*-
"""
Convert 3D DC/IP Data to 2D Lines
=================================
3D DC/IP surveys are frequently composed of a set of 2D survey lines.
In this case, the 3D survey can be parsed into a list of
2D surveys, which can be imaged or inverted independently.
In this tutorial, we focus on the following:
    - Loading and plotting the distribution of 3D DC/IP data using a 3D pseudo-section
    - Parsing the 3D survey geometry and associated data into a set of 2D surveys
- Plotting data for each 2D survey on a 2D pseudo-section
- Including survey topography when plotting pseudo-sections
In this case, the survey consists of dipole-dipole data for three East-West lines
and two North-South lines.
"""
#########################################################################
# Import modules
# --------------
#
from discretize.utils import mkvc
from SimPEG import utils
from SimPEG.utils.io_utils.io_utils_electromagnetics import read_dcip_xyz
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.static_utils import (
apparent_resistivity_from_voltage,
convert_survey_3d_to_2d_lines,
plot_pseudosection,
)
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile
mpl.rcParams.update({"font.size": 16})
try:
import plotly
from SimPEG.electromagnetics.static.utils.static_utils import plot_3d_pseudosection
has_plotly = True
except ImportError:
    has_plotly = False
# sphinx_gallery_thumbnail_number = 3
##########################################################
# Download Assets
# ---------------
#
# Here we provide the file paths to the assets we need to run this tutorial:
# the surface topography and an XYZ-formatted 3D DC resistivity data file.
# These files are stored as a tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/dcr3d.tar.gz"
#
#
# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr3d.tar.gz"
# download the data
downloaded_data = utils.download(data_source, overwrite=True)
# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()
# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep
# files to work with
topo_filename = dir_path + "topo_xyz.txt"
data_filename = dir_path + "dc_data.xyz"
#############################################
# Load the Data
# -------------
#
# Here we load the file needed to run the tutorial.
# In this case, we load the surface topography and an XYZ formatted data file
# containing 3D DC resistivity data.
#
# Load 3D topography
topo_xyz = np.loadtxt(str(topo_filename))
# Load 3D data. Here, the data are loaded from an XYZ formatted data file.
# The user must supply the proper headers for the function to identify the
# correct column. Using the 'dict_headers' keyword argument, we can load and
# organize additional columns in the data file as a dictionary.
data_3d, out_dict = read_dcip_xyz(
data_filename,
"volt",
a_headers=["XA", "YA", "ZA"],
b_headers=["XB", "YB", "ZB"],
m_headers=["XM", "YM", "ZM"],
n_headers=["XN", "YN", "ZN"],
data_header="V/A",
uncertainties_header="UNCERT",
dict_headers=["LINEID"],
)
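# As a quick, purely illustrative sanity check, we can inspect how many survey
# lines are present in the file and how many data are associated with each
# line ID before doing any plotting or conversion.
line_ids, line_counts = np.unique(out_dict["LINEID"], return_counts=True)
print("Line IDs found:", line_ids)
print("Number of data per line:", line_counts)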
#######################################################################
# Plot 3D Pseudosection
# ---------------------
#
# Here we demonstrate how 3D DC resistivity data can be represented on a 3D
# pseudosection plot. To use this utility, you must have Python's *plotly*
# package. Here, we represent the data as apparent conductivities.
#
# Extract 3D survey and observed data
survey_3d = data_3d.survey
dobs_3d = data_3d.dobs
# Convert predicted data to apparent conductivities
apparent_conductivity_3d = 1 / apparent_resistivity_from_voltage(
survey_3d, dobs_3d, space_type="half space"
)
if has_plotly:
fig = plot_3d_pseudosection(
survey_3d, apparent_conductivity_3d, scale="log", units="S/m",
)
fig.update_layout(
title_text="Apparent Conductivity",
title_x=0.5,
title_font_size=24,
width=650,
height=500,
scene_camera=dict(
center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.6, y=-1.6, z=1.8)
),
)
plotly.io.show(fig)
else:
print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS")
######################################################################
# Convert From 3D to 2D
# ---------------------
#
# Here, we convert the 3D survey into a list of 2D surveys. A vector containing
# a line ID for each datum is required. By setting 'output_indexing' to True,
# we output a list containing the indices to extract the data for each 2D survey
# from vectors associated with the 3D survey.
#
# Extract line ID from dictionary
lineID = out_dict["LINEID"]
# Convert 3D survey to a list of 2D surveys
survey_2d_list, index_list = convert_survey_3d_to_2d_lines(
survey_3d, lineID, data_type="volt", output_indexing=True
)
# Create list of 2D apparent conductivities. Note that if you converted the
# observed data first and then computed apparent conductivities from the 2D
# surveys, the computation would assume a 2D survey geometry and the values
# would not match those on the 3D pseudosection plot.
dobs_2d_list = []
apparent_conductivities_2d = []
for ind in index_list:
dobs_2d_list.append(dobs_3d[ind])
apparent_conductivities_2d.append(apparent_conductivity_3d[ind])
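# A brief consistency check (not part of the original tutorial): since every
# datum carries a line ID, the 2D surveys together should account for all of
# the 3D data, i.e. the per-line data counts sum to the total.
n_extracted = sum(len(d) for d in dobs_2d_list)
print("3D data: {}, data extracted across 2D lines: {}".format(
    len(dobs_3d), n_extracted))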
#######################################################################
# Plot 2D Pseudosections
# ----------------------
#
title_str = [
"East-West Line at Northing = 0 m",
"North-South Line at Easting = -350 m",
"North-South Line at Easting = -350 m",
]
# Plot apparent conductivity pseudo-section
for ii in range(len(survey_2d_list)):
vlim = [apparent_conductivity_3d.min(), apparent_conductivity_3d.max()]
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
survey_2d_list[ii],
dobs=apparent_conductivities_2d[ii],
plot_type="contourf",
ax=ax1,
vlim=vlim,
scale="log",
        cbar_label="Apparent Conductivity [S/m]",
mask_topography=True,
contourf_opts={"levels": 30, "cmap": mpl.cm.viridis},
)
ax1.set_title(title_str[ii])
plt.show()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport):
"""gRPC backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.CreateIndexEndpointRequest], operations_pb2.Operation
]:
r"""Return a callable for the create index endpoint method over gRPC.
Creates an IndexEndpoint.
Returns:
Callable[[~.CreateIndexEndpointRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index_endpoint" not in self._stubs:
self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint",
request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index_endpoint"]
@property
def get_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.GetIndexEndpointRequest], index_endpoint.IndexEndpoint
]:
r"""Return a callable for the get index endpoint method over gRPC.
Gets an IndexEndpoint.
Returns:
Callable[[~.GetIndexEndpointRequest],
~.IndexEndpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index_endpoint" not in self._stubs:
self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint",
request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
response_deserializer=index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["get_index_endpoint"]
@property
def list_index_endpoints(
self,
) -> Callable[
[index_endpoint_service.ListIndexEndpointsRequest],
index_endpoint_service.ListIndexEndpointsResponse,
]:
r"""Return a callable for the list index endpoints method over gRPC.
Lists IndexEndpoints in a Location.
Returns:
Callable[[~.ListIndexEndpointsRequest],
~.ListIndexEndpointsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_index_endpoints" not in self._stubs:
self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints",
request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
)
return self._stubs["list_index_endpoints"]
@property
def update_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.UpdateIndexEndpointRequest],
gca_index_endpoint.IndexEndpoint,
]:
r"""Return a callable for the update index endpoint method over gRPC.
Updates an IndexEndpoint.
Returns:
Callable[[~.UpdateIndexEndpointRequest],
~.IndexEndpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index_endpoint" not in self._stubs:
self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint",
request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["update_index_endpoint"]
@property
def delete_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.DeleteIndexEndpointRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete index endpoint method over gRPC.
Deletes an IndexEndpoint.
Returns:
Callable[[~.DeleteIndexEndpointRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index_endpoint" not in self._stubs:
self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint",
request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index_endpoint"]
@property
def deploy_index(
self,
) -> Callable[
[index_endpoint_service.DeployIndexRequest], operations_pb2.Operation
]:
r"""Return a callable for the deploy index method over gRPC.
Deploys an Index into this IndexEndpoint, creating a
DeployedIndex within it.
Only non-empty Indexes can be deployed.
Returns:
Callable[[~.DeployIndexRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_index" not in self._stubs:
self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex",
request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_index"]
@property
def undeploy_index(
self,
) -> Callable[
[index_endpoint_service.UndeployIndexRequest], operations_pb2.Operation
]:
r"""Return a callable for the undeploy index method over gRPC.
Undeploys an Index from an IndexEndpoint, removing a
DeployedIndex from it, and freeing all resources it's
using.
Returns:
Callable[[~.UndeployIndexRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_index" not in self._stubs:
self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex",
request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_index"]
@property
def mutate_deployed_index(
self,
) -> Callable[
[index_endpoint_service.MutateDeployedIndexRequest], operations_pb2.Operation
]:
r"""Return a callable for the mutate deployed index method over gRPC.
Update an existing DeployedIndex under an
IndexEndpoint.
Returns:
Callable[[~.MutateDeployedIndexRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_deployed_index" not in self._stubs:
self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex",
request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["mutate_deployed_index"]
def close(self):
self.grpc_channel.close()
__all__ = ("IndexEndpointServiceGrpcTransport",)
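# A minimal wiring sketch (not part of the generated file): the transport is
# normally handed to the corresponding GAPIC client. The client import path
# and the reliance on application default credentials below are assumptions
# about the surrounding package and environment; treat this as illustrative.
def _example_build_client():
    from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import (
        IndexEndpointServiceClient,
    )
    transport = IndexEndpointServiceGrpcTransport(host="aiplatform.googleapis.com")
    return IndexEndpointServiceClient(transport=transport)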
|
|
# -*- coding: utf-8 -*-
"""
sphinx.util.docfields
~~~~~~~~~~~~~~~~~~~~~
"Doc fields" are reST field lists in object descriptions that will
be domain-specifically transformed to a more appealing presentation.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from sphinx import addnodes
def _is_single_paragraph(node):
"""True if the node only contains one paragraph (and system messages)."""
if len(node) == 0:
return False
elif len(node) > 1:
for subnode in node[1:]:
if not isinstance(subnode, nodes.system_message):
return False
if isinstance(node[0], nodes.paragraph):
return True
return False
class Field(object):
"""
A doc field that is never grouped. It can have an argument or not, the
argument can be linked using a specified *rolename*. Field should be used
for doc fields that usually don't occur more than once.
Example::
:returns: description of the return value
:rtype: description of the return type
"""
is_grouped = False
is_typed = False
def __init__(self, name, names=(), label=None, has_arg=True, rolename=None):
self.name = name
self.names = names
self.label = label
self.has_arg = has_arg
self.rolename = rolename
def make_xref(self, rolename, domain, target, innernode=nodes.emphasis):
if not rolename:
return innernode(target, target)
refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False,
reftype=rolename, reftarget=target)
refnode += innernode(target, target)
return refnode
def make_entry(self, fieldarg, content):
return (fieldarg, content)
def make_field(self, types, domain, item):
fieldarg, content = item
fieldname = nodes.field_name('', self.label)
if fieldarg:
fieldname += nodes.Text(' ')
fieldname += self.make_xref(self.rolename, domain,
fieldarg, nodes.Text)
fieldbody = nodes.field_body('', nodes.paragraph('', '', *content))
return nodes.field('', fieldname, fieldbody)
class GroupedField(Field):
"""
A doc field that is grouped; i.e., all fields of that type will be
transformed into one field with its body being a bulleted list. It always
has an argument. The argument can be linked using the given *rolename*.
GroupedField should be used for doc fields that can occur more than once.
If *can_collapse* is true, this field will revert to a Field if only used
once.
Example::
:raises ErrorClass: description when it is raised
"""
is_grouped = True
list_type = nodes.bullet_list
def __init__(self, name, names=(), label=None, rolename=None,
can_collapse=False):
Field.__init__(self, name, names, label, True, rolename)
self.can_collapse = can_collapse
def make_field(self, types, domain, items):
fieldname = nodes.field_name('', self.label)
listnode = self.list_type()
if len(items) == 1 and self.can_collapse:
return Field.make_field(self, types, domain, items[0])
for fieldarg, content in items:
par = nodes.paragraph()
par += self.make_xref(self.rolename, domain, fieldarg, nodes.strong)
par += nodes.Text(' -- ')
par += content
listnode += nodes.list_item('', par)
fieldbody = nodes.field_body('', listnode)
return nodes.field('', fieldname, fieldbody)
class TypedField(GroupedField):
"""
A doc field that is grouped and has type information for the arguments. It
always has an argument. The argument can be linked using the given
*rolename*, the type using the given *typerolename*.
Two uses are possible: either parameter and type description are given
separately, using a field from *names* and one from *typenames*,
respectively, or both are given using a field from *names*, see the example.
Example::
:param foo: description of parameter foo
:type foo: SomeClass
-- or --
:param SomeClass foo: description of parameter foo
"""
is_typed = True
def __init__(self, name, names=(), typenames=(), label=None,
rolename=None, typerolename=None, can_collapse=False):
GroupedField.__init__(self, name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(self, types, domain, items):
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += self.make_xref(self.rolename, domain, fieldarg, nodes.strong)
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
par += self.make_xref(self.typerolename, domain, typename)
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
class DocFieldTransformer(object):
"""
Transforms field lists in "doc field" syntax into better-looking
equivalents, using the field type definitions given on a domain.
"""
def __init__(self, directive):
self.domain = directive.domain
if '_doc_field_type_map' not in directive.__class__.__dict__:
directive.__class__._doc_field_type_map = \
self.preprocess_fieldtypes(directive.__class__.doc_field_types)
self.typemap = directive._doc_field_type_map
def preprocess_fieldtypes(self, types):
typemap = {}
for fieldtype in types:
for name in fieldtype.names:
typemap[name] = fieldtype, False
if fieldtype.is_typed:
for name in fieldtype.typenames:
typemap[name] = fieldtype, True
return typemap
def transform_all(self, node):
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
for child in node:
if isinstance(child, nodes.field_list):
self.transform(child)
def transform(self, node):
"""Transform a single field list *node*."""
typemap = self.typemap
entries = []
groupindices = {}
types = {}
# step 1: traverse all fields and collect field types and content
for field in node:
fieldname, fieldbody = field
try:
# split into field type and argument
fieldtype, fieldarg = fieldname.astext().split(None, 1)
except ValueError:
# maybe an argument-less field type?
fieldtype, fieldarg = fieldname.astext(), ''
typedesc, is_typefield = typemap.get(fieldtype, (None, None))
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
fieldname[0] = nodes.Text(new_fieldname)
entries.append(field)
continue
typename = typedesc.name
# collect the content, trying not to keep unnecessary paragraphs
if _is_single_paragraph(fieldbody):
content = fieldbody.children[0].children
else:
content = fieldbody.children
# if the field specifies a type, put it in the types collection
if is_typefield:
# filter out only inline nodes; others will result in invalid
# markup being written out
                content = list(filter(
                    lambda n: isinstance(n, nodes.Inline) or
                              isinstance(n, nodes.Text),
                    content))
if content:
types.setdefault(typename, {})[fieldarg] = content
continue
# also support syntax like ``:param type name:``
if typedesc.is_typed:
try:
argtype, argname = fieldarg.split(None, 1)
except ValueError:
pass
else:
types.setdefault(typename, {})[argname] = \
[nodes.Text(argtype)]
fieldarg = argname
translatable_content = nodes.inline(fieldbody.rawsource,
translatable=True)
translatable_content.source = fieldbody.parent.source
translatable_content.line = fieldbody.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in groupindices:
group = entries[groupindices[typename]]
else:
groupindices[typename] = len(entries)
group = [typedesc, []]
entries.append(group)
entry = typedesc.make_entry(fieldarg, [translatable_content])
group[1].append(entry)
else:
entry = typedesc.make_entry(fieldarg, [translatable_content])
entries.append([typedesc, entry])
# step 2: all entries are collected, construct the new field list
new_list = nodes.field_list()
for entry in entries:
if isinstance(entry, nodes.field):
# pass-through old field
new_list += entry
else:
fieldtype, content = entry
fieldtypes = types.get(fieldtype.name, {})
new_list += fieldtype.make_field(fieldtypes, self.domain,
content)
node.replace_self(new_list)
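# An illustrative sketch (not part of this module) of how a Sphinx domain
# directive typically declares its doc fields so that DocFieldTransformer can
# transform them; the field and role names below follow common conventions
# (e.g. the Python domain) and are assumptions rather than definitions here.
_example_doc_field_types = [
    TypedField('parameter', label='Parameters',
               names=('param', 'parameter', 'arg', 'argument'),
               typerolename='obj', typenames=('type', 'paramtype'),
               can_collapse=True),
    GroupedField('exceptions', label='Raises', rolename='exc',
                 names=('raises', 'raise'), can_collapse=True),
    Field('returnvalue', label='Returns', has_arg=False,
          names=('returns', 'return')),
]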
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
pyformat --in_place third_party/tensorflow/tools/compatibility/renames_v2.py
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
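# A minimal lookup sketch (not part of the autogenerated content): downstream
# upgrade tooling treats this module essentially as a symbol -> replacement
# map. The helper below is purely illustrative and does a plain dictionary
# lookup against the `renames` dictionary defined further down.
def _lookup_rename(symbol):
    """Return the TF 2.0 replacement for a TF 1.x symbol, or the symbol itself."""
    return renames.get(symbol, symbol)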
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.compat.v1.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.debugging.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.debugging.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.control_flow_v2_enabled':
'tf.compat.v1.control_flow_v2_enabled',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_control_flow_v2':
'tf.compat.v1.disable_control_flow_v2',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_tensor_equality':
'tf.compat.v1.disable_tensor_equality',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.experimental.ParameterServerStrategy':
'tf.compat.v1.distribute.experimental.ParameterServerStrategy',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_control_flow_v2':
'tf.compat.v1.enable_control_flow_v2',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_tensor_equality':
'tf.compat.v1.enable_tensor_equality',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.estimator.tpu.InputPipelineConfig':
'tf.compat.v1.estimator.tpu.InputPipelineConfig',
'tf.estimator.tpu.RunConfig':
'tf.compat.v1.estimator.tpu.RunConfig',
'tf.estimator.tpu.TPUConfig':
'tf.compat.v1.estimator.tpu.TPUConfig',
'tf.estimator.tpu.TPUEstimator':
'tf.compat.v1.estimator.tpu.TPUEstimator',
'tf.estimator.tpu.TPUEstimatorSpec':
'tf.compat.v1.estimator.tpu.TPUEstimatorSpec',
'tf.estimator.tpu.experimental.EmbeddingConfigSpec':
'tf.compat.v1.estimator.tpu.experimental.EmbeddingConfigSpec',
'tf.executing_eagerly_outside_functions':
'tf.compat.v1.executing_eagerly_outside_functions',
'tf.experimental.output_all_intermediates':
'tf.compat.v1.experimental.output_all_intermediates',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv':
'tf.math.floordiv',
'tf.floormod':
'tf.math.floormod',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_crop_or_pad':
'tf.image.resize_with_crop_or_pad',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.image.transpose',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.backend.set_session':
'tf.compat.v1.keras.backend.set_session',
'tf.keras.experimental.export_saved_model':
'tf.compat.v1.keras.experimental.export_saved_model',
'tf.keras.experimental.load_from_saved_model':
'tf.compat.v1.keras.experimental.load_from_saved_model',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.layers.disable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.disable_v2_dtype_behavior',
'tf.keras.layers.enable_v2_dtype_behavior':
'tf.compat.v1.keras.layers.enable_v2_dtype_behavior',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.FLOAT16':
'tf.compat.v1.lite.constants.FLOAT16',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.get_potentially_supported_ops':
'tf.compat.v1.lite.experimental.get_potentially_supported_ops',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.mixed_precision.DynamicLossScale':
'tf.compat.v1.mixed_precision.DynamicLossScale',
'tf.mixed_precision.FixedLossScale':
'tf.compat.v1.mixed_precision.FixedLossScale',
'tf.mixed_precision.LossScale':
'tf.compat.v1.mixed_precision.LossScale',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.mixed_precision.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.mixed_precision.MixedPrecisionLossScaleOptimizer',
'tf.mixed_precision.disable_mixed_precision_graph_rewrite':
'tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite',
'tf.mixed_precision.enable_mixed_precision_graph_rewrite':
'tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite',
'tf.mod':
'tf.math.floormod',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.compat.v1.nn.ctc_loss_v2',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.no_regularizer':
'tf.compat.v1.no_regularizer',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.ragged.placeholder':
'tf.compat.v1.ragged.placeholder',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.DEBUG_DIRECTORY':
'tf.saved_model.DEBUG_DIRECTORY',
'tf.saved_model.constants.DEBUG_INFO_FILENAME_PB':
'tf.saved_model.DEBUG_INFO_FILENAME_PB',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.MethodNameUpdater':
'tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_max':
'tf.compat.v1.scatter_nd_max',
'tf.scatter_nd_min':
'tf.compat.v1.scatter_nd_min',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.SparseConditionalAccumulator':
'tf.compat.v1.sparse.SparseConditionalAccumulator',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.all_v2_summary_ops':
'tf.compat.v1.summary.all_v2_summary_ops',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.tpu.CrossShardOptimizer':
'tf.compat.v1.tpu.CrossShardOptimizer',
'tf.tpu.PaddingSpec':
'tf.compat.v1.tpu.PaddingSpec',
'tf.tpu.batch_parallel':
'tf.compat.v1.tpu.batch_parallel',
'tf.tpu.bfloat16_scope':
'tf.compat.v1.tpu.bfloat16_scope',
'tf.tpu.core':
'tf.compat.v1.tpu.core',
'tf.tpu.cross_replica_sum':
'tf.compat.v1.tpu.cross_replica_sum',
'tf.tpu.experimental.AdagradParameters':
'tf.compat.v1.tpu.experimental.AdagradParameters',
'tf.tpu.experimental.AdamParameters':
'tf.compat.v1.tpu.experimental.AdamParameters',
'tf.tpu.experimental.FtrlParameters':
'tf.compat.v1.tpu.experimental.FtrlParameters',
'tf.tpu.experimental.StochasticGradientDescentParameters':
'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters',
'tf.tpu.experimental.embedding_column':
'tf.compat.v1.tpu.experimental.embedding_column',
'tf.tpu.experimental.shared_embedding_columns':
'tf.compat.v1.tpu.experimental.shared_embedding_columns',
'tf.tpu.initialize_system':
'tf.compat.v1.tpu.initialize_system',
'tf.tpu.outside_compilation':
'tf.compat.v1.tpu.outside_compilation',
'tf.tpu.replicate':
'tf.compat.v1.tpu.replicate',
'tf.tpu.rewrite':
'tf.compat.v1.tpu.rewrite',
'tf.tpu.shard':
'tf.compat.v1.tpu.shard',
'tf.tpu.shutdown_system':
'tf.compat.v1.tpu.shutdown_system',
'tf.tpu.XLAOptions':
'tf.compat.v1.tpu.XLAOptions',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
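# --- Illustrative sketch (editor's addition, not part of the upstream file) ---
# The mapping above pairs each removed 1.x symbol with its 2.x location. A
# minimal way to apply such a mapping to plain source text is a boundary-aware
# textual rewrite; the real upgrade tooling works on the AST, so treat this
# only as an illustration. The names `_sample_renames` and `_rewrite_symbols`
# are hypothetical, and the sample dict is just a small subset of the entries
# listed above so the sketch stays self-contained.
import re as _re

_sample_renames = {
    'tf.log': 'tf.math.log',
    'tf.logging.info': 'tf.compat.v1.logging.info',
    'tf.random_uniform': 'tf.random.uniform',
}

def _rewrite_symbols(source, renames=_sample_renames):
    # The lookbehind/lookahead keep a short name such as 'tf.log' from
    # matching inside a longer dotted name such as 'tf.logging.info' or
    # 'tf.log1p'; replacing longer names first is belt-and-braces on top.
    for old in sorted(renames, key=len, reverse=True):
        pattern = r'(?<![\w.])' + _re.escape(old) + r'(?![\w.])'
        source = _re.sub(pattern, renames[old], source)
    return source

# Example:
#   _rewrite_symbols("x = tf.log(tf.random_uniform([2]))")
#   -> "x = tf.math.log(tf.random.uniform([2]))"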
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_query_by_factory_request(
subscription_id: str,
resource_group_name: str,
factory_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/queryPipelineRuns')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"factoryName": _SERIALIZER.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
factory_name: str,
run_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"factoryName": _SERIALIZER.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
"runId": _SERIALIZER.url("run_id", run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_cancel_request(
subscription_id: str,
resource_group_name: str,
factory_name: str,
run_id: str,
*,
is_recursive: Optional[bool] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"factoryName": _SERIALIZER.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
"runId": _SERIALIZER.url("run_id", run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if is_recursive is not None:
query_parameters['isRecursive'] = _SERIALIZER.query("is_recursive", is_recursive, 'bool')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PipelineRunsOperations(object):
"""PipelineRunsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datafactory.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def query_by_factory(
self,
resource_group_name: str,
factory_name: str,
filter_parameters: "_models.RunFilterParameters",
**kwargs: Any
) -> "_models.PipelineRunsQueryResponse":
"""Query pipeline runs in the factory based on input filter conditions.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param filter_parameters: Parameters to filter the pipeline run.
:type filter_parameters: ~azure.mgmt.datafactory.models.RunFilterParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRunsQueryResponse, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.PipelineRunsQueryResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRunsQueryResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(filter_parameters, 'RunFilterParameters')
request = build_query_by_factory_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
template_url=self.query_by_factory.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRunsQueryResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
query_by_factory.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/queryPipelineRuns'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
factory_name: str,
run_id: str,
**kwargs: Any
) -> "_models.PipelineRun":
"""Get a pipeline run by its run ID.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param run_id: The pipeline run identifier.
:type run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PipelineRun, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.PipelineRun
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PipelineRun"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
run_id=run_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PipelineRun', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}'} # type: ignore
@distributed_trace
def cancel(
self,
resource_group_name: str,
factory_name: str,
run_id: str,
is_recursive: Optional[bool] = None,
**kwargs: Any
) -> None:
"""Cancel a pipeline run by its run ID.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param run_id: The pipeline run identifier.
:type run_id: str
:param is_recursive: If true, cancel all the Child pipelines that are triggered by the current
pipeline.
:type is_recursive: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
run_id=run_id,
is_recursive=is_recursive,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/pipelineruns/{runId}/cancel'} # type: ignore
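# --- Illustrative usage sketch (editor's addition, not generated code) ---
# The operation group above is normally reached through the management client
# rather than instantiated directly. The sketch below assumes the public
# azure-mgmt-datafactory and azure-identity packages; the resource names and
# subscription id are placeholders supplied by the caller.
def _example_query_recent_runs(subscription_id, resource_group_name, factory_name):
    import datetime
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.datafactory import DataFactoryManagementClient
    from azure.mgmt.datafactory.models import RunFilterParameters

    client = DataFactoryManagementClient(DefaultAzureCredential(), subscription_id)
    now = datetime.datetime.utcnow()
    # RunFilterParameters requires a last-updated window; here, the past day.
    filter_parameters = RunFilterParameters(
        last_updated_after=now - datetime.timedelta(days=1),
        last_updated_before=now,
    )
    # query_by_factory / get / cancel map onto the three request builders above.
    runs = client.pipeline_runs.query_by_factory(
        resource_group_name, factory_name, filter_parameters)
    for run in runs.value:
        print(run.run_id, run.status)
    return runs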
|
|
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.plugins.cdh import validation
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
class ValidatorV550(validation.Validator):
PU = pu.PluginUtilsV550()
@classmethod
def validate_cluster_creating(cls, cluster):
super(ValidatorV550, cls).validate_cluster_creating(cluster)
cls._hdfs_ha_validation(cluster)
cls._yarn_ha_validation(cluster)
cls._flume_validation(cluster)
cls._sentry_validation(cluster)
cls._solr_validation(cluster)
cls._sqoop_validation(cluster)
cls._hbase_indexer_validation(cluster)
cls._impala_validation(cluster)
cls._kms_validation(cluster)
@classmethod
def _hdfs_ha_validation(cls, cluster):
jn_count = cls._get_inst_count(cluster, 'HDFS_JOURNALNODE')
zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
require_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
cluster)
if jn_count > 0:
if jn_count < 3:
raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
_('not less than 3'),
jn_count)
if not jn_count % 2:
raise ex.InvalidComponentCountException('HDFS_JOURNALNODE',
_('be odd'), jn_count)
if zk_count < 1:
raise ex.RequiredServiceMissingException('ZOOKEEPER',
required_by='HDFS HA')
if require_anti_affinity:
if 'HDFS_SECONDARYNAMENODE' not in\
cls._get_anti_affinity(cluster):
raise ex.NameNodeHAConfigurationError(
_('HDFS_SECONDARYNAMENODE should be enabled '
'in anti_affinity.'))
if 'HDFS_NAMENODE' not in cls._get_anti_affinity(cluster):
raise ex.NameNodeHAConfigurationError(
_('HDFS_NAMENODE should be enabled in anti_affinity.'))
@classmethod
def _yarn_ha_validation(cls, cluster):
rm_count = cls._get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
stdb_rm_count = cls._get_inst_count(cluster, 'YARN_STANDBYRM')
require_anti_affinity = cls.PU.c_helper.get_required_anti_affinity(
cluster)
if stdb_rm_count > 1:
raise ex.InvalidComponentCountException(
'YARN_STANDBYRM', _('0 or 1'), stdb_rm_count)
if stdb_rm_count > 0:
if rm_count < 1:
raise ex.RequiredServiceMissingException(
'YARN_RESOURCEMANAGER', required_by='RM HA')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='RM HA')
if require_anti_affinity:
if 'YARN_RESOURCEMANAGER' not in\
cls._get_anti_affinity(cluster):
raise ex.ResourceManagerHAConfigurationError(
_('YARN_RESOURCEMANAGER should be enabled in '
'anti_affinity.'))
if 'YARN_STANDBYRM' not in cls._get_anti_affinity(cluster):
raise ex.ResourceManagerHAConfigurationError(
_('YARN_STANDBYRM should be'
' enabled in anti_affinity.'))
@classmethod
def _flume_validation(cls, cluster):
a_count = cls._get_inst_count(cluster, 'FLUME_AGENT')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
if a_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='FLUME_AGENT')
@classmethod
def _sentry_validation(cls, cluster):
snt_count = cls._get_inst_count(cluster, 'SENTRY_SERVER')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
if snt_count > 1:
raise ex.InvalidComponentCountException(
'SENTRY_SERVER', _('0 or 1'), snt_count)
if snt_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SENTRY_SERVER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='SENTRY_SERVER')
@classmethod
def _solr_validation(cls, cluster):
slr_count = cls._get_inst_count(cluster, 'SOLR_SERVER')
zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
if slr_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SOLR_SERVER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='SOLR_SERVER')
@classmethod
def _sqoop_validation(cls, cluster):
s2s_count = cls._get_inst_count(cluster, 'SQOOP_SERVER')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
hs_count = cls._get_inst_count(cluster, 'YARN_JOBHISTORY')
nm_count = cls._get_inst_count(cluster, 'YARN_NODEMANAGER')
if s2s_count > 1:
raise ex.InvalidComponentCountException(
'SQOOP_SERVER', _('0 or 1'), s2s_count)
if s2s_count == 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='SQOOP_SERVER')
if nm_count < 1:
raise ex.RequiredServiceMissingException(
'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
if hs_count != 1:
raise ex.RequiredServiceMissingException(
'YARN_JOBHISTORY', required_by='SQOOP_SERVER')
@classmethod
def _hbase_indexer_validation(cls, cluster):
lhbi_count = cls._get_inst_count(cluster, 'HBASE_INDEXER')
zk_count = cls._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
slr_count = cls._get_inst_count(cluster, 'SOLR_SERVER')
hbm_count = cls._get_inst_count(cluster, 'HBASE_MASTER')
if lhbi_count >= 1:
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='HBASE_INDEXER')
if zk_count < 1:
raise ex.RequiredServiceMissingException(
'ZOOKEEPER', required_by='HBASE_INDEXER')
if slr_count < 1:
raise ex.RequiredServiceMissingException(
'SOLR_SERVER', required_by='HBASE_INDEXER')
if hbm_count < 1:
raise ex.RequiredServiceMissingException(
'HBASE_MASTER', required_by='HBASE_INDEXER')
@classmethod
def _impala_validation(cls, cluster):
ics_count = cls._get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
iss_count = cls._get_inst_count(cluster, 'IMPALA_STATESTORE')
id_count = cls._get_inst_count(cluster, 'IMPALAD')
dn_count = cls._get_inst_count(cluster, 'HDFS_DATANODE')
hms_count = cls._get_inst_count(cluster, 'HIVE_METASTORE')
if ics_count > 1:
raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
_('0 or 1'), ics_count)
if iss_count > 1:
raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
_('0 or 1'), iss_count)
if ics_count == 1:
datanode_ng = u.get_node_groups(cluster, "HDFS_DATANODE")
impalad_ng = u.get_node_groups(cluster, "IMPALAD")
datanodes = set(ng.id for ng in datanode_ng)
impalads = set(ng.id for ng in impalad_ng)
if datanodes != impalads:
raise ex.InvalidClusterTopology(
_("IMPALAD must be installed on every HDFS_DATANODE"))
if iss_count != 1:
raise ex.RequiredServiceMissingException(
'IMPALA_STATESTORE', required_by='IMPALA')
if id_count < 1:
raise ex.RequiredServiceMissingException(
'IMPALAD', required_by='IMPALA')
if dn_count < 1:
raise ex.RequiredServiceMissingException(
'HDFS_DATANODE', required_by='IMPALA')
if hms_count < 1:
raise ex.RequiredServiceMissingException(
'HIVE_METASTORE', required_by='IMPALA')
@classmethod
def _kms_validation(cls, cluster):
kms_count = cls._get_inst_count(cluster, 'KMS')
if kms_count > 1:
raise ex.InvalidComponentCountException('KMS',
_('0 or 1'), kms_count)
@classmethod
def _get_anti_affinity(cls, cluster):
return cluster.anti_affinity
|
|
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
CloudWatch service from AWS.
"""
try:
import simplejson as json
except ImportError:
import json
from boto.connection import AWSQueryConnection
from boto.ec2.cloudwatch.metric import Metric
from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
from boto.ec2.cloudwatch.datapoint import Datapoint
from boto.regioninfo import RegionInfo
import boto
RegionData = {
'us-east-1' : 'monitoring.us-east-1.amazonaws.com',
'us-west-1' : 'monitoring.us-west-1.amazonaws.com',
'us-west-2' : 'monitoring.us-west-2.amazonaws.com',
'sa-east-1' : 'monitoring.sa-east-1.amazonaws.com',
'eu-west-1' : 'monitoring.eu-west-1.amazonaws.com',
'ap-northeast-1' : 'monitoring.ap-northeast-1.amazonaws.com',
'ap-southeast-1' : 'monitoring.ap-southeast-1.amazonaws.com'}
def regions():
"""
Get all available regions for the CloudWatch service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=CloudWatchConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.cloudwatch.CloudWatchConnection`.
:param str region_name: The name of the region to connect to.
    :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
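# --- Illustrative usage sketch (editor's addition, not part of boto) ---
# Assumes AWS credentials are configured the usual boto way; the region name
# is only an example. connect_to_region returns None for unknown names, so
# the sketch checks for that instead of assuming success.
def _example_connection(region_name='us-west-2'):
    conn = connect_to_region(region_name)
    if conn is None:
        raise ValueError('Unknown CloudWatch region: %s' % region_name)
    return conn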
class CloudWatchConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01')
DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name',
'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto',
'cloudwatch_region_endpoint',
'monitoring.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
"""
Init method to create a new connection to EC2 Monitoring Service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['ec2']
def build_dimension_param(self, dimension, params):
for i, dim_name in enumerate(dimension):
dim_value = dimension[dim_name]
if isinstance(dim_value, basestring):
dim_value = [dim_value]
for j, value in enumerate(dim_value):
params['Dimensions.member.%d.Name.%d' % (i+1, j+1)] = dim_name
params['Dimensions.member.%d.Value.%d' % (i+1, j+1)] = value
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for index, item in enumerate(items):
i = index + 1
if isinstance(item, dict):
for k,v in item.iteritems():
params[label % (i, 'Name')] = k
if v is not None:
params[label % (i, 'Value')] = v
else:
params[label % i] = item
def build_put_params(self, params, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
args = (name, value, unit, dimensions, statistics)
length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args))
def aslist(a):
if isinstance(a, list):
if len(a) != length:
raise Exception('Must specify equal number of elements; expected %d.' % length)
return a
return [a] * length
for index, (n, v, u, d, s) in enumerate(zip(*map(aslist, args))):
metric_data = {'MetricName': n}
if timestamp:
metric_data['Timestamp'] = timestamp.isoformat()
if unit:
metric_data['Unit'] = u
if dimensions:
self.build_dimension_param(d, metric_data)
if statistics:
metric_data['StatisticValues.Maximum'] = s['maximum']
metric_data['StatisticValues.Minimum'] = s['minimum']
metric_data['StatisticValues.SampleCount'] = s['samplecount']
metric_data['StatisticValues.Sum'] = s['sum']
if value != None:
                    msg = 'You supplied a value and statistics for a metric. '
                    msg += 'Posting statistics and not value.'
boto.log.warn(msg)
elif value != None:
metric_data['Value'] = v
else:
raise Exception('Must specify a value or statistics to put.')
            for key, val in metric_data.iteritems():
                params['MetricData.member.%d.%s' % (index + 1, key)] = val
def get_metric_statistics(self, period, start_time, end_time, metric_name,
namespace, statistics, dimensions=None,
unit=None):
"""
Get time-series data for one or more statistics of a given metric.
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.
:type start_time: datetime
:param start_time: The time stamp to use for determining the first
datapoint to return. The value specified is
inclusive; results include datapoints with the
time stamp specified.
:type end_time: datetime
:param end_time: The time stamp to use for determining the last
datapoint to return. The value specified is
exclusive; results will include datapoints up to
the time stamp specified.
:type metric_name: string
:param metric_name: The metric name.
:type namespace: string
:param namespace: The metric's namespace.
:type statistics: list
        :param statistics: A list of statistic names. Valid values:
Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
:rtype: list
"""
params = {'Period' : period,
'MetricName' : metric_name,
'Namespace' : namespace,
'StartTime' : start_time.isoformat(),
'EndTime' : end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
self.build_dimension_param(dimensions, params)
return self.get_list('GetMetricStatistics', params,
[('member', Datapoint)])
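    # --- Illustrative sketch (editor's addition, not part of boto's API) ---
    # Shows one way to call get_metric_statistics for a single EC2 instance;
    # the instance id and the 10-minute window are placeholders.
    def _example_average_cpu(self, instance_id, minutes=10):
        import datetime
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=minutes)
        # Period must be a multiple of 60 seconds; 300 keeps the result small.
        return self.get_metric_statistics(
            300, start, end, 'CPUUtilization', 'AWS/EC2',
            ['Average'], dimensions={'InstanceId': instance_id},
            unit='Percent')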
def list_metrics(self, next_token=None, dimensions=None,
metric_name=None, namespace=None):
"""
Returns a list of the valid metrics for which there is recorded
data available.
:type next_token: str
:param next_token: A maximum of 500 metrics will be returned at one
time. If more results are available, the
ResultSet returned will contain a non-Null
next_token attribute. Passing that token as a
parameter to list_metrics will retrieve the
next page of metrics.
:type dimensions: dict
:param dimensions: A dictionary containing name/value pairs
that will be used to filter the results.
The key in the dictionary is the name of
a Dimension. The value in the dictionary
is either a scalar value of that Dimension
name that you want to filter on, a list
of values to filter on or None if
you want all metrics with that Dimension name.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
all Metric names will be returned.
:type namespace: str
:param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
If None, Metrics from all namespaces will be returned.
"""
params = {}
if next_token:
params['NextToken'] = next_token
if dimensions:
self.build_dimension_param(dimensions, params)
if metric_name:
params['MetricName'] = metric_name
if namespace:
params['Namespace'] = namespace
return self.get_list('ListMetrics', params, [('member', Metric)])
def put_metric_data(self, namespace, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
"""
Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
associates the data points with the specified metric. If the specified
metric does not exist, Amazon CloudWatch creates the metric. If a list
is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.
:type namespace: str
:param namespace: The namespace of the metric.
:type name: str or list
:param name: The name of the metric.
:type value: float or list
:param value: The value for the metric.
:type timestamp: datetime or list
:param timestamp: The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.
:type unit: string or list
:param unit: The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:type dimensions: dict
:param dimensions: Add extra name value pairs to associate
with the metric, i.e.:
{'name1': value1, 'name2': (value2, value3)}
:type statistics: dict or list
:param statistics: Use a statistic set instead of a value, for example::
{'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000}
"""
params = {'Namespace': namespace}
self.build_put_params(params, name, value=value, timestamp=timestamp,
unit=unit, dimensions=dimensions, statistics=statistics)
return self.get_status('PutMetricData', params)
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None,
alarm_names=None, max_records=None, state_value=None,
next_token=None):
"""
Retrieves alarms with the specified names. If no name is specified, all
alarms for the user are returned. Alarms can be retrieved by using only
a prefix for the alarm name, the alarm state, or a prefix for any
action.
:type action_prefix: string
:param action_prefix: The action name prefix.
:type alarm_name_prefix: string
:param alarm_name_prefix: The alarm name prefix. AlarmNames cannot
be specified if this parameter is specified.
:type alarm_names: list
:param alarm_names: A list of alarm names to retrieve information for.
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type state_value: string
:param state_value: The state value to be used in matching alarms.
:type next_token: string
:param next_token: The token returned by a previous call to
indicate that there is more data.
:rtype: list
"""
params = {}
if action_prefix:
params['ActionPrefix'] = action_prefix
if alarm_name_prefix:
params['AlarmNamePrefix'] = alarm_name_prefix
elif alarm_names:
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if state_value:
params['StateValue'] = state_value
return self.get_list('DescribeAlarms', params,
[('MetricAlarms', MetricAlarms)])[0]
def describe_alarm_history(self, alarm_name=None,
start_date=None, end_date=None,
max_records=None, history_item_type=None,
next_token=None):
"""
Retrieves history for the specified alarm. Filter alarms by date range
or item type. If an alarm name is not specified, Amazon CloudWatch
returns histories for all of the owner's alarms.
Amazon CloudWatch retains the history of deleted alarms for a period of
six weeks. If an alarm has been deleted, its history can still be
queried.
:type alarm_name: string
:param alarm_name: The name of the alarm.
:type start_date: datetime
:param start_date: The starting date to retrieve alarm history.
:type end_date: datetime
:param end_date: The ending date to retrieve alarm history.
:type history_item_type: string
:param history_item_type: The type of alarm histories to retrieve
(ConfigurationUpdate | StateUpdate | Action)
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type next_token: string
:param next_token: The token returned by a previous call to indicate
that there is more data.
:rtype: list
"""
params = {}
if alarm_name:
params['AlarmName'] = alarm_name
if start_date:
params['StartDate'] = start_date.isoformat()
if end_date:
params['EndDate'] = end_date.isoformat()
if history_item_type:
params['HistoryItemType'] = history_item_type
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAlarmHistory', params,
[('member', AlarmHistoryItem)])
def describe_alarms_for_metric(self, metric_name, namespace, period=None,
statistic=None, dimensions=None, unit=None):
"""
Retrieves all alarms for a single metric. Specify a statistic, period,
or unit to filter the set of alarms further.
:type metric_name: string
:param metric_name: The name of the metric
:type namespace: string
:param namespace: The namespace of the metric.
:type period: int
:param period: The period in seconds over which the statistic
is applied.
:type statistic: string
:param statistic: The statistic for the metric.
:type dimensions: dict
:param dimensions: A dictionary containing name/value pairs
that will be used to filter the results.
The key in the dictionary is the name of
a Dimension. The value in the dictionary
is either a scalar value of that Dimension
name that you want to filter on, a list
of values to filter on or None if
you want all metrics with that Dimension name.
:type unit: string
:param unit: The unit for the metric.
:rtype: list
"""
params = {'MetricName' : metric_name,
'Namespace' : namespace}
if period:
params['Period'] = period
if statistic:
params['Statistic'] = statistic
if dimensions:
self.build_dimension_param(dimensions, params)
if unit:
params['Unit'] = unit
return self.get_list('DescribeAlarmsForMetric', params,
[('member', MetricAlarm)])
def put_metric_alarm(self, alarm):
"""
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue are then
executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
"""
params = {
'AlarmName' : alarm.name,
'MetricName' : alarm.metric,
'Namespace' : alarm.namespace,
'Statistic' : alarm.statistic,
'ComparisonOperator' : alarm.comparison,
'Threshold' : alarm.threshold,
'EvaluationPeriods' : alarm.evaluation_periods,
'Period' : alarm.period,
}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled
if alarm.alarm_actions:
self.build_list_params(params, alarm.alarm_actions,
'AlarmActions.member.%s')
if alarm.description:
params['AlarmDescription'] = alarm.description
if alarm.dimensions:
self.build_dimension_param(alarm.dimensions, params)
if alarm.insufficient_data_actions:
self.build_list_params(params, alarm.insufficient_data_actions,
'InsufficientDataActions.member.%s')
if alarm.ok_actions:
self.build_list_params(params, alarm.ok_actions,
'OKActions.member.%s')
if alarm.unit:
params['Unit'] = alarm.unit
alarm.connection = self
return self.get_status('PutMetricAlarm', params)
create_alarm = put_metric_alarm
update_alarm = put_metric_alarm
def delete_alarms(self, alarms):
"""
Deletes all specified alarms. In the event of an error, no
alarms are deleted.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarms, 'AlarmNames.member.%s')
return self.get_status('DeleteAlarms', params)
def set_alarm_state(self, alarm_name, state_reason, state_value,
state_reason_data=None):
"""
Temporarily sets the state of an alarm. When the updated StateValue
differs from the previous value, the action configured for the
appropriate state is invoked. This is not a permanent change. The next
periodic alarm check (in about a minute) will set the alarm to its
actual state.
:type alarm_name: string
:param alarm_name: Descriptive name for alarm.
:type state_reason: string
:param state_reason: Human readable reason.
:type state_value: string
:param state_value: OK | ALARM | INSUFFICIENT_DATA
:type state_reason_data: string
:param state_reason_data: Reason string (will be jsonified).
"""
params = {'AlarmName' : alarm_name,
'StateReason' : state_reason,
'StateValue' : state_value}
if state_reason_data:
params['StateReasonData'] = json.dumps(state_reason_data)
return self.get_status('SetAlarmState', params)
def enable_alarm_actions(self, alarm_names):
"""
Enables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('EnableAlarmActions', params)
def disable_alarm_actions(self, alarm_names):
"""
Disables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('DisableAlarmActions', params)
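# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of publishing a datapoint and reading it back with the
# methods above. It assumes the usual ``boto.connect_cloudwatch()`` helper and
# valid AWS credentials; the 'MyApp'/'RequestLatency' names are made up.
if __name__ == '__main__':
    import datetime
    import boto

    cw = boto.connect_cloudwatch()
    now = datetime.datetime.utcnow()
    # Publish a single value for a custom metric with one dimension.
    cw.put_metric_data(namespace='MyApp', name='RequestLatency', value=123.0,
                       unit='Milliseconds', dimensions={'Host': 'web-1'})
    # Read back the last hour of datapoints, averaged per minute.
    datapoints = cw.get_metric_statistics(
        period=60,
        start_time=now - datetime.timedelta(hours=1),
        end_time=now,
        metric_name='RequestLatency',
        namespace='MyApp',
        statistics=['Average'],
        dimensions={'Host': 'web-1'},
        unit='Milliseconds')
    print len(datapoints), 'datapoints retrieved'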
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import celery
import elasticsearch
import packaging.version
import pretend
import pytest
import redis
from first import first
import warehouse.search.tasks
from warehouse.search.tasks import (
reindex,
reindex_project,
unindex_project,
_project_docs,
SearchLock,
)
from ...common.db.packaging import ProjectFactory, ReleaseFactory
def test_project_docs(db_session):
projects = [ProjectFactory.create() for _ in range(2)]
releases = {
p: sorted(
[ReleaseFactory.create(project=p) for _ in range(3)],
key=lambda r: packaging.version.parse(r.version),
reverse=True,
)
for p in projects
}
assert list(_project_docs(db_session)) == [
{
"_id": p.normalized_name,
"_type": "doc",
"_source": {
"created": p.created,
"name": p.name,
"normalized_name": p.normalized_name,
"version": [r.version for r in prs],
"latest_version": first(prs, key=lambda r: not r.is_prerelease).version,
},
}
for p, prs in sorted(releases.items(), key=lambda x: x[0].id)
]
def test_single_project_doc(db_session):
projects = [ProjectFactory.create() for _ in range(2)]
releases = {
p: sorted(
[ReleaseFactory.create(project=p) for _ in range(3)],
key=lambda r: packaging.version.parse(r.version),
reverse=True,
)
for p in projects
}
assert list(_project_docs(db_session, project_name=projects[1].name)) == [
{
"_id": p.normalized_name,
"_type": "doc",
"_source": {
"created": p.created,
"name": p.name,
"normalized_name": p.normalized_name,
"version": [r.version for r in prs],
"latest_version": first(prs, key=lambda r: not r.is_prerelease).version,
},
}
for p, prs in sorted(releases.items(), key=lambda x: x[0].name.lower())
if p.name == projects[1].name
]
class FakeESIndices:
def __init__(self):
self.indices = {}
self.aliases = {}
self.put_settings = pretend.call_recorder(lambda *a, **kw: None)
self.delete = pretend.call_recorder(lambda *a, **kw: None)
self.create = pretend.call_recorder(lambda *a, **kw: None)
def exists_alias(self, name):
return name in self.aliases
def get_alias(self, name):
return self.aliases[name]
def put_alias(self, name, index):
self.aliases.setdefault(name, []).append(index)
def remove_alias(self, name, alias):
self.aliases[name] = [n for n in self.aliases[name] if n != alias]
if not self.aliases[name]:
del self.aliases[name]
def update_aliases(self, body):
for items in body["actions"]:
for action, values in items.items():
if action == "add":
self.put_alias(values["alias"], values["index"])
elif action == "remove":
self.remove_alias(values["alias"], values["index"])
else:
raise ValueError("Unknown action: {!r}.".format(action))
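# update_aliases above accepts the same {"actions": [{"add": ...}, {"remove": ...}]}
# payload shape as the real Elasticsearch alias API, which is enough for the
# reindex tests to assert on the final alias -> index mapping.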
class FakeESClient:
def __init__(self):
self.indices = FakeESIndices()
class NotLock:
def __init__(*a, **kw):
pass
def acquire(self):
return True
def release(self):
return True
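# NotLock stands in for the lock object returned by redis' ``lock()``:
# ``acquire()``/``release()`` always succeed, so SearchLock never blocks and the
# reindex tasks can run against a stubbed ``redis.StrictRedis.from_url``.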
class TestSearchLock:
def test_success(self):
lock_stub = pretend.stub(acquire=pretend.call_recorder(lambda: True))
r = pretend.stub(lock=lambda *a, **kw: lock_stub)
test_lock = SearchLock(r)
test_lock.__enter__()
assert lock_stub.acquire.calls == [pretend.call()]
def test_failure(self):
lock_stub = pretend.stub(acquire=pretend.call_recorder(lambda: False))
r = pretend.stub(lock=lambda *a, **kw: lock_stub)
test_lock = SearchLock(r)
with pytest.raises(redis.exceptions.LockError):
test_lock.__enter__()
assert lock_stub.acquire.calls == [pretend.call()]
class TestReindex:
def test_fails_when_raising(self, db_request, monkeypatch):
docs = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
task = pretend.stub()
es_client = FakeESClient()
db_request.registry.update({"elasticsearch.index": "warehouse"})
db_request.registry.settings = {
"elasticsearch.url": "http://some.url",
"celery.scheduler_url": "redis://redis:6379/0",
}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
class TestException(Exception):
pass
def parallel_bulk(client, iterable, index=None):
assert client is es_client
assert iterable is docs
assert index == "warehouse-cbcbcbcbcb"
raise TestException
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
with pytest.raises(TestException):
reindex(task, db_request)
assert es_client.indices.delete.calls == [
pretend.call(index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.put_settings.calls == []
def test_retry_on_lock(self, db_request, monkeypatch):
task = pretend.stub(
retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry))
)
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
le = redis.exceptions.LockError()
monkeypatch.setattr(
redis.StrictRedis,
"from_url",
lambda *a, **kw: pretend.stub(lock=pretend.raiser(le)),
)
with pytest.raises(celery.exceptions.Retry):
reindex(task, db_request)
assert task.retry.calls == [pretend.call(countdown=60, exc=le)]
def test_successfully_indexes_and_adds_new(self, db_request, monkeypatch):
docs = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
task = pretend.stub()
es_client = FakeESClient()
db_request.registry.update(
{"elasticsearch.index": "warehouse", "elasticsearch.shards": 42}
)
db_request.registry.settings = {
"elasticsearch.url": "http://some.url",
"celery.scheduler_url": "redis://redis:6379/0",
}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
parallel_bulk = pretend.call_recorder(lambda client, iterable, index: [None])
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
reindex(task, db_request)
assert parallel_bulk.calls == [
pretend.call(es_client, docs, index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.create.calls == [
pretend.call(
body={
"settings": {
"number_of_shards": 42,
"number_of_replicas": 0,
"refresh_interval": "-1",
}
},
wait_for_active_shards=42,
index="warehouse-cbcbcbcbcb",
)
]
assert es_client.indices.delete.calls == []
assert es_client.indices.aliases == {"warehouse": ["warehouse-cbcbcbcbcb"]}
assert es_client.indices.put_settings.calls == [
pretend.call(
index="warehouse-cbcbcbcbcb",
body={"index": {"number_of_replicas": 0, "refresh_interval": "1s"}},
)
]
def test_successfully_indexes_and_replaces(self, db_request, monkeypatch):
docs = pretend.stub()
task = pretend.stub()
def project_docs(db):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
es_client.indices.indices["warehouse-aaaaaaaaaa"] = None
es_client.indices.aliases["warehouse"] = ["warehouse-aaaaaaaaaa"]
db_engine = pretend.stub()
db_request.registry.update(
{
"elasticsearch.index": "warehouse",
"elasticsearch.shards": 42,
"sqlalchemy.engine": db_engine,
}
)
db_request.registry.settings = {
"elasticsearch.url": "http://some.url",
"celery.scheduler_url": "redis://redis:6379/0",
}
monkeypatch.setattr(
warehouse.search.tasks.elasticsearch,
"Elasticsearch",
lambda *a, **kw: es_client,
)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
parallel_bulk = pretend.call_recorder(lambda client, iterable, index: [None])
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(os, "urandom", lambda n: b"\xcb" * n)
reindex(task, db_request)
assert parallel_bulk.calls == [
pretend.call(es_client, docs, index="warehouse-cbcbcbcbcb")
]
assert es_client.indices.create.calls == [
pretend.call(
body={
"settings": {
"number_of_shards": 42,
"number_of_replicas": 0,
"refresh_interval": "-1",
}
},
wait_for_active_shards=42,
index="warehouse-cbcbcbcbcb",
)
]
assert es_client.indices.delete.calls == [pretend.call("warehouse-aaaaaaaaaa")]
assert es_client.indices.aliases == {"warehouse": ["warehouse-cbcbcbcbcb"]}
assert es_client.indices.put_settings.calls == [
pretend.call(
index="warehouse-cbcbcbcbcb",
body={"index": {"number_of_replicas": 0, "refresh_interval": "1s"}},
)
]
class TestPartialReindex:
def test_reindex_fails_when_raising(self, db_request, monkeypatch):
docs = pretend.stub()
task = pretend.stub()
def project_docs(db, project_name=None):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
db_request.registry.update(
{"elasticsearch.client": es_client, "elasticsearch.index": "warehouse"}
)
class TestException(Exception):
pass
def parallel_bulk(client, iterable, index=None):
assert client is es_client
assert iterable is docs
raise TestException
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
with pytest.raises(TestException):
reindex_project(task, db_request, "foo")
assert es_client.indices.put_settings.calls == []
def test_unindex_fails_when_raising(self, db_request, monkeypatch):
task = pretend.stub()
class TestException(Exception):
pass
es_client = FakeESClient()
es_client.delete = pretend.raiser(TestException)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
db_request.registry.update(
{"elasticsearch.client": es_client, "elasticsearch.index": "warehouse"}
)
with pytest.raises(TestException):
unindex_project(task, db_request, "foo")
def test_unindex_accepts_defeat(self, db_request, monkeypatch):
task = pretend.stub()
es_client = FakeESClient()
es_client.delete = pretend.call_recorder(
pretend.raiser(elasticsearch.exceptions.NotFoundError)
)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
db_request.registry.update(
{"elasticsearch.client": es_client, "elasticsearch.index": "warehouse"}
)
unindex_project(task, db_request, "foo")
assert es_client.delete.calls == [
pretend.call(index="warehouse", doc_type="doc", id="foo")
]
def test_unindex_retry_on_lock(self, db_request, monkeypatch):
task = pretend.stub(
retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry))
)
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
le = redis.exceptions.LockError()
monkeypatch.setattr(
redis.StrictRedis,
"from_url",
lambda *a, **kw: pretend.stub(lock=pretend.raiser(le)),
)
with pytest.raises(celery.exceptions.Retry):
unindex_project(task, db_request, "foo")
assert task.retry.calls == [pretend.call(countdown=60, exc=le)]
def test_reindex_retry_on_lock(self, db_request, monkeypatch):
task = pretend.stub(
retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry))
)
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
le = redis.exceptions.LockError()
monkeypatch.setattr(
redis.StrictRedis,
"from_url",
lambda *a, **kw: pretend.stub(lock=pretend.raiser(le)),
)
with pytest.raises(celery.exceptions.Retry):
reindex_project(task, db_request, "foo")
assert task.retry.calls == [pretend.call(countdown=60, exc=le)]
def test_successfully_indexes(self, db_request, monkeypatch):
docs = pretend.stub()
task = pretend.stub()
def project_docs(db, project_name=None):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
es_client.indices.indices["warehouse-aaaaaaaaaa"] = None
es_client.indices.aliases["warehouse"] = ["warehouse-aaaaaaaaaa"]
db_engine = pretend.stub()
db_request.registry.update(
{
"elasticsearch.client": es_client,
"elasticsearch.index": "warehouse",
"elasticsearch.shards": 42,
"sqlalchemy.engine": db_engine,
}
)
parallel_bulk = pretend.call_recorder(
lambda client, iterable, index=None: [None]
)
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(
redis.StrictRedis, "from_url", lambda *a, **kw: pretend.stub(lock=NotLock)
)
reindex_project(task, db_request, "foo")
assert parallel_bulk.calls == [pretend.call(es_client, docs, index="warehouse")]
assert es_client.indices.create.calls == []
assert es_client.indices.delete.calls == []
assert es_client.indices.aliases == {"warehouse": ["warehouse-aaaaaaaaaa"]}
assert es_client.indices.put_settings.calls == []
|
|
'''
Defines the manager for plugin layout and loading.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2006, 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
import gi
from gi.repository import Gtk as gtk
from gi.repository.Gio import Settings as GSettings
from base_plugin import Plugin
from view import ViewManager
from accerciser.tools import Tools, getTreePathBoundingBox
from message import MessageManager
import os
import sys
import imp
import traceback
from accerciser.i18n import _, N_, C_
GSCHEMA = 'org.a11y.Accerciser'
class PluginManager(gtk.ListStore, Tools):
'''
@cvar COL_INSTANCE: Instance column ID.
@type COL_INSTANCE: integer
@cvar COL_CLASS: Class column ID.
@type COL_CLASS: integer
@cvar COL_PATH: Module path column ID.
@type COL_PATH: integer
@ivar node: Application's selected accessible node.
@type node: L{Node}
@ivar hotkey_manager: Application's hotkey manager.
@type hotkey_manager: L{HotkeyManager}
@ivar view_manager: Plugin view manager.
@type view_manager: L{ViewManager}
@ivar message_manager: Plugin message manager.
@type message_manager: L{MessageManager}
'''
COL_INSTANCE = 0
COL_CLASS = 1
COL_PATH = 2
def __init__(self, node, hotkey_manager, *main_views):
'''
Initialize the plugin manager.
@param node: The application's main node.
@type node: L{Node}
@param hotkey_manager: Application's hot key manager.
@type hotkey_manager: L{HotkeyManager}
@param main_views: List of permanent plugin views.
@type main_views: list of {PluginView}
'''
gtk.ListStore.__init__(self,
object, # Plugin instance
object, # Plugin class
str) # Plugin path
self.node = node
self.hotkey_manager = hotkey_manager
self.gsettings = GSettings(schema=GSCHEMA)
self.view_manager = ViewManager(*main_views)
self.message_manager = MessageManager()
self.message_manager.connect('plugin-reload-request',
self._onPluginReloadRequest)
self.message_manager.connect('module-reload-request',
self._onModuleReloadRequest)
message_tab = self.message_manager.getMessageTab()
self.view_manager.addElement(message_tab)
self._row_changed_handler = \
self.connect('row_changed', self._onPluginRowChanged)
self._loadPlugins()
def close(self):
'''
Close view manager and plugins.
'''
self.view_manager.close()
for row in self:
plugin = row[self.COL_INSTANCE]
if plugin:
plugin._close()
def _loadPlugins(self):
'''
Load all plugins in global and local plugin paths.
'''
# FIXME: it crashes here
for plugin_dir, plugin_fn in self._getPluginFiles():
self._loadPluginFile(plugin_dir, plugin_fn)
self.view_manager.initialView()
def _getPluginFiles(self):
'''
Get list of all modules in plugin paths.
@return: List of plugin files with their paths.
@rtype: tuple
'''
plugin_file_list = []
plugin_dir_local = os.path.join(os.environ['HOME'],
'.accerciser', 'plugins')
plugin_dir_global = os.path.join(sys.prefix, 'share',
'accerciser', 'plugins')
for plugin_dir in (plugin_dir_local, plugin_dir_global):
if not os.path.isdir(plugin_dir):
continue
for fn in os.listdir(plugin_dir):
if fn.endswith('.py') and not fn.startswith('.'):
plugin_file_list.append((plugin_dir, fn[:-3]))
return plugin_file_list
def _getPluginLocals(self, plugin_dir, plugin_fn):
'''
Get the namespace of the given module.
@param plugin_dir: Path.
@type plugin_dir: string
@param plugin_fn: Module.
@type plugin_fn: string
@return: Dictionary of the module's symbols.
@rtype: dictionary
'''
sys.path.insert(0, plugin_dir)
try:
  params = imp.find_module(plugin_fn, [plugin_dir])
  plugin = imp.load_module(plugin_fn, *params)
  plugin_locals = plugin.__dict__
except Exception, e:
  self.message_manager.newModuleError(plugin_fn, plugin_dir,
        traceback.format_exception_only(e.__class__, e)[0].strip(),
        traceback.format_exc())
  return {}
finally:
  # Always drop the temporary path entry, even when the import fails.
  sys.path.pop(0)
return plugin_locals
def _loadPluginFile(self, plugin_dir, plugin_fn):
'''
Find plugin implementations in the given module, and store them.
@param plugin_dir: Path.
@type plugin_dir: string
@param plugin_fn: Module.
@type plugin_fn: string
'''
plugin_locals = self._getPluginLocals(plugin_dir, plugin_fn)
# use keys list to avoid size changes during iteration
for symbol in plugin_locals.keys():
try:
is_plugin = \
issubclass(plugin_locals[symbol], Plugin) and \
getattr(plugin_locals[symbol], 'plugin_name', None)
except TypeError:
continue
if is_plugin:
self.handler_block(self._row_changed_handler)
iter_id = self.append([None, plugin_locals[symbol], plugin_dir])
self.handler_unblock(self._row_changed_handler)
# if a plugin class is found, initialize
disabled_list = self.gsettings.get_strv('disabled-plugins')
enabled = plugin_locals[symbol].plugin_name not in \
disabled_list
if enabled:
self._enablePlugin(iter_id)
self.row_changed(self.get_path(iter_id), iter_id)
def _enablePlugin(self, iter):
'''
Instantiate a plugin class pointed to by the given iter.
@param iter: Iter of plugin class we should instantiate.
@type iter: gtk.TreeIter
'''
plugin_class = self[iter][self.COL_CLASS]
plugin_instance = None
try:
plugin_instance = plugin_class(self.node, self.message_manager)
plugin_instance.init()
for key_combo in plugin_instance.global_hotkeys:
self.hotkey_manager.addKeyCombo(
plugin_class.plugin_name,
plugin_class.plugin_name_localized or plugin_class.plugin_name,
*key_combo)
except Exception, e:
self.message_manager.newPluginError(
plugin_instance, plugin_class,
traceback.format_exception_only(e.__class__, e)[0].strip(),
traceback.format_exc())
try:
plugin_instance._close()
except:
pass
return
self[iter][self.COL_INSTANCE] = plugin_instance
if isinstance(plugin_instance, gtk.Widget):
self.view_manager.addElement(plugin_instance)
plugin_instance.onAccChanged(plugin_instance.node.acc)
disabled_list = self.gsettings.get_strv('disabled-plugins')
if plugin_instance.plugin_name in disabled_list:
disabled_list.remove(plugin_instance.plugin_name)
self.gsettings.set_strv('disabled-plugins', disabled_list)
def _disablePlugin(self, iter):
'''
Disable plugin pointed to by the given iter.
@param iter: Iter of plugin instance to be disabled.
@type iter: gtk.TreeIter
'''
plugin_instance = self[iter][self.COL_INSTANCE]
if not plugin_instance: return
for key_combo in plugin_instance.global_hotkeys:
self.hotkey_manager.removeKeyCombo(
plugin_instance.plugin_name, *key_combo)
if isinstance(plugin_instance, gtk.Widget):
plugin_instance.destroy()
plugin_instance._close()
disabled_list = self.gsettings.get_strv('disabled-plugins')
if not plugin_instance.plugin_name in disabled_list:
disabled_list.append(plugin_instance.plugin_name)
self.gsettings.set_strv('disabled-plugins', disabled_list)
self[iter][self.COL_INSTANCE] = False
def _reloadPlugin(self, iter):
'''
Reload plugin pointed to by the given iter.
@param iter: Iter of plugin to be reloaded.
@type iter: gtk.TreeIter
@return: New instance of plugin
@rtype: L{Plugin}
'''
old_class = self[iter][self.COL_CLASS]
plugin_fn = old_class.__module__
plugin_dir = self[iter][self.COL_PATH]
plugin_locals = self._getPluginLocals(plugin_dir, plugin_fn)
self[iter][self.COL_CLASS] = plugin_locals.get(old_class.__name__)
self._enablePlugin(iter)
return self[iter][self.COL_INSTANCE]
def _getIterWithClass(self, plugin_class):
'''
Get iter with given plugin class.
@param plugin_class: The plugin class to search for.
@type plugin_class: type
@return: The first iter with the given class.
@rtype: gtk.TreeIter
'''
for row in self:
if row[self.COL_CLASS] == plugin_class:
return row.iter
return None
def _onPluginReloadRequest(self, message_manager, message, plugin_class):
'''
Callback for a plugin reload request from the message manager.
@param message_manager: The message manager that emitted the signal.
@type message_manager: L{MessageManager}
@param message: The message widget.
@type message: L{PluginMessage}
@param plugin_class: The plugin class that should be reloaded.
@type plugin_class: type
'''
message.destroy()
iter = self._getIterWithClass(plugin_class)
if not iter: return
self._disablePlugin(iter)
plugin = self._reloadPlugin(iter)
if plugin:
self.view_manager.giveElementFocus(plugin)
def _onModuleReloadRequest(self, message_manager, message, module, path):
'''
Callback for a module reload request from the message manager.
@param message_manager: The message manager that emitted the signal.
@type message_manager: L{MessageManager}
@param message: The message widget.
@type message: L{PluginMessage}
@param module: The module to be reloaded.
@type module: string
@param path: The path of the module.
@type path: string
'''
message.destroy()
self._loadPluginFile(path, module)
def togglePlugin(self, path):
'''
Toggle the plugin, either enable or disable depending on current state.
@param path: Tree path to plugin.
@type path: tuple
'''
iter = self.get_iter(path)
if self[iter][self.COL_INSTANCE]:
self._disablePlugin(iter)
else:
self._reloadPlugin(iter)
def _onPluginRowChanged(self, model, path, iter):
'''
Callback for model row changes. Persists plugin state (enabled/disabled)
in gsettings.
@param model: Current model, actually self.
@type model: gtk.ListStore
@param path: Tree path of changed row.
@type path: tuple
@param iter: Iter of changed row.
@type iter: gtk.TreeIter
'''
plugin_class = model[iter][self.COL_CLASS]
if plugin_class is None:
return
plugin_instance = model[iter][self.COL_INSTANCE]
disabled_list = self.gsettings.get_strv('disabled-plugins')
if plugin_instance is None:
if plugin_class.plugin_name not in disabled_list:
disabled_list.append(plugin_class.plugin_name)
else:
if plugin_class.plugin_name in disabled_list:
disabled_list.remove(plugin_class.plugin_name)
def View(self):
'''
Helps emulate a non-static inner class, which Python does not support
directly.
@return: An inner view class.
@rtype: L{PluginManager._View}
'''
return self._View(self)
class _View(gtk.TreeView):
'''
Implements a treeview of a L{PluginManager}.
@ivar plugin_manager: Plugin manager to use as data model.
@type plugin_manager: L{PluginManager}
@ivar view_manager: View manager to use for plugin view data.
@type view_manager: L{ViewManager}
'''
def __init__(self, plugin_manager):
'''
Initialize view.
@param plugin_manager: Plugin manager to use as data model.
@type plugin_manager: L{PluginManager}
'''
gtk.TreeView.__init__(self)
self.plugin_manager = plugin_manager
self.view_manager = plugin_manager.view_manager
self.set_model(plugin_manager)
self.connect('button-press-event', self._onButtonPress)
self.connect('popup-menu', self._onPopupMenu)
crc = gtk.CellRendererToggle()
tvc = gtk.TreeViewColumn()
tvc.pack_start(crc, True)
tvc.set_cell_data_func(crc, self._pluginStateDataFunc)
crc.connect('toggled', self._onPluginToggled)
self.append_column(tvc)
crt = gtk.CellRendererText()
tvc = gtk.TreeViewColumn(_('Name'))
tvc.pack_start(crt, True)
tvc.set_cell_data_func(crt, self._pluginNameDataFunc)
self.append_column(tvc)
crc = gtk.CellRendererText()
# Translators: This is the viewport in which the plugin appears,
# it is a noun.
#
tvc = gtk.TreeViewColumn(C_('viewport', 'View'))
tvc.pack_start(crc, False)
tvc.set_cell_data_func(crc, self._viewNameDataFunc)
crc.set_property('editable', True)
crc.connect('edited', self._onViewChanged)
self.append_column(tvc)
def _onButtonPress(self, widget, event):
'''
Callback for plugin view context menus.
@param widget: Widget that emitted signal.
@type widget: gtk.Widget
@param event: Event object.
@type event: gtk.gdk.Event
'''
if event.button == 3:
path = self.get_path_at_pos(int(event.x), int(event.y))[0]
self._showPopup(event.button, event.time, path)
def _onPopupMenu(self, widget):
'''
Callback for popup request event. Usually happens when the keyboard
context menu key is pressed.
@param widget: Widget that emitted signal.
@type widget: gtk.Widget
@return: Return true to stop event trickling.
@rtype: boolean
'''
path, col = self.get_cursor()
rect = getTreePathBoundingBox(self, path, col)
self._showPopup(0, gtk.get_current_event_time(),
path, lambda m, r: (r.x, r.y, True), rect)
return True
def _showPopup(self, button, time, path, pos_func=None, data=None):
'''
Convenience function for showing the view manager's popup menu.
@param button: Mouse button that was clicked.
@type button: integer
@param time: Time of event.
@type time: float
@param path: Tree path of context menu.
@type path: tuple
@param pos_func: Function to use for determining menu placement.
@type pos_func: callable
@param data: Additional data.
@type data: object
'''
plugin = \
self.plugin_manager[path][self.plugin_manager.COL_INSTANCE]
menu = self.view_manager.Menu(plugin, self.get_toplevel())
menu.popup(None, None, pos_func, button, time, data)
def _viewNameDataFunc(self, column, cell, model, iter, foo=None):
'''
Function for determining the displayed data in the tree's view column.
@param column: Column number.
@type column: integer
@param cell: Cell renderer.
@type cell: gtk.CellRendererText
@param model: Tree's model.
@type model: gtk.ListStore
@param iter: Tree iter of current row.
@type iter: gtk.TreeIter
'''
plugin_class = model[iter][self.plugin_manager.COL_CLASS]
if issubclass(plugin_class, gtk.Widget):
view_name = \
self.view_manager.getViewNameForPlugin(plugin_class.plugin_name)
cell.set_property('sensitive', True)
else:
view_name = N_('No view')
cell.set_property('sensitive', False)
cell.set_property('text', _(view_name))
def _pluginNameDataFunc(self, column, cell, model, iter, foo=None):
'''
Function for determining the displayed data in the tree's plugin column.
@param column: Column number.
@type column: integer
@param cell: Cell renderer.
@type cell: gtk.CellRendererText
@param model: Tree's model.
@type model: gtk.ListStore
@param iter: Tree iter of current row.
@type iter: gtk.TreeIter
'''
plugin_class = model[iter][self.plugin_manager.COL_CLASS]
cell.set_property('text', plugin_class.plugin_name_localized or \
plugin_class.plugin_name)
def _pluginStateDataFunc(self, column, cell, model, iter, foo=None):
'''
Function for determining the displayed state of the plugin's checkbox.
@param column: Column number.
@type column: integer
@param cell: Cell renderer.
@type cell: gtk.CellRendererToggle
@param model: Tree's model.
@type model: gtk.ListStore
@param iter: Tree iter of current row.
@type iter: gtk.TreeIter
'''
cell.set_property('active',
bool(model[iter][self.plugin_manager.COL_INSTANCE]))
def _onPluginToggled(self, renderer_toggle, path):
'''
Callback for a "toggled" signal from a L{gtk.CellRendererToggle} in the
plugin dialog. Passes along the toggle request to the L{PluginManager}.
@param renderer_toggle: The toggle cellrenderer that emitted the signal.
@type renderer_toggle: L{gtk.CellRendererToggle}
@param path: The path that has been toggled.
@type path: tuple
'''
self.plugin_manager.togglePlugin(path)
def _onViewChanged(self, cellrenderertext, path, new_text):
'''
Callback for an "edited" signal from a L{gtk.CellRendererCombo} in the
plugin dialog. Passes along the new requested view name to the
L{PluginManager}.
@param cellrenderertext: The combo cellrenderer that emitted the signal.
@type cellrenderertext: L{gtk.CellRendererCombo}
@param path: The path that has been touched.
@type path: tuple
@param new_text: The new text that has been entered in to the combo entry.
@type new_text: string
'''
plugin = \
self.plugin_manager[path][self.plugin_manager.COL_INSTANCE]
self.view_manager.changeView(plugin, new_text)
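# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical wiring of the manager and its view; accerciser's main window
# supplies real Node and HotkeyManager instances, so the names below are
# placeholders only:
#
#   manager = PluginManager(node, hotkey_manager, main_view)
#   plugin_tree = manager.View()      # toggle / name / view columns
#   dialog_box.add(plugin_tree)
#   manager.togglePlugin((0,))        # enable or disable the first plugin
#   manager.close()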
|
|
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import glob
import json
import os
import mock
import pytest
from google.protobuf import json_format
from google.cloud.firestore_v1.proto import document_pb2
from google.cloud.firestore_v1.proto import firestore_pb2
from google.cloud.firestore_v1.proto import tests_pb2
from google.cloud.firestore_v1.proto import write_pb2
def _load_test_json(filename):
with open(filename, "r") as tp_file:
tp_json = json.load(tp_file)
test_file = tests_pb2.TestFile()
json_format.ParseDict(tp_json, test_file)
shortname = os.path.split(filename)[-1]
for test_proto in test_file.tests:
test_proto.description = test_proto.description + " (%s)" % shortname
yield test_proto
_here = os.path.dirname(__file__)
_glob_expr = "{}/testdata/*.json".format(_here)
_globs = glob.glob(_glob_expr)
ALL_TESTPROTOS = []
for filename in sorted(_globs):
ALL_TESTPROTOS.extend(_load_test_json(filename))
_CREATE_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "create"
]
_GET_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "get"
]
_SET_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "set"
]
_UPDATE_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "update"
]
_UPDATE_PATHS_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "update_paths"
]
_DELETE_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "delete"
]
_LISTEN_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "listen"
]
_QUERY_TESTPROTOS = [
test_proto
for test_proto in ALL_TESTPROTOS
if test_proto.WhichOneof("test") == "query"
]
def _mock_firestore_api():
firestore_api = mock.Mock(spec=["commit"])
commit_response = firestore_pb2.CommitResponse(
write_results=[write_pb2.WriteResult()]
)
firestore_api.commit.return_value = commit_response
return firestore_api
def _make_client_document(firestore_api, testcase):
from google.cloud.firestore_v1 import Client
from google.cloud.firestore_v1.client import DEFAULT_DATABASE
import google.auth.credentials
_, project, _, database, _, doc_path = testcase.doc_ref_path.split("/", 5)
assert database == DEFAULT_DATABASE
# Attach the fake GAPIC to a real client.
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
client = Client(project=project, credentials=credentials)
client._firestore_api_internal = firestore_api
return client, client.document(doc_path)
def _run_testcase(testcase, call, firestore_api, client):
if getattr(testcase, "is_error", False):
# TODO: is there a subclass of Exception we can check for?
with pytest.raises(Exception):
call()
else:
call()
firestore_api.commit.assert_called_once_with(
client._database_string,
list(testcase.request.writes),
transaction=None,
metadata=client._rpc_metadata,
)
@pytest.mark.parametrize("test_proto", _CREATE_TESTPROTOS)
def test_create_testprotos(test_proto):
testcase = test_proto.create
firestore_api = _mock_firestore_api()
client, document = _make_client_document(firestore_api, testcase)
data = convert_data(json.loads(testcase.json_data))
call = functools.partial(document.create, data)
_run_testcase(testcase, call, firestore_api, client)
@pytest.mark.parametrize("test_proto", _GET_TESTPROTOS)
def test_get_testprotos(test_proto):
testcase = test_proto.get
firestore_api = mock.Mock(spec=["get_document"])
response = document_pb2.Document()
firestore_api.get_document.return_value = response
client, document = _make_client_document(firestore_api, testcase)
document.get() # No '.textprotos' for errors, field_paths.
firestore_api.get_document.assert_called_once_with(
document._document_path,
mask=None,
transaction=None,
metadata=client._rpc_metadata,
)
@pytest.mark.parametrize("test_proto", _SET_TESTPROTOS)
def test_set_testprotos(test_proto):
testcase = test_proto.set
firestore_api = _mock_firestore_api()
client, document = _make_client_document(firestore_api, testcase)
data = convert_data(json.loads(testcase.json_data))
if testcase.HasField("option"):
merge = convert_set_option(testcase.option)
else:
merge = False
call = functools.partial(document.set, data, merge=merge)
_run_testcase(testcase, call, firestore_api, client)
@pytest.mark.parametrize("test_proto", _UPDATE_TESTPROTOS)
def test_update_testprotos(test_proto):
testcase = test_proto.update
firestore_api = _mock_firestore_api()
client, document = _make_client_document(firestore_api, testcase)
data = convert_data(json.loads(testcase.json_data))
if testcase.HasField("precondition"):
option = convert_precondition(testcase.precondition)
else:
option = None
call = functools.partial(document.update, data, option)
_run_testcase(testcase, call, firestore_api, client)
@pytest.mark.skip(reason="Python has no way to call update with a list of field paths.")
@pytest.mark.parametrize("test_proto", _UPDATE_PATHS_TESTPROTOS)
def test_update_paths_testprotos(test_proto): # pragma: NO COVER
pass
@pytest.mark.parametrize("test_proto", _DELETE_TESTPROTOS)
def test_delete_testprotos(test_proto):
testcase = test_proto.delete
firestore_api = _mock_firestore_api()
client, document = _make_client_document(firestore_api, testcase)
if testcase.HasField("precondition"):
option = convert_precondition(testcase.precondition)
else:
option = None
call = functools.partial(document.delete, option)
_run_testcase(testcase, call, firestore_api, client)
@pytest.mark.parametrize("test_proto", _LISTEN_TESTPROTOS)
def test_listen_testprotos(test_proto): # pragma: NO COVER
# test_proto.listen has 'responses' messages,
# 'google.firestore_v1.ListenResponse',
# and then an expected list of 'snapshots' (local 'Snapshot'), containing
# 'docs' (list of 'google.firestore_v1.Document'),
# 'changes' (list of local 'DocChange'), and a 'read_time' timestamp.
from google.cloud.firestore_v1 import Client
from google.cloud.firestore_v1 import DocumentReference
from google.cloud.firestore_v1 import DocumentSnapshot
from google.cloud.firestore_v1 import Watch
import google.auth.credentials
testcase = test_proto.listen
testname = test_proto.description
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
client = Client(project="project", credentials=credentials)
modulename = "google.cloud.firestore_v1.watch"
with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc):
with mock.patch(
"%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer
):
with mock.patch( # conformance data sets WATCH_TARGET_ID to 1
"%s.WATCH_TARGET_ID" % modulename, 1
):
snapshots = []
def callback(keys, applied_changes, read_time):
snapshots.append((keys, applied_changes, read_time))
collection = DummyCollection(client=client)
query = DummyQuery(parent=collection)
watch = Watch.for_query(
query, callback, DocumentSnapshot, DocumentReference
)
# conformance data has db string as this
db_str = "projects/projectID/databases/(default)"
watch._firestore._database_string_internal = db_str
if testcase.is_error:
try:
for proto in testcase.responses:
watch.on_snapshot(proto)
except RuntimeError:
# listen-target-add-wrong-id.textpro
# listen-target-remove.textpro
pass
else:
for proto in testcase.responses:
watch.on_snapshot(proto)
assert len(snapshots) == len(testcase.snapshots)
for i, (expected_snapshot, actual_snapshot) in enumerate(
zip(testcase.snapshots, snapshots)
):
expected_changes = expected_snapshot.changes
actual_changes = actual_snapshot[1]
if len(expected_changes) != len(actual_changes):
raise AssertionError(
"change length mismatch in %s (snapshot #%s)"
% (testname, i)
)
for y, (expected_change, actual_change) in enumerate(
zip(expected_changes, actual_changes)
):
expected_change_kind = expected_change.kind
actual_change_kind = actual_change.type.value
if expected_change_kind != actual_change_kind:
raise AssertionError(
"change type mismatch in %s (snapshot #%s, change #%s')"
% (testname, i, y)
)
@pytest.mark.parametrize("test_proto", _QUERY_TESTPROTOS)
def test_query_testprotos(test_proto): # pragma: NO COVER
testcase = test_proto.query
if testcase.is_error:
with pytest.raises(Exception):
query = parse_query(testcase)
query._to_protobuf()
else:
query = parse_query(testcase)
found = query._to_protobuf()
assert found == testcase.query
def convert_data(v):
# Replace the strings 'ServerTimestamp' and 'Delete' with the corresponding
# sentinels.
from google.cloud.firestore_v1 import ArrayRemove
from google.cloud.firestore_v1 import ArrayUnion
from google.cloud.firestore_v1 import DELETE_FIELD
from google.cloud.firestore_v1 import SERVER_TIMESTAMP
if v == "ServerTimestamp":
return SERVER_TIMESTAMP
elif v == "Delete":
return DELETE_FIELD
elif isinstance(v, list):
if v[0] == "ArrayRemove":
return ArrayRemove([convert_data(e) for e in v[1:]])
if v[0] == "ArrayUnion":
return ArrayUnion([convert_data(e) for e in v[1:]])
return [convert_data(e) for e in v]
elif isinstance(v, dict):
return {k: convert_data(v2) for k, v2 in v.items()}
elif v == "NaN":
return float(v)
else:
return v
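# (convert_data example: {"ts": "ServerTimestamp", "tags": ["ArrayUnion", 1]}
# becomes {"ts": SERVER_TIMESTAMP, "tags": ArrayUnion([1])}.)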
def convert_set_option(option):
from google.cloud.firestore_v1 import _helpers
if option.fields:
return [
_helpers.FieldPath(*field.field).to_api_repr() for field in option.fields
]
assert option.all
return True
def convert_precondition(precond):
from google.cloud.firestore_v1 import Client
if precond.HasField("exists"):
return Client.write_option(exists=precond.exists)
assert precond.HasField("update_time")
return Client.write_option(last_update_time=precond.update_time)
class DummyRpc(object): # pragma: NO COVER
def __init__(
self,
listen,
should_recover,
should_terminate=None,
initial_request=None,
metadata=None,
):
self.listen = listen
self.initial_request = initial_request
self.should_recover = should_recover
self.should_terminate = should_terminate
self.closed = False
self.callbacks = []
self._metadata = metadata
def add_done_callback(self, callback):
self.callbacks.append(callback)
def close(self):
self.closed = True
class DummyBackgroundConsumer(object): # pragma: NO COVER
started = False
stopped = False
is_active = True
def __init__(self, rpc, on_snapshot):
self._rpc = rpc
self.on_snapshot = on_snapshot
def start(self):
self.started = True
def stop(self):
self.stopped = True
self.is_active = False
class DummyCollection(object):
def __init__(self, client, parent=None):
self._client = client
self._parent = parent
def _parent_info(self):
return "{}/documents".format(self._client._database_string), None
class DummyQuery(object): # pragma: NO COVER
def __init__(self, parent):
self._parent = parent
self._comparator = lambda x, y: 1
@property
def _client(self):
return self._parent._client
def _to_protobuf(self):
from google.cloud.firestore_v1.proto import query_pb2
query_kwargs = {
"select": None,
"from": None,
"where": None,
"order_by": None,
"start_at": None,
"end_at": None,
}
return query_pb2.StructuredQuery(**query_kwargs)
def parse_query(testcase):
# 'query' testcase contains:
# - 'coll_path': collection ref path.
# - 'clauses': array of one or more 'Clause' elements
# - 'query': the actual google.firestore_v1.StructuredQuery message
# to be constructed.
# - 'is_error' (as other testcases).
#
# 'Clause' elements are unions of:
# - 'select': [field paths]
# - 'where': (field_path, op, json_value)
# - 'order_by': (field_path, direction)
# - 'offset': int
# - 'limit': int
# - 'start_at': 'Cursor'
# - 'start_after': 'Cursor'
# - 'end_at': 'Cursor'
# - 'end_before': 'Cursor'
#
# 'Cursor' contains either:
# - 'doc_snapshot': 'DocSnapshot'
# - 'json_values': [string]
#
# 'DocSnapshot' contains:
# 'path': str
# 'json_data': str
from google.auth.credentials import Credentials
from google.cloud.firestore_v1 import Client
from google.cloud.firestore_v1 import Query
_directions = {"asc": Query.ASCENDING, "desc": Query.DESCENDING}
credentials = mock.create_autospec(Credentials)
client = Client("projectID", credentials)
path = parse_path(testcase.coll_path)
collection = client.collection(*path)
query = collection
for clause in testcase.clauses:
kind = clause.WhichOneof("clause")
if kind == "select":
field_paths = [
".".join(field_path.field) for field_path in clause.select.fields
]
query = query.select(field_paths)
elif kind == "where":
path = ".".join(clause.where.path.field)
value = convert_data(json.loads(clause.where.json_value))
query = query.where(path, clause.where.op, value)
elif kind == "order_by":
path = ".".join(clause.order_by.path.field)
direction = clause.order_by.direction
direction = _directions.get(direction, direction)
query = query.order_by(path, direction=direction)
elif kind == "offset":
query = query.offset(clause.offset)
elif kind == "limit":
query = query.limit(clause.limit)
elif kind == "start_at":
cursor = parse_cursor(clause.start_at, client)
query = query.start_at(cursor)
elif kind == "start_after":
cursor = parse_cursor(clause.start_after, client)
query = query.start_after(cursor)
elif kind == "end_at":
cursor = parse_cursor(clause.end_at, client)
query = query.end_at(cursor)
elif kind == "end_before":
cursor = parse_cursor(clause.end_before, client)
query = query.end_before(cursor)
else: # pragma: NO COVER
raise ValueError("Unknown query clause: {}".format(kind))
return query
def parse_path(path):
_, relative = path.split("documents/")
return relative.split("/")
def parse_cursor(cursor, client):
from google.cloud.firestore_v1 import DocumentReference
from google.cloud.firestore_v1 import DocumentSnapshot
if cursor.HasField("doc_snapshot"):
path = parse_path(cursor.doc_snapshot.path)
doc_ref = DocumentReference(*path, client=client)
return DocumentSnapshot(
reference=doc_ref,
data=json.loads(cursor.doc_snapshot.json_data),
exists=True,
read_time=None,
create_time=None,
update_time=None,
)
values = [json.loads(value) for value in cursor.json_values]
return convert_data(values)
|
|
import asyncio
import gc
import os
import tracemalloc
from contextlib import suppress
from functools import partial
import aiormq
import pamqp
import pytest
from aiomisc import awaitable
from aiormq.connection import DEFAULT_PORTS
from yarl import URL
import aio_pika
@pytest.fixture
async def add_cleanup(loop):
entities = []
def payload(func, *args, **kwargs):
nonlocal entities
func = partial(awaitable(func), *args, **kwargs)
entities.append(func)
try:
yield payload
finally:
for func in entities[::-1]:
await func()
entities.clear()
@pytest.fixture
async def create_task(loop):
tasks = []
def payload(coroutine):
nonlocal tasks
task = loop.create_task(coroutine)
tasks.append(task)
return task
try:
yield payload
finally:
cancelled = []
for task in tasks:
if task.done():
continue
task.cancel()
cancelled.append(task)
results = await asyncio.gather(*cancelled, return_exceptions=True)
for result in results:
if not isinstance(result, asyncio.CancelledError):
raise result
@pytest.fixture
def amqp_direct_url(request) -> URL:
url = URL(
os.getenv("AMQP_URL", "amqp://guest:guest@localhost"),
).update_query(name=request.node.nodeid)
default_port = DEFAULT_PORTS[url.scheme]
if not url.port:
url = url.with_port(default_port)
return url
@pytest.fixture
def amqp_url(request, amqp_direct_url) -> URL:
query = dict(amqp_direct_url.query)
query["name"] = request.node.nodeid
return amqp_direct_url.with_query(**query)
@pytest.fixture(
scope="module",
params=[aio_pika.connect, aio_pika.connect_robust],
ids=["connect", "connect_robust"],
)
def connection_fabric(request):
return request.param
@pytest.fixture
def create_connection(connection_fabric, loop, amqp_url):
return partial(connection_fabric, amqp_url, loop=loop)
@pytest.fixture
def create_channel(connection: aio_pika.Connection, add_cleanup):
conn = connection
async def fabric(cleanup=True, connection=None, *args, **kwargs):
nonlocal add_cleanup, conn
if connection is None:
connection = conn
channel = await connection.channel(*args, **kwargs)
if cleanup:
add_cleanup(channel.close)
return channel
return fabric
# noinspection PyTypeChecker
@pytest.fixture
async def connection(create_connection) -> aio_pika.Connection:
async with await create_connection() as conn:
yield conn
# noinspection PyTypeChecker
@pytest.fixture
async def channel(connection: aio_pika.Connection) -> aio_pika.Channel:
async with connection.channel() as ch:
yield ch
@pytest.fixture
def declare_queue(connection, channel, add_cleanup):
ch = channel
async def fabric(
*args, cleanup=True, channel=None, **kwargs
) -> aio_pika.Queue:
nonlocal ch, add_cleanup
if channel is None:
channel = ch
queue = await channel.declare_queue(*args, **kwargs)
if cleanup and not kwargs.get("auto_delete"):
add_cleanup(queue.delete)
return queue
return fabric
@pytest.fixture
def declare_exchange(connection, channel, add_cleanup):
ch = channel
async def fabric(
*args, channel=None, cleanup=True, **kwargs
) -> aio_pika.Exchange:
nonlocal ch, add_cleanup
if channel is None:
channel = ch
exchange = await channel.declare_exchange(*args, **kwargs)
if cleanup and not kwargs.get("auto_delete"):
add_cleanup(exchange.delete)
return exchange
return fabric
@pytest.fixture(autouse=True)
def memory_tracer():
tracemalloc.start()
tracemalloc.clear_traces()
filters = (
tracemalloc.Filter(True, aiormq.__file__),
tracemalloc.Filter(True, pamqp.__file__),
tracemalloc.Filter(True, aio_pika.__file__),
)
snapshot_before = tracemalloc.take_snapshot().filter_traces(filters)
try:
yield
with suppress(Exception):
gc.collect()
snapshot_after = tracemalloc.take_snapshot().filter_traces(filters)
top_stats = snapshot_after.compare_to(
snapshot_before, "lineno", cumulative=True,
)
assert not top_stats
finally:
tracemalloc.stop()
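# A hedged usage sketch (editor's addition): how a test module might combine the
# fixtures above. It is shown here for illustration only and would normally live
# in a test module, since pytest does not collect tests from conftest.py; the
# queue name, message body and timeout are assumptions.
async def test_publish_and_get_example(channel, declare_queue):
    queue = await declare_queue("example-queue", auto_delete=True)
    # publish via the default exchange, routed by the queue name
    await channel.default_exchange.publish(
        aio_pika.Message(body=b"hello"), routing_key=queue.name,
    )
    message = await queue.get(timeout=5)
    assert message.body == b"hello"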
|
|
'''
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/
a_simple_unix_linux_daemon_in_python/www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (David Mytton <[email protected]>)
- Replaced hard coded '/dev/null' in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't
work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (David Mytton <[email protected]>)
- Fixed problem with daemon exiting on Python 2.4
(before SystemExit was part of the Exception base)
13th Aug 2010 (David Mytton <[email protected]>)
- Fixed unhandled exception if PID file is empty
'''
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin=os.devnull,
stdout=os.devnull, stderr=os.devnull,
                 home_dir='.', umask=0o22, verbose=1):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
        except OSError as e:
sys.stderr.write(
"fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
        except OSError as e:
sys.stderr.write(
"fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
            si = open(self.stdin, 'r')
            so = open(self.stdout, 'a+')
            if self.stderr:
                # line-buffered: Python 3 does not allow unbuffered text I/O
                se = open(self.stderr, 'a+', 1)
else:
se = so
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
            print("Started")
# Write pidfile
atexit.register(
self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
        with open(self.pidfile, 'w+') as f:
            f.write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
            print("Starting...")
# Check for a pidfile to see if the daemon already runs
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
        except (IOError, ValueError):
            # ValueError covers a pidfile that exists but is empty
            pid = None
        except SystemExit:
            pid = None
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
            print("Stopping...")
# Get the pid from the pidfile
pid = self.get_pid()
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is
# empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
i = 0
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
i = i + 1
if i % 10 == 0:
os.kill(pid, signal.SIGHUP)
        except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
                print(str(err))
sys.exit(1)
if self.verbose >= 1:
            print("Stopped")
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def get_pid(self):
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
        except (IOError, ValueError):
            # ValueError covers a pidfile that exists but is empty
            pid = None
        except SystemExit:
            pid = None
return pid
def is_running(self):
pid = self.get_pid()
print(pid)
return pid and os.path.exists('/proc/%d' % pid)
def run(self):
"""
You should override this method when you subclass Daemon.
It will be called after the process has been
daemonized by start() or restart().
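        Example (an illustrative sketch by the editor; 'ExampleDaemon' and the
        pidfile path are assumed names, not part of the original module):
            class ExampleDaemon(Daemon):
                def run(self):
                    # daemon_alive is set to False by the SIGTERM/SIGINT handler
                    while self.daemon_alive:
                        time.sleep(1)
            if __name__ == '__main__':
                daemon = ExampleDaemon('/tmp/example-daemon.pid')
                if len(sys.argv) == 2 and sys.argv[1] in ('start', 'stop', 'restart'):
                    getattr(daemon, sys.argv[1])()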
"""
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Comment.jurisdiction'
db.add_column('website_comment', 'jurisdiction',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True),
keep_default=False)
# Adding field 'Comment.parent_comment'
db.add_column('website_comment', 'parent_comment',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='parent_reference', null=True, to=orm['website.Comment']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Comment.jurisdiction'
db.delete_column('website_comment', 'jurisdiction_id')
# Deleting field 'Comment.parent_comment'
db.delete_column('website_comment', 'parent_comment_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Rhys Elsmore <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""django-accept-header parser test suite."""
import unittest
from django_accept_header.header import parse, MediaType
from django_accept_header.exceptions import MediaTypeValueError, SubtypeValueError
class ParserTestCase(unittest.TestCase):
"""Parser test cases."""
def test_empty_value(self):
self.assertEquals(parse(''), [])
def test_none_value(self):
self.assertEquals(parse(None), [])
def test_parse_simple_header(self):
m = [MediaType('application/json')]
self.assertEquals(m, parse('application/json'))
def test_accept_header_still_included(self):
m = [MediaType('application/json')]
self.assertEquals(m, parse('Accept: application/json'))
def test_prefer_most_specific_type(self):
m = [
MediaType('application/json'),
MediaType('application/*', 0.2),
]
self.assertEquals(
parse('application/*; q=0.2, application/json'),
m
)
def test_media_type_parameter_with_quotes(self):
self.assertEquals(
parse('application/*; q="0.2"'),
[MediaType('application/*', 0.2)]
)
self.assertEquals(
parse("application/*; q='0.2'"),
[MediaType('application/*', 0.2)]
)
self.assertEquals(
parse('application/*; q=0.2; test="moop"'),
[MediaType('application/*', 0.2, {"test": "moop"})]
)
self.assertEquals(
parse("application/*; q=0.2; test='moop'"),
[MediaType('application/*', 0.2, {"test": "moop"})]
)
def test_special_characters(self):
self.assertEquals(
parse('application/*; test=_0-2'),
[MediaType('application/*', params={"test": "_0-2"})]
)
self.assertEquals(
parse("application/*; test=_0-2'"),
[MediaType('application/*', params={"test": "_0-2"})]
)
def test_non_valid_q_value(self):
self.assertEquals(
parse('application/*; q=_0-2'),
[MediaType('application/*', 1.0)]
)
def test_elaborate_accept_header(self):
self.assertEquals(
parse('text/*, text/html, text/html;level=1, */*'),
[
MediaType('text/html', params={'level': '1'}),
MediaType('text/html'),
MediaType('text/*'),
MediaType('*/*')
]
)
def test_real_world_header(self):
m = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.assertEquals(
parse(m),
[
MediaType('text/html'),
MediaType('application/xhtml+xml'),
MediaType('application/xml', q=0.9),
MediaType('*/*', q=0.8)
]
)
def test_parse_broken_accept_header(self):
header = ('text/xml,application/xml,application/xhtml+xml,') +\
('text/html;q=0.9,text/plain;q=0.8,image/*,,*/*;q=0.5')
self.assertEquals(
parse(header),
[
MediaType('text/xml'),
MediaType('application/xml'),
MediaType('application/xhtml+xml'),
MediaType('image/*'),
MediaType('text/html', q=0.9),
MediaType('text/plain', q=0.8),
MediaType('*/*', q=0.5)
]
)
class MediaTypeTestCase(unittest.TestCase):
"""Media Type test cases."""
def test_equal_media_types(self):
self.assertEquals(
MediaType('application/json'),
MediaType('application/json')
)
self.assertEquals(
MediaType('application/json', params={'test': '2'}),
MediaType('application/json', params={'test': '2'})
)
self.assertEquals(
MediaType('application/json'),
'application/json'
)
def test_unequal_media_types(self):
self.assertNotEquals(
MediaType('application/json'),
MediaType('text/plain')
)
self.assertNotEquals(
MediaType('application/json', params={'test': '2'}),
MediaType('text/plain', params={'test': '2'})
)
self.assertNotEquals(
MediaType('application/json'),
'text/plain'
)
def test_more_specific(self):
self.assertLess(
MediaType('application/*'),
MediaType('text/plain', q=0.8)
)
self.assertLess(
MediaType('application/json', params={'test': '2'}),
MediaType('application/*')
)
self.assertLess(
MediaType('application/json', q=0.5, params={'test': '2'}),
MediaType('application/json', q=0.5)
)
self.assertLess(
MediaType('application/json'),
MediaType('application/*')
)
self.assertLess(
MediaType('application/*'),
MediaType('*/*')
)
def test_less_specific(self):
self.assertGreater(
MediaType('text/plain', q=0.8),
MediaType('application/*')
)
self.assertGreater(
MediaType('application/*'),
MediaType('application/json', params={'test': '2'})
)
self.assertGreater(
MediaType('application/json', q=0.5),
MediaType('application/json', q=0.5, params={'test': '2'})
)
self.assertGreater(
MediaType('application/*'),
MediaType('application/json')
)
self.assertGreater(
MediaType('*/*'),
MediaType('application/*')
)
def test_matches_mediatypes(self):
ma = MediaType('*/*')
self.assertTrue(ma.matches('application/*'))
self.assertTrue(ma.matches('application/json'))
self.assertTrue(ma.matches('text/plain'))
def test_matches_mediatypes_specific(self):
ma = MediaType('text/*')
self.assertFalse(ma.matches('application/*'))
self.assertFalse(ma.matches('application/json'))
self.assertTrue(ma.matches('text/*'))
self.assertTrue(ma.matches('text/plain'))
def test_matches_subtypes(self):
ma = MediaType('image/png')
self.assertFalse(ma.matches('application/json'))
self.assertFalse(ma.matches('image/*'))
self.assertFalse(ma.matches('image/jpeg'))
self.assertTrue(ma.matches('image/png'))
def test_media_types(self):
self.assertEquals(
MediaType('application/json').mediatype,
'application'
)
self.assertEquals(
MediaType('application/*').mediatype,
'application'
)
self.assertEquals(
MediaType('*/*').mediatype,
'*'
)
def test_subtypes(self):
self.assertEquals(
MediaType('application/json').subtype,
'json'
)
self.assertEquals(
MediaType('application/*').subtype,
'*'
)
def test_invalid_media_types(self):
with self.assertRaises(MediaTypeValueError):
MediaType('/json')
with self.assertRaises(MediaTypeValueError):
MediaType('/')
def test_invalid_subtypes(self):
with self.assertRaises(SubtypeValueError):
MediaType('application/')
with self.assertRaises(SubtypeValueError):
MediaType('application')
def test_all_subtypes(self):
self.assertFalse(MediaType('application/json').all_subtypes)
self.assertTrue(MediaType('application/*').all_subtypes)
self.assertTrue(MediaType('*/*').all_subtypes)
def test_all_types(self):
self.assertFalse(MediaType('application/json').all_types)
self.assertFalse(MediaType('application/*').all_types)
self.assertTrue(MediaType('*/*').all_types)
def test_representation(self):
header = 'application/json; q=0.2; level=1; test=2; something=3'
m = parse(header)[0]
self.assertEquals(
repr(m),
'<Media Type: {}>'.format(header)
)
def test_string_representation(self):
header = 'application/json; q=0.2'
m = parse(header)[0]
self.assertEquals(
str(m),
header
)
def test_string_representation_parameter(self):
header = 'application/json; q=0.2; level=1; test=2; something=3'
m = parse(header)[0]
self.assertEquals(
str(m),
header
)
def test_getitem_param_exists(self):
m = MediaType('application/json', params={'test': '2'})
self.assertEqual(m['test'], '2')
def test_getitem_param_none(self):
m = MediaType('application/json')
self.assertIsNone(m['test'])
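# A hedged usage sketch (editor's addition): one way the parser exercised above
# could drive content negotiation. 'preferred_type' and the candidate list are
# assumptions, not part of django_accept_header itself.
def preferred_type(accept_header, candidates=('application/json', 'text/html')):
    # parse() returns media types ordered from most specific / highest q first
    for media_type in parse(accept_header):
        for candidate in candidates:
            if media_type.matches(candidate):
                return candidate
    return candidates[0]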
|
|
# internal imports
import ctable_ext
# external imports
import numpy as np
import bcolz
from collections import namedtuple
import os
class ctable(bcolz.ctable):
def cache_factor(self, col_list, refresh=False):
"""
        Pre-compute and persist the factorization (cache) of the given columns.
        Existing todos here are: these should become hidden helper carrays,
        as in: not normal columns that you would see as a user.
        The factor (label index) carray is as long as the original carray
        (and therefore the rest of the table), but the (unique) values
        carray only holds as many entries as there are unique values.
        :param col_list: list of column names to factorize and cache
        :param refresh: if True, rebuild the cache even if it already exists
        :return: None; the cached carrays are written next to each column's rootdir
"""
if not self.rootdir:
raise TypeError('Only out-of-core ctables can have '
'factorization caching at the moment')
for col in col_list:
col_rootdir = self[col].rootdir
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
# create cache if needed
if refresh or not os.path.exists(col_factor_rootdir):
carray_factor = \
bcolz.carray([], dtype='int64', expectedlen=self.size,
rootdir=col_factor_rootdir, mode='w')
_, values = \
ctable_ext.factorize(self[col], labels=carray_factor)
carray_factor.flush()
carray_values = \
bcolz.carray(values.values(), dtype=self[col].dtype,
rootdir=col_values_rootdir, mode='w')
carray_values.flush()
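    # A minimal usage sketch (hypothetical rootdir and column names; an on-disk
    # ctable is required, since in-memory tables raise TypeError above):
    #
    #   ct = ctable(rootdir='sales.bcolz')
    #   ct.cache_factor(['state', 'product'])        # writes <col>.factor / <col>.values
    #   ct.cache_factor(['state'], refresh=True)     # force a rebuild of the cache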
def groupby(self, groupby_cols, agg_list, bool_arr=None, rootdir=None):
"""
Aggregate the ctable
groupby_cols: a list of columns to groupby over
agg_list: the aggregation operations, which can be:
        - a straightforward sum of a list of columns with a
          similarly named output: ['m1', 'm2', ...]
        - a list of new column input/output settings
          [['mnew1', 'm1'], ['mnew2', 'm2'], ...]
        - a list that includes the type of aggregation for each column, i.e.
          [['mnew1', 'm1', 'sum'], ['mnew2', 'm1', 'avg'], ...]
        Currently supported aggregation operations are:
        - sum
        - sum_na (which checks for nan values and excludes them)
        - To be added: mean, mean_na (and perhaps standard deviation etc.)
        bool_arr: to be added (filtering the groupby factorization input)
rootdir: the aggregation ctable rootdir
"""
if not agg_list:
raise AttributeError('One or more aggregation operations '
'need to be defined')
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
factor_carray, nr_groups, skip_key = \
self.make_group_index(factor_list, values_list, groupby_cols,
len(self), bool_arr)
ct_agg, dtype_list, agg_ops = \
self.create_agg_ctable(groupby_cols, agg_list, nr_groups, rootdir)
# perform aggregation
ctable_ext.aggregate_groups_by_iter_2(self, ct_agg, nr_groups, skip_key,
factor_carray, groupby_cols,
agg_ops, dtype_list)
return ct_agg
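    # Illustrative calls (hypothetical column names) covering the three agg_list
    # forms described in the docstring above:
    #
    #   agg = ct.groupby(['state'], ['m1', 'm2'])                   # sum into same names
    #   agg = ct.groupby(['state'], [['m1_out', 'm1']])             # renamed output, sum
    #   agg = ct.groupby(['state'], [['m1_nan', 'm1', 'sum_na']])   # explicit operation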
# groupby helper functions
def factorize_groupby_cols(self, groupby_cols):
"""
:type self: ctable
"""
# first check if the factorized arrays already exist
# unless we need to refresh the cache
factor_list = []
values_list = []
# factorize the groupby columns
for col in groupby_cols:
cached = False
col_rootdir = self[col].rootdir
if col_rootdir:
col_factor_rootdir = col_rootdir + '.factor'
col_values_rootdir = col_rootdir + '.values'
if os.path.exists(col_factor_rootdir):
cached = True
col_factor_carray = \
bcolz.carray(rootdir=col_factor_rootdir, mode='r')
col_values_carray = \
bcolz.carray(rootdir=col_values_rootdir, mode='r')
if not cached:
col_factor_carray, values = ctable_ext.factorize(self[col])
col_values_carray = \
bcolz.carray(values.values(), dtype=self[col].dtype)
factor_list.append(col_factor_carray)
values_list.append(col_values_carray)
return factor_list, values_list
def make_group_index(self, factor_list, values_list, groupby_cols,
array_length, bool_arr):
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total (index 0/zero)
factor_carray = bcolz.zeros(array_length, dtype='int64')
values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
factor_carray = factor_list[0]
values = values_list[0]
else:
# multi column groupby
# nb: this might also be cached in the future
# first combine the factorized columns to single values
factor_set = {x: y for x, y in zip(groupby_cols, factor_list)}
# create a numexpr expression that calculates the place on
# a cartesian join index
eval_str = ''
previous_value = 1
for col, values \
in zip(reversed(groupby_cols), reversed(values_list)):
if eval_str:
eval_str += ' + '
eval_str += str(previous_value) + '*' + col
previous_value *= len(values)
# calculate the cartesian group index for each row
factor_input = bcolz.eval(eval_str, user_dict=factor_set)
# now factorize the unique groupby combinations
factor_carray, values = ctable_ext.factorize(factor_input)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
factor_carray = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': factor_carray, 'bool': bool_arr})
# now check how many unique values there are left
factor_carray, values = ctable_ext.factorize(factor_carray)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.iteritems() if value == -1]
if filter_check:
skip_key = filter_check[0]
        # using nr_groups as a total length might be one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
nr_groups = len(values)
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return factor_carray, nr_groups, skip_key
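    # Worked example of the multi-column index built above (hypothetical data):
    # for groupby_cols ['a', 'b'] where 'b' has 3 unique values, the reversed loop
    # produces eval_str = '1*b + 3*a', so each (a, b) combination lands on a
    # distinct cartesian position before being factorized into the final labels.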
def create_agg_ctable(self, groupby_cols, agg_list, nr_groups, rootdir):
# create output table
dtype_list = []
for col in groupby_cols:
dtype_list.append((col, self[col].dtype))
agg_cols = []
agg_ops = []
op_translation = {
'sum': 1,
'sum_na': 2
}
for agg_info in agg_list:
if not isinstance(agg_info, list):
# straight forward sum (a ['m1', 'm2', ...] parameter)
output_col = agg_info
input_col = agg_info
agg_op = 1
else:
                # input/output settings [['mnew1', 'm1'], ['mnew2', 'm2'], ...]
output_col = agg_info[0]
input_col = agg_info[1]
if len(agg_info) == 2:
agg_op = 1
else:
                    # input/output settings [['mnew1', 'm1', 'sum'], ['mnew2', 'm1', 'avg'], ...]
agg_op = agg_info[2]
if agg_op not in op_translation:
raise NotImplementedError(
'Unknown Aggregation Type: ' + unicode(agg_op))
agg_op = op_translation[agg_op]
col_dtype = self[input_col].dtype
# TODO: check if the aggregation columns is numeric
# NB: we could build a concatenation for strings like pandas, but I would really prefer to see that as a
# separate operation
# save output
agg_cols.append(output_col)
agg_ops.append((input_col, agg_op))
dtype_list.append((output_col, col_dtype))
# create aggregation table
ct_agg = bcolz.ctable(
np.zeros(0, dtype_list),
expectedlen=nr_groups,
rootdir=rootdir)
return ct_agg, dtype_list, agg_ops
def where_terms(self, term_list):
"""
TEMPORARY WORKAROUND TILL NUMEXPR WORKS WITH IN
where_terms(term_list, outcols=None, limit=None, skip=0)
Iterate over rows where `term_list` is true.
A terms list has a [(col, operator, value), ..] construction.
Eg. [('sales', '>', 2), ('state', 'in', ['IL', 'AR'])]
:param term_list:
:param outcols:
:param limit:
:param skip:
:return: :raise ValueError:
"""
if type(term_list) not in [list, set, tuple]:
raise ValueError("Only term lists are supported")
eval_string = ''
eval_list = []
for term in term_list:
filter_col = term[0]
filter_operator = term[1].lower()
filter_value = term[2]
if filter_operator not in ['in', 'not in']:
# direct filters should be added to the eval_string
# add and logic if not the first term
if eval_string:
eval_string += ' & '
eval_string += '(' + filter_col + ' ' \
+ filter_operator + ' ' \
+ str(filter_value) + ')'
elif filter_operator in ['in', 'not in']:
# Check input
if type(filter_value) not in [list, set, tuple]:
raise ValueError("In selections need lists, sets or tuples")
if len(filter_value) < 1:
raise ValueError("A value list needs to have values")
elif len(filter_value) == 1:
# handle as eval
# add and logic if not the first term
if eval_string:
eval_string += ' & '
if filter_operator == 'not in':
filter_operator = '!='
else:
filter_operator = '=='
eval_string += '(' + filter_col + ' ' + \
filter_operator
filter_value = filter_value[0]
if type(filter_value) == str:
filter_value = '"' + filter_value + '"'
else:
filter_value = str(filter_value)
eval_string += filter_value + ') '
else:
if type(filter_value) in [list, tuple]:
filter_value = set(filter_value)
eval_list.append(
(filter_col, filter_operator, filter_value)
)
else:
raise ValueError(
"Input not correctly formatted for eval or list filtering"
)
# (1) Evaluate terms in eval
# return eval_string, eval_list
if eval_string:
boolarr = self.eval(eval_string)
if eval_list:
# convert to numpy array for array_is_in
boolarr = boolarr[:]
else:
boolarr = np.ones(self.size, dtype=bool)
# (2) Evaluate other terms like 'in' or 'not in' ...
for term in eval_list:
name = term[0]
col = self.cols[name]
operator = term[1]
if operator.lower() == 'not in':
reverse = True
elif operator.lower() == 'in':
reverse = False
else:
raise ValueError(
"Input not correctly formatted for list filtering"
)
value_set = set(term[2])
ctable_ext.carray_is_in(col, value_set, boolarr, reverse)
if eval_list:
# convert boolarr back to carray
boolarr = bcolz.carray(boolarr)
return boolarr
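    # Example term lists (hypothetical columns), mirroring the docstring forms:
    #
    #   boolarr = ct.where_terms([('sales', '>', 2)])                 # numexpr eval path
    #   boolarr = ct.where_terms([('state', 'in', ['IL', 'AR'])])     # carray_is_in path
    #   boolarr = ct.where_terms([('sales', '>', 2),
    #                             ('state', 'not in', ['NY', 'NJ'])]) # combined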
|
|
from hashlib import md5
from django.conf import settings
from django.db import models
from django.core import paginator
from django.core.cache import cache
from djangae.contrib.pagination.decorators import _field_name_for_ordering
from djangae.db.backends.appengine.query import extract_ordering
# TODO: it would be nice to be able to define a function which is given the queryset and returns
# the cache time. That would allow different cache times for different queries.
CACHE_TIME = getattr(settings, "DJANGAE_PAGINATION_CACHE_TIME", 30*60)
class PaginationOrderingRequired(RuntimeError):
pass
def _marker_cache_key(query_id, page_number):
cache_key = "_PAGE_MARKER_{}:{}".format(query_id, page_number)
return cache_key
def _count_cache_key(query_id):
cache_key = "_PAGE_COUNTER_{}".format(query_id)
return cache_key
def _update_known_count(query_id, count):
cache_key = _count_cache_key(query_id)
ret = cache.get(cache_key)
if ret and ret > count:
return
cache.set(cache_key, count, CACHE_TIME)
def _get_known_count(query_id):
cache_key = _count_cache_key(query_id)
ret = cache.get(cache_key)
if ret:
return ret
return 0
def _store_marker(query_id, page_number, marker_value):
"""
For a model and query id, stores the marker value for previously
queried page number.
This stores the last item on the page identified by page number,
not the marker that starts the page. i.e. there is a marker for page 1
"""
cache_key = _marker_cache_key(query_id, page_number)
cache.set(cache_key, marker_value, CACHE_TIME)
def _get_marker(query_id, page_number):
"""
For a query_id, returns the marker at the end of the
previous page. Returns a tuple of (marker, pages) where pages is
the number of pages we had to go back to find the marker (this is the
number of pages we need to skip in the result set)
"""
counter = page_number - 1
pages_skipped = 0
while counter > 0:
cache_key = _marker_cache_key(query_id, counter)
ret = cache.get(cache_key)
if ret:
return ret, pages_skipped
counter -= 1
pages_skipped += 1
# If we get here then we couldn't find a stored marker anywhere
return None, pages_skipped
def queryset_identifier(queryset):
""" Returns a string that uniquely identifies this query excluding its low and high mark"""
hasher = md5()
hasher.update(queryset.model._meta.db_table)
hasher.update(str(queryset.query.where))
hasher.update(str(queryset.query.order_by))
return hasher.hexdigest()
class Paginator(paginator.Paginator):
"""
A paginator that works with the @paginated_model class decorator to efficiently
return paginated sets on the appengine datastore
"""
def __init__(
self,
object_list,
per_page,
readahead=10,
allow_empty_first_page=True,
**kwargs
):
if not object_list.ordered:
object_list.order_by("pk") # Just order by PK by default
self.original_orderings = extract_ordering(object_list.query)
self.field_required = _field_name_for_ordering(self.original_orderings[:])
self.readahead = readahead
self.allow_empty_first_page = allow_empty_first_page
try:
object_list.model._meta.get_field(self.field_required)
except models.FieldDoesNotExist:
raise PaginationOrderingRequired(
"No pagination ordering specified for {}. Field required: {}".format(
self.original_orderings,
self.field_required,
)
)
# Wipe out the existing ordering
object_list = object_list.order_by()
# Add our replacement ordering
# A single negated ordering can use the same field (we just flip the query), this
# normalisation happens in _field_name_for_ordering, so we do the same here.
if len(self.original_orderings) == 1 and self.original_orderings[0].startswith("-"):
object_list = object_list.order_by("-" + self.field_required)
else:
object_list = object_list.order_by(self.field_required)
self.queryset_id = queryset_identifier(object_list)
super(Paginator, self).__init__(
object_list,
per_page,
allow_empty_first_page=allow_empty_first_page,
**kwargs
)
@property
def count(self):
return _get_known_count(self.queryset_id)
def validate_number(self, number):
"""
Validates the given 1-based page number.
"""
try:
number = int(number)
except (TypeError, ValueError):
raise paginator.PageNotAnInteger('That page number is not an integer')
if number < 1:
raise paginator.EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
marker_value, pages = _get_marker(
self.queryset_id,
number
)
if marker_value:
if len(self.original_orderings) == 1 and self.original_orderings[0].startswith("-"):
qs = self.object_list.all().filter(**{"{}__lt".format(self.field_required): marker_value})
else:
qs = self.object_list.all().filter(**{"{}__gt".format(self.field_required): marker_value})
bottom = pages * self.per_page # We have to skip the pages here
top = bottom + self.per_page
else:
qs = self.object_list
results = list(qs[bottom:top + (self.per_page * self.readahead)])
next_page = results[top:]
next_page_counter = number + 1
while next_page:
if len(next_page) > self.per_page-1:
index = self.per_page-1
else:
index = len(next_page)-1
_store_marker(
self.queryset_id,
next_page_counter,
getattr(next_page[index], self.field_required)
)
next_page_counter += 1
next_page = next_page[self.per_page:]
if not results and not self.allow_empty_first_page:
raise paginator.EmptyPage("That page contains no results")
known_count = ((number - 1) * self.per_page) + len(results)
_update_known_count(self.queryset_id, known_count)
page = self._get_page(results[:self.per_page], number, self)
if len(page.object_list) > self.per_page-1:
index = self.per_page-1
else:
index = len(page.object_list)-1
if results:
_store_marker(
self.queryset_id,
number,
getattr(page.object_list[index], self.field_required)
)
return page
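    # Sketch of typical use (hypothetical model and ordering; the model is assumed
    # to be prepared with the @paginated_model decorator so that the computed
    # ordering field exists):
    #
    #   paginator = Paginator(Profile.objects.order_by("last_name"), 25, readahead=5)
    #   page_three = paginator.page(3)   # markers for later pages get cached here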
|
|
#!/usr/bin/env ipython
# dotfiles.venv.ipython_magics
from __future__ import print_function
"""
IPython ``%magic`` commands
* ``cd`` aliases
* ``ds`` (``dotfiles_status``)
* ``dr`` (``dotfiles_reload``)
Installation
--------------
.. code-block:: bash
__DOTFILES="${HOME}/-dotfiles"
ipython_profile="profile_default"
ln -s ${__DOTFILES}/etc/ipython/ipython_magics.py \
~/.ipython/${ipython_profile}/startup/ipython_magics.py
"""
import os
try:
from IPython.core.magic import (Magics, magics_class, line_magic)
except ImportError:
print("ImportError: IPython")
# Mock IPython for building docs
Magics = object
magics_class = lambda cls, *args, **kwargs: cls
line_magic = lambda func, *args, **kwargs: func
@magics_class
class VenvMagics(Magics):
def cd(self, envvar, line):
"""
Change directory
Args:
envvar (str): os.environ variable name
line (str): path to append to envvar
"""
prefix = os.environ.get(envvar, "")
path = os.path.join(prefix, line.lstrip('/\\'))
return self.shell.magic('cd %s' % repr(unicode(path))[1:])
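    # For example, with __WRK=/home/user/workspace the derived magics below make
    # ``%cdwrk project1`` behave like ``cd /home/user/workspace/project1``:
    # the prefix comes from os.environ and the remainder from the magic's line.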
@line_magic
def cdhome(self, line):
"""cdhome -- cd $HOME/${@}"""
return self.cd('HOME', line)
@line_magic
def cdh(self, line):
"""cdh -- cd $HOME/${@}"""
return self.cd('HOME', line)
@line_magic
def cdwrk(self, line):
"""cdwrk -- cd $__WRK/${@}"""
return self.cd('__WRK', line)
@line_magic
def cddotfiles(self, line):
"""cddotfiles -- cd $__DOTFILES/${@}"""
return self.cd('__DOTFILES', line)
@line_magic
def cdd(self, line):
"""cdd -- cd $__DOTFILES/${@}"""
return self.cd('__DOTFILES', line)
@line_magic
def cdprojecthome(self, line):
"""cdprojecthome -- cd $PROJECT_HOME/${@}"""
return self.cd('PROJECT_HOME', line)
@line_magic
def cdp(self, line):
"""cdp -- cd $PROJECT_HOME/${@}"""
return self.cd('PROJECT_HOME', line)
@line_magic
def cdph(self, line):
"""cdph -- cd $PROJECT_HOME/${@}"""
return self.cd('PROJECT_HOME', line)
@line_magic
def cdworkonhome(self, line):
"""cdworkonhome -- cd $WORKON_HOME/${@}"""
return self.cd('WORKON_HOME', line)
@line_magic
def cdwh(self, line):
"""cdwh -- cd $WORKON_HOME/${@}"""
return self.cd('WORKON_HOME', line)
@line_magic
def cdve(self, line):
"""cdve -- cd $WORKON_HOME/${@}"""
return self.cd('WORKON_HOME', line)
@line_magic
def cdcondaenvspath(self, line):
"""cdcondaenvspath -- cd $CONDA_ENVS_PATH/${@}"""
return self.cd('CONDA_ENVS_PATH', line)
@line_magic
def cda(self, line):
"""cda -- cd $CONDA_ENVS_PATH/${@}"""
return self.cd('CONDA_ENVS_PATH', line)
@line_magic
def cdce(self, line):
"""cdce -- cd $CONDA_ENVS_PATH/${@}"""
return self.cd('CONDA_ENVS_PATH', line)
@line_magic
def cdvirtualenv(self, line):
"""cdvirtualenv -- cd $VIRTUAL_ENV/${@}"""
return self.cd('VIRTUAL_ENV', line)
@line_magic
def cdv(self, line):
"""cdv -- cd $VIRTUAL_ENV/${@}"""
return self.cd('VIRTUAL_ENV', line)
@line_magic
def cdsrc(self, line):
"""cdsrc -- cd $_SRC/${@}"""
return self.cd('_SRC', line)
@line_magic
def cds(self, line):
"""cds -- cd $_SRC/${@}"""
return self.cd('_SRC', line)
@line_magic
def cdwrd(self, line):
"""cdwrd -- cd $_WRD/${@}"""
return self.cd('_WRD', line)
@line_magic
def cdw(self, line):
"""cdw -- cd $_WRD/${@}"""
return self.cd('_WRD', line)
@line_magic
def cdbin(self, line):
"""cdbin -- cd $_BIN/${@}"""
return self.cd('_BIN', line)
@line_magic
def cdb(self, line):
"""cdb -- cd $_BIN/${@}"""
return self.cd('_BIN', line)
@line_magic
def cdetc(self, line):
"""cdetc -- cd $_ETC/${@}"""
return self.cd('_ETC', line)
@line_magic
def cde(self, line):
"""cde -- cd $_ETC/${@}"""
return self.cd('_ETC', line)
@line_magic
def cdlib(self, line):
"""cdlib -- cd $_LIB/${@}"""
return self.cd('_LIB', line)
@line_magic
def cdl(self, line):
"""cdl -- cd $_LIB/${@}"""
return self.cd('_LIB', line)
@line_magic
def cdlog(self, line):
"""cdlog -- cd $_LOG/${@}"""
return self.cd('_LOG', line)
@line_magic
def cdpylib(self, line):
"""cdpylib -- cd $_PYLIB/${@}"""
return self.cd('_PYLIB', line)
@line_magic
def cdpysite(self, line):
"""cdpysite -- cd $_PYSITE/${@}"""
return self.cd('_PYSITE', line)
@line_magic
def cdsitepackages(self, line):
"""cdsitepackages -- cd $_PYSITE/${@}"""
return self.cd('_PYSITE', line)
@line_magic
def cdvar(self, line):
"""cdvar -- cd $_VAR/${@}"""
return self.cd('_VAR', line)
@line_magic
def cdwww(self, line):
"""cdwww -- cd $_WWW/${@}"""
return self.cd('_WWW', line)
@line_magic
def cdww(self, line):
"""cdww -- cd $_WWW/${@}"""
return self.cd('_WWW', line)
@line_magic
def cdhelp(self, line):
"""cdhelp() -- list cd commands"""
for cdfunc in dir(self):
if cdfunc.startswith('cd') and cdfunc not in ('cdhelp','cd'):
docstr = getattr(self, cdfunc).__doc__.split('--',1)[-1].strip()
print("%%%-16s -- %s" % (cdfunc, docstr))
@staticmethod
def _dotfiles_status():
"""
Print ``dotfiles_status``: venv variables
"""
env_vars = [
'HOSTNAME',
'USER',
'PROJECT_HOME',
'WORKON_HOME',
'VIRTUAL_ENV_NAME',
'VIRTUAL_ENV',
'_USRLOG',
'_TERM_ID',
'_SRC',
'_APP',
'_WRD',
'PATH',
'__DOTFILES',
]
environ = dict((var, os.environ.get(var)) for var in env_vars)
environ['HOSTNAME'] = __import__('socket').gethostname()
for var in env_vars:
print('{}="{}"'.format(var, "%s" % environ.get(var,'')))
@line_magic
def dotfiles_status(self, line):
"""dotfiles_status() -- print dotfiles_status() ."""
return self._dotfiles_status()
@line_magic
def ds(self, line):
"""ds() -- print dotfiles_status() ."""
return self._dotfiles_status()
@staticmethod
def _dotfiles_reload():
"""_dotfiles_reload() -- print NotImplemented"""
print("NotImplemented: dotfiles_reload()")
@line_magic
def dotfiles_reload(self, line):
"""dotfiles_reload() -- print NotImplemented"""
return self._dotfiles_reload()
@line_magic
def dr(self, line):
"""dr() -- print NotImplemented [dotfiles_reload()]"""
return self._dotfiles_reload()
def main():
"""
Register VenvMagics with IPython
"""
import IPython
ip = IPython.get_ipython()
ip.register_magics(VenvMagics)
if __name__ == "__main__":
main()
|
|
__author__ = 'Copyright (c) 2013 Alan Yorinks All rights reserved.'
"""
Copyright (c) 2013-15 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from collections import deque
import threading
import sys
import time
from .pymata_serial import PyMataSerial
from .pymata_command_handler import PyMataCommandHandler
# For report data formats refer to http://firmata.org/wiki/Protocol
class NoACK(Exception):
pass
# noinspection PyPep8
class PyMata:
"""
This class contains the complete set of API methods that permit control of an Arduino
Micro-Controller utilizing Firmata or its derivatives.
For information about the Firmata protocol, refer to: http://firmata.org/wiki/Protocol
"""
# some state variables
HIGH = 1 # digital pin state high value
LOW = 0 # digital pin state low value
REPORTING_ENABLE = 1 # enable reporting for REPORT_ANALOG or REPORT_DIGITAL message sent to firmata
REPORTING_DISABLE = 0 # disable reporting for REPORT_ANALOG or REPORT_DIGITAL message sent to firmata
# Shared Resources - data structures, controlling mechanisms, and reference variables
# Commands and data received from Firmata via the serial interface are placed into the command deque.
# The pymata_command_handler class removes and processes this information.
command_deque = deque()
# This is the instance reference to the communications port object
arduino = None
# This is a thread lock to assure data integrity when reading or writing to the data response tables
# (defined in the CommandHandler class). It shared by the pymata class and the pymata_command_handler class.
data_lock = threading.RLock()
# This is the instance reference to the _command_handler
_command_handler = None
# verbose can be set to false to suppress output to the console when instantiating PyMata
verbose = True
# pin modes
INPUT = 0x00 # pin set as input
OUTPUT = 0x01 # pin set as output
ANALOG = 0x02 # analog pin in analogInput mode
PWM = 0x03 # digital pin in PWM output mode
SERVO = 0x04 # digital pin in Servo output mode
I2C = 0x06 # pin included in I2C setup
ONEWIRE = 0x07 # possible future feature
STEPPER = 0x08 # any pin in stepper mode
TONE = 0x09 # Any pin in TONE mode
ENCODER = 0x0a
SONAR = 0x0b # Any pin in SONAR mode
IGNORE = 0x7f
LATCH_MODE = 0xE0 # this value is or'ed with pin modes for latched data callback
# the following pin modes are not part of or defined by Firmata
# but used by PyMata
DIGITAL = 0x20
# I2C command operation modes
I2C_WRITE = 0B00000000
I2C_READ = 0B00001000
I2C_READ_CONTINUOUSLY = 0B00010000
I2C_STOP_READING = 0B00011000
I2C_READ_WRITE_MODE_MASK = 0B00011000
# Tone commands
TONE_TONE = 0 # play a tone
TONE_NO_TONE = 1 # turn off tone
# Stepper Motor Sub-commands
STEPPER_CONFIGURE = 0 # configure a stepper motor for operation
STEPPER_STEP = 1 # command a motor to move at the provided speed
STEPPER_LIBRARY_VERSION = 2 # used to get stepper library version number
# each byte represents a digital port and its value contains the current port settings
digital_output_port_pins = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
# noinspection PyPep8Naming
def __init__(self, port_id='/dev/ttyACM0', bluetooth=True, verbose=True, on_disconnected_cb=None):
"""
The "constructor" instantiates the entire interface. It starts the operational threads for the serial
interface as well as for the command handler.
@param port_id: Communications port specifier (COM3, /dev/ttyACM0, etc)
@param bluetooth: Sets start up delays for bluetooth connectivity. Set to False for faster start up.
@param verbose: If set to False, the status print statements are suppressed.
"""
# Currently only serial communication over USB is supported, but in the future
# wifi and other transport mechanism support is anticipated
try:
# save the user's request if specified
self.verbose = verbose
if self.verbose:
print("\nPython Version %s" % sys.version)
print('\nPyMata version 2.05 Copyright(C) 2013-15 Alan Yorinks All rights reserved.')
# Instantiate the serial support class
self.transport = PyMataSerial(port_id, self.command_deque, on_disconnected_cb)
# wait for HC-06 Bluetooth slave to initialize in case it is being used.
if bluetooth:
time.sleep(5)
# Attempt opening communications with the Arduino micro-controller
self.transport.open(self.verbose)
# additional wait for HC-06 if it is being used
if bluetooth:
time.sleep(2)
else:
# necessary to support Arduino Mega
time.sleep(1)
# Start the data receive thread
self.transport.start()
# Instantiate the command handler
self._command_handler = PyMataCommandHandler(self)
self._command_handler.system_reset()
########################################################################
# constants defined locally from values contained in the command handler
########################################################################
# Data latch state constants to be used when accessing data returned from get_latch_data methods.
# The get_latch data methods return [pin_number, latch_state, latched_data, time_stamp]
# These three constants define possible values for the second item in the list, latch_state
# this pin will be ignored for latching - table initialized with this value
self.LATCH_IGNORE = self._command_handler.LATCH_IGNORE
# When the next pin value change is received for this pin, if it matches the latch criteria
# the data will be latched.
self.LATCH_ARMED = self._command_handler.LATCH_ARMED
# Data has been latched. Read the data to re-arm the latch.
self.LATCH_LATCHED = self._command_handler.LATCH_LATCHED
#
# These constants are used when setting a data latch.
# Latch threshold types
#
self.DIGITAL_LATCH_HIGH = self._command_handler.DIGITAL_LATCH_HIGH
self.DIGITAL_LATCH_LOW = self._command_handler.DIGITAL_LATCH_LOW
self.ANALOG_LATCH_GT = self._command_handler.ANALOG_LATCH_GT
self.ANALOG_LATCH_LT = self._command_handler.ANALOG_LATCH_LT
self.ANALOG_LATCH_GTE = self._command_handler.ANALOG_LATCH_GTE
self.ANALOG_LATCH_LTE = self._command_handler.ANALOG_LATCH_LTE
# constants to be used to parse the data returned from calling
# get_X_latch_data()
self.LATCH_PIN = 0
self.LATCH_STATE = 1
self.LATCHED_DATA = 2
self.LATCHED_TIME_STAMP = 3
# Start the command processing thread
self._command_handler.start()
# Command handler should now be prepared to receive replies from the Arduino, so go ahead
# detect the Arduino board
if self.verbose:
print('\nPlease wait while Arduino is being detected. This can take up to 30 seconds ...')
# perform board auto discovery
if not self._command_handler.auto_discover_board(self.verbose):
# board was not found so shutdown
if self.verbose:
print("Board Auto Discovery Failed!, Shutting Down")
self._command_handler.stop()
self.transport.stop()
self._command_handler.join()
self.transport.join()
# time.sleep(2)
raise NoACK('Failed to verify board')
except KeyboardInterrupt:
if self.verbose:
print("Program Aborted Before PyMata Instantiated")
sys.exit()
def analog_mapping_query(self):
"""
Send an analog mapping query message via sysex. Client retrieves the results with a
call to get_analog_mapping_request_results()
"""
self._command_handler.send_sysex(self._command_handler.ANALOG_MAPPING_QUERY, None)
def analog_read(self, pin):
"""
Retrieve the last analog data value received for the specified pin.
@param pin: Selected pin
@return: The last value entered into the analog response table.
"""
with self.data_lock:
data = self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]
return data
def analog_write(self, pin, value):
"""
Set the specified pin to the specified value.
@param pin: Pin number
@param value: Pin value
@return: No return value
"""
if self._command_handler.ANALOG_MESSAGE + pin < 0xf0:
command = [self._command_handler.ANALOG_MESSAGE + pin, value & 0x7f, value >> 7]
self._command_handler.send_command(command)
else:
self.extended_analog(pin, value)
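    # Worked example of the 7-bit packing above: value = 300 is transmitted as
    # LSB 300 & 0x7f = 0x2c and MSB 300 >> 7 = 0x02, matching the Firmata
    # analog message format.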
def capability_query(self):
"""
Send a Firmata capability query message via sysex. Client retrieves the results with a
call to get_capability_query_results()
The Arduino can be rather slow in responding to this command. For
the Mega 2560 R3 it has taken up to 25 seconds for a response.
"""
self._command_handler.send_sysex(self._command_handler.CAPABILITY_QUERY, None)
def close(self):
"""
This method will close the transport (serial port) and exit
@return: No return value, but sys.exit(0) is called.
"""
# self._command_handler.system_reset()
self._command_handler.stop()
self.transport.stop()
self.transport.close()
if self.verbose:
print("PyMata close(): Calling sys.exit(0): Hope to see you soon!")
# sys.exit(0)
def digital_read(self, pin):
"""
Retrieve the last digital data value received for the specified pin.
NOTE: This command will return values for digital, pwm, etc, pin types
@param pin: Selected pin
@return: The last value entered into the digital response table.
"""
with self.data_lock:
data = \
self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]
return data
def digital_write(self, pin, value):
"""
Set the specified pin to the specified value.
@param pin: pin number
@param value: pin value
@return: No return value
"""
# The command value is not a fixed value, but needs to be calculated using the
# pin's port number
#
#
port = pin // 8
calculated_command = self._command_handler.DIGITAL_MESSAGE + port
mask = 1 << (pin % 8)
# Calculate the value for the pin's position in the port mask
if value == 1:
self.digital_output_port_pins[port] |= mask
else:
self.digital_output_port_pins[port] &= ~mask
# Assemble the command
command = (calculated_command, self.digital_output_port_pins[port] & 0x7f,
self.digital_output_port_pins[port] >> 7)
self._command_handler.send_command(command)
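    # Worked example of the port/mask arithmetic above: pin 10 belongs to port
    # 10 // 8 = 1 with mask 1 << (10 % 8) = 0b100; writing HIGH ORs that bit into
    # digital_output_port_pins[1] before the port value is split into two 7-bit bytes.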
def disable_analog_reporting(self, pin):
"""
Disables analog reporting for a single analog pin.
@param pin: Analog pin number. For example for A0, the number is 0.
@return: No return value
"""
command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_DISABLE]
self._command_handler.send_command(command)
def disable_digital_reporting(self, pin):
"""
        Disables digital reporting. By turning reporting off for this pin,
        reporting is disabled for all 8 bits in the "port" -
        this is part of Firmata's protocol specification.
@param pin: Pin and all pins for this port
@return: No return value
"""
port = pin // 8
command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_DISABLE]
self._command_handler.send_command(command)
def enable_analog_reporting(self, pin):
"""
        Enables analog reporting for a single analog pin.
@param pin: Analog pin number. For example for A0, the number is 0.
@return: No return value
"""
command = [self._command_handler.REPORT_ANALOG + pin, self.REPORTING_ENABLE]
self._command_handler.send_command(command)
def enable_digital_reporting(self, pin):
"""
Enables digital reporting. By turning reporting on for all 8 bits in the "port" -
this is part of Firmata's protocol specification.
@param pin: Pin and all pins for this port
@return: No return value
"""
port = pin // 8
command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_ENABLE]
self._command_handler.send_command(command)
def encoder_config(self, pin_a, pin_b, cb=None):
"""
This command enables the rotary encoder (2 pin + ground) and will
enable encoder reporting.
NOTE: This command is not currently part of standard arduino firmata, but is provided for legacy
support of CodeShield on an Arduino UNO.
Encoder data is retrieved by performing a digital_read from pin a (encoder pin 1)
@param pin_a: Encoder pin 1.
@param pin_b: Encoder pin 2.
@param cb: callback function to report encoder changes
@return: No return value
"""
data = [pin_a, pin_b]
self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_MODE] \
= self.ENCODER
self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
self.enable_digital_reporting(pin_a)
self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_MODE] \
= self.ENCODER
self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
self.enable_digital_reporting(pin_b)
self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, data)
def extended_analog(self, pin, data):
"""
This method will send an extended data analog output command to the selected pin
@param pin: 0 - 127
@param data: 0 - 0xfffff
"""
analog_data = [pin, data & 0x7f, (data >> 7) & 0x7f, data >> 14]
self._command_handler.send_sysex(self._command_handler.EXTENDED_ANALOG, analog_data)
def get_analog_latch_data(self, pin):
"""
A list is returned containing the latch state for the pin, the latched value, and the time stamp
[pin_num, latch_state, latched_value, time_stamp]
        If the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero)
@param pin: Pin number.
@return: [pin, latch_state, latch_data_value, time_stamp]
"""
return self._command_handler.get_analog_latch_data(pin)
def get_analog_mapping_request_results(self):
"""
Call this method after calling analog_mapping_query() to retrieve its results
@return: raw data returned by firmata
"""
return self._command_handler.analog_mapping_query_results
def get_analog_response_table(self):
"""
This method returns a list of lists representing the current pin mode and
associated data values for all analog pins.
        All configured pin types, both input and output, will be listed. Output pin data will contain zero.
        @return: The last update of the analog response table
"""
return self._command_handler.get_analog_response_table()
def get_capability_query_results(self):
"""
Retrieve the data returned by a previous call to capability_query()
@return: Raw capability data returned by firmata
"""
return self._command_handler.capability_query_results
def get_digital_latch_data(self, pin):
"""
A list is returned containing the latch state for the pin, the latched value, and the time stamp
[pin_num, latch_state, latched_value, time_stamp]
        If the latch state is LATCH_LATCHED, the table is reset (data and timestamp set to zero)
@param pin: Pin number.
@return: [pin, latch_state, latch_data_value, time_stamp]
"""
return self._command_handler.get_digital_latch_data(pin)
def get_digital_response_table(self):
"""
This method returns a list of lists representing the current pin mode
and associated data for all digital pins.
All pin types, both input and output will be listed. Output pin data will contain zero.
@return: The last update of the digital response table
"""
return self._command_handler.get_digital_response_table()
def get_firmata_version(self):
"""
Retrieve the firmata version information returned by a previous call to refresh_report_version()
@return: Firmata_version list [major, minor] or None
"""
return self._command_handler.firmata_version
def get_firmata_firmware_version(self):
"""
Retrieve the firmware id information returned by a previous call to refresh_report_firmware()
@return: Firmata_firmware list [major, minor, file_name] or None
"""
return self._command_handler.firmata_firmware
def get_pin_state_query_results(self):
"""
This method returns the results of a previous call to pin_state_query() and then resets
the pin state query data to None
@return: Raw pin state query data
"""
r_data = self._command_handler.last_pin_query_results
self._command_handler.last_pin_query_results = []
return r_data
# noinspection PyMethodMayBeStatic
def get_pymata_version(self):
"""
Returns the PyMata version number in a list: [Major Number, Minor Number]
@return:
"""
return [1, 57]
# noinspection PyMethodMayBeStatic
def get_sonar_data(self):
"""
Retrieve Ping (HC-SR04 type) data. The data is presented as a dictionary.
The 'key' is the trigger pin specified in sonar_config() and the 'data' is the
current measured distance (in centimeters)
for that pin. If there is no data, the value is set to IGNORE (127).
@return: active_sonar_map
"""
return self._command_handler.active_sonar_map
def get_stepper_version(self, timeout=20):
"""
@param timeout: specify a time to allow arduino to process and return a version
@return: the stepper version number if it was set.
"""
# get current time
start_time = time.time()
# wait for up to 20 seconds for a successful capability query to occur
while self._command_handler.stepper_library_version <= 0:
if time.time() - start_time > timeout:
if self.verbose is True:
print("Stepper Library Version Request timed-out. "
"Did you send a stepper_request_library_version command?")
return
else:
pass
return self._command_handler.stepper_library_version
def i2c_config(self, read_delay_time=0, pin_type=None, clk_pin=0, data_pin=0):
"""
NOTE: THIS METHOD MUST BE CALLED BEFORE ANY I2C REQUEST IS MADE
        This method initializes Firmata for I2C operations.
It allows setting of a read time delay amount, and to optionally track
the pins as I2C in the appropriate response table.
To track pins: Set the pin_type to ANALOG or DIGITAL and provide the pin numbers.
If using ANALOG, pin numbers use the analog number, for example A4: use 4.
@param read_delay_time: an optional parameter, default is 0
@param pin_type: ANALOG or DIGITAL to select response table type to track pin numbers
@param clk_pin: pin number (see comment above).
@param data_pin: pin number (see comment above).
@return: No Return Value
"""
data = [read_delay_time & 0x7f, read_delay_time >> 7]
self._command_handler.send_sysex(self._command_handler.I2C_CONFIG, data)
# If pin type is set, set pin mode in appropriate response table for these pins
if pin_type:
if pin_type == self.DIGITAL:
self._command_handler.digital_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE] \
= self.I2C
self._command_handler.digital_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE] \
= self.I2C
else:
self._command_handler.analog_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE] \
= self.I2C
self._command_handler.analog_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE] \
= self.I2C
def i2c_read(self, address, register, number_of_bytes, read_type, cb=None):
"""
This method requests the read of an i2c device. Results are retrieved by a call to
i2c_get_read_data().
If a callback method is provided, when data is received from the device it will be sent to the callback method
@param address: i2c device address
@param register: register number (can be set to zero)
@param number_of_bytes: number of bytes expected to be returned
@param read_type: I2C_READ or I2C_READ_CONTINUOUSLY
@param cb: Optional callback function to report i2c data as result of read command
"""
data = [address, read_type, register & 0x7f, register >> 7,
number_of_bytes & 0x7f, number_of_bytes >> 7]
# add or update entry in i2c_map for reply
self._command_handler.i2c_map[address] = [cb, None]
self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)
def i2c_write(self, address, *args):
"""
Write data to an i2c device.
@param address: i2c device address
@param args: A variable number of bytes to be sent to the device
"""
data = [address, self.I2C_WRITE]
for item in args:
data.append(item)
self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)
def i2c_stop_reading(self, address):
"""
This method stops an I2C_READ_CONTINUOUSLY operation for the i2c device address specified.
@param address: address of i2c device
"""
data = [address, self.I2C_STOP_READING]
self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)
def i2c_get_read_data(self, address):
"""
This method retrieves the i2c read data as the result of an i2c_read() command.
@param address: i2c device address
@return: raw data read from device
"""
if address in self._command_handler.i2c_map:
map_entry = self._command_handler.i2c_map[address]
return map_entry[1]
def pin_state_query(self, pin):
"""
This method issues a pin state query command. Data returned is retrieved via
a call to get_pin_state_query_results()
@param pin: pin number
"""
self._command_handler.send_sysex(self._command_handler.PIN_STATE_QUERY, [pin])
def play_tone(self, pin, tone_command, frequency, duration):
"""
This method will call the Tone library for the selected pin.
If the tone command is set to TONE_TONE, then the specified tone will be played.
Else, if the tone command is TONE_NO_TONE, then any currently playing tone will be disabled.
It is intended for a future release of Arduino Firmata
@param pin: Pin number
@param tone_command: Either TONE_TONE, or TONE_NO_TONE
@param frequency: Frequency of tone
@param duration: Duration of tone in milliseconds
@return: No return value
"""
# convert the integer values to bytes
if tone_command == self.TONE_TONE:
# duration is specified
if duration:
data = [tone_command, pin, frequency & 0x7f, frequency >> 7, duration & 0x7f, duration >> 7]
else:
data = [tone_command, pin, frequency & 0x7f, frequency >> 7, 0, 0]
self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \
self.TONE
# turn off tone
else:
data = [tone_command, pin]
self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)
def refresh_report_version(self):
"""
This method will query firmata for the report version.
Retrieve the report version via a call to get_firmata_version()
"""
command = [self._command_handler.REPORT_VERSION]
self._command_handler.send_command(command)
def refresh_report_firmware(self):
"""
This method will query firmata to report firmware. Retrieve the report via a
call to get_firmata_firmware_version()
"""
self._command_handler.send_sysex(self._command_handler.REPORT_FIRMWARE, None)
def reset(self):
"""
This command sends a reset message to the Arduino. The response tables will be reinitialized
@return: No return value.
"""
# set all output pins to a value of 0
for pin in range(0, self._command_handler.total_pins_discovered):
            if self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] \
                    == self.PWM:
                self.analog_write(pin, 0)
            elif self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] \
                    == self.SERVO:
                self.analog_write(pin, 0)
            elif self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] \
                    == self.TONE:
data = [self.TONE_NO_TONE, pin]
self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)
else:
self.digital_write(pin, 0)
self._command_handler.system_reset()
def set_analog_latch(self, pin, threshold_type, threshold_value, cb=None):
"""
This method "arms" an analog pin for its data to be latched and saved in the latching table
If a callback method is provided, when latching criteria is achieved, the callback function is called
with latching data notification. In that case, the latching table is not updated.
        @param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5)
@param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE
@param threshold_value: numerical value - between 0 and 1023
@param cb: callback method
@return: True if successful, False if parameter data is invalid
"""
        if self.ANALOG_LATCH_GT <= threshold_type <= self.ANALOG_LATCH_LTE:
            if 0 <= threshold_value <= 1023:
                self._command_handler.set_analog_latch(pin, threshold_type, threshold_value, cb)
                return True
            else:
                return False
        else:
            return False
def set_digital_latch(self, pin, threshold_type, cb=None):
"""
This method "arms" a digital pin for its data to be latched and saved in the latching table
If a callback method is provided, when latching criteria is achieved, the callback function is called
with latching data notification. In that case, the latching table is not updated.
@param pin: Digital pin number
@param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
@param cb: callback function
@return: True if successful, False if parameter data is invalid
"""
if 0 <= threshold_type <= 1:
self._command_handler.set_digital_latch(pin, threshold_type, cb)
return True
else:
return False
def set_pin_mode(self, pin, mode, pin_type, cb=None):
"""
This method sets a pin to the desired pin mode for the pin_type.
It automatically enables data reporting.
NOTE: DO NOT CALL THIS METHOD FOR I2C. See i2c_config().
@param pin: Pin number (for analog use the analog number, for example A4: use 4)
@param mode: INPUT, OUTPUT, PWM
@param pin_type: ANALOG or DIGITAL
@param cb: This is an optional callback function to report data changes to the user
@return: No return value
"""
command = [self._command_handler.SET_PIN_MODE, pin, mode]
self._command_handler.send_command(command)
# enable reporting for input pins
if mode == self.INPUT:
if pin_type == self.ANALOG:
# set analog response table to show this pin is an input pin
self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \
self.INPUT
self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
self.enable_analog_reporting(pin)
# if not analog it has to be digital
else:
self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = \
self.INPUT
self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb
self.enable_digital_reporting(pin)
else: # must be output - so set the tables accordingly
if pin_type == self.ANALOG:
self._command_handler.analog_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = mode
else:
self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] = mode
def set_sampling_interval(self, interval):
"""
This method sends the desired sampling interval to Firmata.
Note: Standard Firmata will ignore any interval less than 10 milliseconds
@param interval: Integer value for desired sampling interval in milliseconds
@return: No return value.
"""
data = [interval & 0x7f, interval >> 7]
self._command_handler.send_sysex(self._command_handler.SAMPLING_INTERVAL, data)
def servo_config(self, pin, min_pulse=544, max_pulse=2400):
"""
Configure a pin as a servo pin. Set pulse min, max in ms.
@param pin: Servo Pin.
@param min_pulse: Min pulse width in ms.
@param max_pulse: Max pulse width in ms.
@return: No return value
"""
self.set_pin_mode(pin, self.SERVO, self.OUTPUT)
command = [pin, min_pulse & 0x7f, min_pulse >> 7, max_pulse & 0x7f,
max_pulse >> 7]
self._command_handler.send_sysex(self._command_handler.SERVO_CONFIG, command)
def sonar_config(self, trigger_pin, echo_pin, cb=None, ping_interval=50, max_distance=200):
"""
        Configure the pins, ping interval and maximum distance for an HC-SR04 type device.
Single pin configuration may be used. To do so, set both the trigger and echo pins to the same value.
Up to a maximum of 6 SONAR devices is supported
If the maximum is exceeded a message is sent to the console and the request is ignored.
NOTE: data is measured in centimeters
@param trigger_pin: The pin number of for the trigger (transmitter).
@param echo_pin: The pin number for the received echo.
        @param ping_interval: Minimum interval between pings. Lowest number to use is 33 ms. Max is 127.
@param max_distance: Maximum distance in cm. Max is 200.
@param cb: optional callback function to report sonar data changes
"""
if max_distance > 200:
max_distance = 200
max_distance_lsb = max_distance & 0x7f
max_distance_msb = max_distance >> 7
data = [trigger_pin, echo_pin, ping_interval, max_distance_lsb, max_distance_msb]
self.set_pin_mode(trigger_pin, self.SONAR, self.INPUT)
self.set_pin_mode(echo_pin, self.SONAR, self.INPUT)
# update the ping data map for this pin
if len(self._command_handler.active_sonar_map) > 6:
if self.verbose:
print("sonar_config: maximum number of devices assigned - ignoring request")
return
else:
with self.data_lock:
# self._command_handler.active_sonar_map[trigger_pin] = self.IGNORE
self._command_handler.active_sonar_map[trigger_pin] = [cb, [self.IGNORE]]
self._command_handler.send_sysex(self._command_handler.SONAR_CONFIG, data)
def stepper_config(self, steps_per_revolution, stepper_pins):
"""
Configure stepper motor prior to operation.
@param steps_per_revolution: number of steps per motor revolution
@param stepper_pins: a list of control pin numbers - either 4 or 2
"""
data = [self.STEPPER_CONFIGURE, steps_per_revolution & 0x7f, steps_per_revolution >> 7]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)
def stepper_step(self, motor_speed, number_of_steps):
"""
Move a stepper motor for the number of steps at the specified speed
@param motor_speed: 21 bits of data to set motor speed
@param number_of_steps: 14 bits for number of steps & direction
positive is forward, negative is reverse
"""
if number_of_steps > 0:
direction = 1
else:
direction = 0
abs_number_of_steps = abs(number_of_steps)
data = [self.STEPPER_STEP, motor_speed & 0x7f, (motor_speed >> 7) & 0x7f, motor_speed >> 14,
abs_number_of_steps & 0x7f, abs_number_of_steps >> 7, direction]
self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)
def stepper_request_library_version(self):
"""
Request the stepper library version from the Arduino.
To retrieve the version after this command is called, call
get_stepper_version
"""
data = [self.STEPPER_LIBRARY_VERSION]
self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)
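    # Minimal usage sketch (hypothetical serial port and pin number; construction
    # blocks while the board is auto-discovered):
    #
    #   board = PyMata('/dev/ttyACM0', bluetooth=False)
    #   board.set_pin_mode(13, board.OUTPUT, board.DIGITAL)
    #   board.digital_write(13, 1)
    #   board.close()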
|
|
#!/usr/bin/env python
# Copyright (c) 2013 GitHub, Inc.
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
import errno
import argparse
import os
import Queue
import re
import shutil
import subprocess
import sys
import threading
CONCURRENT_TASKS=4
def GetCommandOutput(command):
"""Runs the command list, returning its output.
Prints the given command (which should be a list of one or more strings),
then runs it and returns its output (stdout) as a string.
From chromium_utils.
"""
devnull = open(os.devnull, 'w')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=devnull,
bufsize=1)
output = proc.communicate()[0]
return output
def GetDumpSymsBinary(build_dir=None):
"""Returns the path to the dump_syms binary."""
DUMP_SYMS = 'dump_syms'
dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
if not os.access(dump_syms_bin, os.X_OK):
print 'Cannot find %s.' % DUMP_SYMS
sys.exit(1)
return dump_syms_bin
def FindBundlePart(full_path):
if full_path.endswith(('.dylib', '.framework', '.app')):
return os.path.basename(full_path)
elif full_path != '' and full_path != '/':
return FindBundlePart(os.path.dirname(full_path))
else:
return ''
def GetDSYMBundle(options, binary_path):
"""Finds the .dSYM bundle to the binary."""
if os.path.isabs(binary_path):
dsym_path = binary_path + '.dSYM'
if os.path.exists(dsym_path):
return dsym_path
filename = FindBundlePart(binary_path)
search_dirs = [options.build_dir, options.libchromiumcontent_dir]
if filename.endswith(('.dylib', '.framework', '.app')):
for directory in search_dirs:
dsym_path = os.path.join(directory, filename) + '.dSYM'
if os.path.exists(dsym_path):
return dsym_path
return binary_path
def GetSymbolPath(options, binary_path):
"""Finds the .dbg to the binary."""
filename = os.path.basename(binary_path)
dbg_path = os.path.join(options.libchromiumcontent_dir, filename) + '.dbg'
if os.path.exists(dbg_path):
return dbg_path
return binary_path
def Resolve(path, exe_path, loader_path, rpaths):
"""Resolve a dyld path.
@executable_path is replaced with |exe_path|
@loader_path is replaced with |loader_path|
@rpath is replaced with the first path in |rpaths| where the referenced file
is found
"""
path = path.replace('@loader_path', loader_path)
path = path.replace('@executable_path', exe_path)
if path.find('@rpath') != -1:
for rpath in rpaths:
new_path = Resolve(path.replace('@rpath', rpath), exe_path, loader_path,
[])
if os.access(new_path, os.F_OK):
return new_path
return ''
return path
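# Worked example (hypothetical paths): resolving '@rpath/libnode.dylib' against
# rpaths=['/opt/app/Frameworks'] substitutes the rpath and returns
# '/opt/app/Frameworks/libnode.dylib' only if that file exists; otherwise the
# empty string is returned and the dependency is dropped by the caller.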
def GetSharedLibraryDependenciesLinux(binary):
"""Return absolute paths to all shared library dependecies of the binary.
This implementation assumes that we're running on a Linux system."""
ldd = GetCommandOutput(['ldd', binary])
lib_re = re.compile('\t.* => (.+) \(.*\)$')
result = []
for line in ldd.splitlines():
m = lib_re.match(line)
if m:
result.append(os.path.realpath(m.group(1)))
return result
def GetSharedLibraryDependenciesMac(binary, exe_path):
"""Return absolute paths to all shared library dependecies of the binary.
This implementation assumes that we're running on a Mac system."""
loader_path = os.path.dirname(binary)
otool = GetCommandOutput(['otool', '-l', binary]).splitlines()
rpaths = []
for idx, line in enumerate(otool):
if line.find('cmd LC_RPATH') != -1:
m = re.match(' *path (.*) \(offset .*\)$', otool[idx+2])
rpaths.append(m.group(1))
otool = GetCommandOutput(['otool', '-L', binary]).splitlines()
lib_re = re.compile('\t(.*) \(compatibility .*\)$')
deps = []
for line in otool:
m = lib_re.match(line)
if m:
dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
if dep:
deps.append(os.path.normpath(dep))
return deps
def GetSharedLibraryDependencies(options, binary, exe_path):
"""Return absolute paths to all shared library dependecies of the binary."""
deps = []
if sys.platform.startswith('linux'):
deps = GetSharedLibraryDependenciesLinux(binary)
elif sys.platform == 'darwin':
deps = GetSharedLibraryDependenciesMac(binary, exe_path)
else:
print "Platform not supported."
sys.exit(1)
result = []
build_dir = os.path.abspath(options.build_dir)
for dep in deps:
if (os.access(dep, os.F_OK)):
result.append(dep)
return result
def mkdir_p(path):
"""Simulates mkdir -p."""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def GenerateSymbols(options, binaries):
"""Dumps the symbols of binary and places them in the given directory."""
queue = Queue.Queue()
print_lock = threading.Lock()
def _Worker():
while True:
binary = queue.get()
if options.verbose:
with print_lock:
print "Generating symbols for %s" % binary
if sys.platform == 'darwin':
binary = GetDSYMBundle(options, binary)
elif sys.platform == 'linux2':
binary = GetSymbolPath(options, binary)
syms = GetCommandOutput([GetDumpSymsBinary(options.build_dir), '-r', '-c',
binary])
module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-F]+) (.*)\n", syms)
output_path = os.path.join(options.symbols_dir, module_line.group(2),
module_line.group(1))
mkdir_p(output_path)
symbol_file = "%s.sym" % module_line.group(2)
f = open(os.path.join(output_path, symbol_file), 'w')
f.write(syms)
f.close()
queue.task_done()
for binary in binaries:
queue.put(binary)
for _ in range(options.jobs):
t = threading.Thread(target=_Worker)
t.daemon = True
t.start()
queue.join()
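# The dump_syms output parsed above begins with a record such as
# "MODULE Linux x86_64 A1B2C3D4E5F60 libnode.so" (identifier and name are
# hypothetical), so the resulting file is written to
# <symbols_dir>/libnode.so/A1B2C3D4E5F60/libnode.so.sym.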
def main():
parser = argparse.ArgumentParser(description='Generate Breakpad Symbols Project')
parser.add_argument('--build-dir', required=True,
help='The build output directory.')
parser.add_argument('--symbols-dir', required=True,
help='The directory where to write the symbols file.')
parser.add_argument('--libchromiumcontent-dir', required=True,
help='The directory where libchromiumcontent is downloaded.')
parser.add_argument('--binary', action='append', required=True,
help='The path of the binary to generate symbols for.')
parser.add_argument('--clear', default=False, action='store_true',
help='Clear the symbols directory before writing new '
'symbols.')
parser.add_argument('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type=int, help='Number of parallel tasks to run.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Print verbose status output.')
options = parser.parse_args()
for bin_file in options.binary:
if not os.access(bin_file, os.X_OK):
print "Cannot find %s." % options.binary
return 1
if options.clear:
try:
shutil.rmtree(options.symbols_dir)
except:
pass
# Build the transitive closure of all dependencies.
binaries = set(options.binary)
queue = options.binary
while queue:
current_bin = queue.pop(0)
exe_path = os.path.dirname(current_bin)
deps = GetSharedLibraryDependencies(options, current_bin, exe_path)
new_deps = set(deps) - binaries
binaries |= new_deps
queue.extend(list(new_deps))
GenerateSymbols(options, binaries)
return 0
if '__main__' == __name__:
sys.exit(main())
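# Illustrative invocation (the script name and paths are hypothetical):
#   python generate_breakpad_symbols.py \
#       --build-dir=out/Release \
#       --symbols-dir=out/breakpad_symbols \
#       --libchromiumcontent-dir=vendor/libchromiumcontent \
#       --binary=out/Release/mybinary -j 4 -v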
|
|
from __future__ import unicode_literals
import base64
import os
import posixpath
import re
from itertools import takewhile
from django.utils.encoding import smart_bytes, force_text
from pipeline.conf import settings
from pipeline.storage import default_storage
from pipeline.utils import to_class, relpath
from pipeline.exceptions import CompressorError
URL_DETECTOR = r'url\([\'"]?([^\s)]+\.[a-z]+[^\'"\s]*)[\'"]?\)'
URL_REPLACER = r'url\(__EMBED__(.+?)(\?\d+)?\)'
NON_REWRITABLE_URL = re.compile(r'^(http:|https:|data:|//)')
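# Illustrative examples: URL_DETECTOR captures the inner path of CSS url()
# references, e.g. "url('../img/logo.png')" -> group(1) == '../img/logo.png';
# NON_REWRITABLE_URL matches absolute, protocol-relative and data: URIs, which
# are left untouched by the rewriting below.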
DEFAULT_TEMPLATE_FUNC = "template"
TEMPLATE_FUNC = r"""var template = function(str){var fn = new Function('obj', 'var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push(\''+str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/<%=([\s\S]+?)%>/g,function(match,code){return "',"+code.replace(/\\'/g, "'")+",'";}).replace(/<%([\s\S]+?)%>/g,function(match,code){return "');"+code.replace(/\\'/g, "'").replace(/[\r\n\t]/g,' ')+"__p.push('";}).replace(/\r/g,'\\r').replace(/\n/g,'\\n').replace(/\t/g,'\\t')+"');}return __p.join('');");return fn;};"""
MIME_TYPES = {
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.ttf': 'font/truetype',
'.otf': 'font/opentype',
'.woff': 'font/woff'
}
EMBED_EXTS = MIME_TYPES.keys()
FONT_EXTS = ['.ttf', '.otf', '.woff']
class Compressor(object):
asset_contents = {}
def __init__(self, storage=default_storage, verbose=False):
self.storage = storage
self.verbose = verbose
@property
def js_compressor(self):
return to_class(settings.PIPELINE_JS_COMPRESSOR)
@property
def css_compressor(self):
return to_class(settings.PIPELINE_CSS_COMPRESSOR)
def compress_js(self, paths, templates=None, **kwargs):
"""Concatenate and compress JS files"""
js = self.concatenate(paths)
if templates:
js = js + self.compile_templates(templates)
if not settings.PIPELINE_DISABLE_WRAPPER:
js = "(function() { %s }).call(this);" % js
compressor = self.js_compressor
if compressor:
js = getattr(compressor(verbose=self.verbose), 'compress_js')(js)
return js
def compress_css(self, paths, output_filename, variant=None, **kwargs):
"""Concatenate and compress CSS files"""
css = self.concatenate_and_rewrite(paths, output_filename, variant)
compressor = self.css_compressor
if compressor:
css = getattr(compressor(verbose=self.verbose), 'compress_css')(css)
if not variant:
return css
elif variant == "datauri":
return self.with_data_uri(css)
else:
raise CompressorError("\"%s\" is not a valid variant" % variant)
def compile_templates(self, paths):
compiled = []
if not paths:
return ''
namespace = settings.PIPELINE_TEMPLATE_NAMESPACE
base_path = self.base_path(paths)
for path in paths:
contents = self.read_text(path)
contents = re.sub("\r?\n", "\\\\n", contents)
contents = re.sub("'", "\\'", contents)
name = self.template_name(path, base_path)
compiled.append("%s['%s'] = %s('%s');\n" % (
namespace,
name,
settings.PIPELINE_TEMPLATE_FUNC,
contents
))
compiler = TEMPLATE_FUNC if settings.PIPELINE_TEMPLATE_FUNC == DEFAULT_TEMPLATE_FUNC else ""
return "\n".join([
"%(namespace)s = %(namespace)s || {};" % {'namespace': namespace},
compiler,
''.join(compiled)
])
def base_path(self, paths):
def names_equal(name):
return all(n == name[0] for n in name[1:])
directory_levels = zip(*[p.split(os.sep) for p in paths])
return os.sep.join(x[0] for x in takewhile(names_equal, directory_levels))
def template_name(self, path, base):
"""Find out the name of a JS template"""
if not base:
path = os.path.basename(path)
if path == base:
base = os.path.dirname(path)
name = re.sub(r"^%s[\/\\]?(.*)%s$" % (
re.escape(base), re.escape(settings.PIPELINE_TEMPLATE_EXT)
), r"\1", path)
return re.sub(r"[\/\\]", settings.PIPELINE_TEMPLATE_SEPARATOR, name)
def concatenate_and_rewrite(self, paths, output_filename, variant=None):
"""Concatenate together files and rewrite urls"""
stylesheets = []
for path in paths:
def reconstruct(match):
asset_path = match.group(1)
if NON_REWRITABLE_URL.match(asset_path):
return "url(%s)" % asset_path
asset_url = self.construct_asset_path(asset_path, path,
output_filename, variant)
return "url(%s)" % asset_url
content = self.read_text(path)
# content needs to be unicode to avoid explosions with non-ascii chars
content = re.sub(URL_DETECTOR, reconstruct, content)
stylesheets.append(content)
return '\n'.join(stylesheets)
def concatenate(self, paths):
"""Concatenate together a list of files"""
return "\n".join([self.read_text(path) for path in paths])
def construct_asset_path(self, asset_path, css_path, output_filename, variant=None):
"""Return a rewritten asset URL for a stylesheet"""
public_path = self.absolute_path(asset_path, os.path.dirname(css_path).replace('\\', '/'))
if self.embeddable(public_path, variant):
return "__EMBED__%s" % public_path
if not posixpath.isabs(asset_path):
asset_path = self.relative_path(public_path, output_filename)
return asset_path
def embeddable(self, path, variant):
"""Is the asset embeddable ?"""
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.PIPELINE_EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
        if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.PIPELINE_EMBED_MAX_IMAGE_SIZE):
return False
return True
def with_data_uri(self, css):
def datauri(match):
path = match.group(1)
mime_type = self.mime_type(path)
data = self.encoded_content(path)
return "url(\"data:%s;charset=utf-8;base64,%s\")" % (mime_type, data)
return re.sub(URL_REPLACER, datauri, css)
def encoded_content(self, path):
"""Return the base64 encoded contents"""
if path in self.__class__.asset_contents:
return self.__class__.asset_contents[path]
data = self.read_bytes(path)
self.__class__.asset_contents[path] = force_text(base64.b64encode(data))
return self.__class__.asset_contents[path]
def mime_type(self, path):
"""Get mime-type from filename"""
name, ext = os.path.splitext(path)
return MIME_TYPES[ext]
def absolute_path(self, path, start):
"""
Return the absolute public path for an asset,
given the path of the stylesheet that contains it.
"""
if posixpath.isabs(path):
path = posixpath.join(default_storage.location, path)
else:
path = posixpath.join(start, path)
return posixpath.normpath(path)
def relative_path(self, absolute_path, output_filename):
"""Rewrite paths relative to the output stylesheet path"""
absolute_path = posixpath.join(settings.PIPELINE_ROOT, absolute_path)
output_path = posixpath.join(settings.PIPELINE_ROOT, posixpath.dirname(output_filename))
return relpath(absolute_path, output_path)
def read_bytes(self, path):
"""Read file content in binary mode"""
file = default_storage.open(path)
content = file.read()
file.close()
return content
def read_text(self, path):
content = self.read_bytes(path)
return force_text(content)
class CompressorBase(object):
def __init__(self, verbose):
self.verbose = verbose
def filter_css(self, css):
raise NotImplementedError
def filter_js(self, js):
raise NotImplementedError
class SubProcessCompressor(CompressorBase):
def execute_command(self, command, content):
import subprocess
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
if content:
content = smart_bytes(content)
stdout, stderr = pipe.communicate(content)
if stderr.strip() and pipe.returncode != 0:
raise CompressorError(stderr)
elif self.verbose:
print(stderr)
return force_text(stdout)
class NoopCompressor(CompressorBase):
def compress_js(self, js):
return js
def compress_css(self, css):
return css
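# Illustrative sketch (not part of django-pipeline): a minimal compressor built
# on SubProcessCompressor, assuming a hypothetical ``mymin`` CLI that reads the
# source on stdin and writes the minified result to stdout.  It would be wired
# in through the usual dotted-path settings, e.g.
# PIPELINE_JS_COMPRESSOR = 'myapp.compressors.ExampleCLICompressor'.
class ExampleCLICompressor(SubProcessCompressor):
    def compress_js(self, js):
        # Pipe the concatenated JS through the external minifier.
        return self.execute_command("mymin --type=js", js)
    def compress_css(self, css):
        # Same CLI, CSS mode.
        return self.execute_command("mymin --type=css", css)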
|
|
import pytest
from libweasyl import staff
from libweasyl.models.helpers import CharSettings
from weasyl import profile, searchtag
from weasyl.error import WeasylError
from weasyl.test import db_utils
# Tag sets for testing
tags = searchtag.parse_tags("omega_ruby, alpha_sapphire, diamond, pearl")
tags_two = searchtag.parse_tags("omega_ruby, alpha_sapphire, diamond")
@pytest.mark.usefixtures('db')
def test_TargetRecordMissing_WeasylError_if_item_record_missing_or_invalid():
userid_tag_adder = db_utils.create_user()
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, submitid=666)
assert err.value.value == "TargetRecordMissing"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, charid=666)
assert err.value.value == "TargetRecordMissing"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, journalid=666)
assert err.value.value == "TargetRecordMissing"
@pytest.mark.usefixtures('db')
def test_InsufficientPermissions_WeasylError_if_user_does_not_have_tagging_permissions():
# Set up for this test
admin = db_utils.create_user()
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
profile.do_manage(admin, userid_tag_adder, permission_tag=False)
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
assert err.value.value == "InsufficientPermissions"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, charid=charid)
assert err.value.value == "InsufficientPermissions"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
assert err.value.value == "InsufficientPermissions"
@pytest.mark.usefixtures('db')
def test_contentOwnerIgnoredYou_WeasylError_if_user_ignored_by_item_owner():
# Set up for this test
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
db_utils.create_ignoreuser(userid_owner, userid_tag_adder)
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
assert err.value.value == "contentOwnerIgnoredYou"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, charid=charid)
assert err.value.value == "contentOwnerIgnoredYou"
with pytest.raises(WeasylError) as err:
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
assert err.value.value == "contentOwnerIgnoredYou"
@pytest.mark.usefixtures('db')
def test_adding_tags_when_no_tags_previously_existed():
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
searchtag.associate(userid_tag_adder, tags, charid=charid)
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
submission_tags = searchtag.select(submitid=submitid)
assert tags == set(submission_tags)
character_tags = searchtag.select(charid=charid)
assert tags == set(character_tags)
journal_tags = searchtag.select(journalid=journalid)
assert tags == set(journal_tags)
@pytest.mark.usefixtures('db')
def test_removing_tags():
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
searchtag.associate(userid_tag_adder, tags, charid=charid)
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
# Remove the 'pearl' tag
searchtag.associate(userid_tag_adder, tags_two, submitid=submitid)
searchtag.associate(userid_tag_adder, tags_two, charid=charid)
searchtag.associate(userid_tag_adder, tags_two, journalid=journalid)
submission_tags = searchtag.select(submitid=submitid)
assert tags_two == set(submission_tags)
character_tags = searchtag.select(charid=charid)
assert tags_two == set(character_tags)
journal_tags = searchtag.select(journalid=journalid)
assert tags_two == set(journal_tags)
@pytest.mark.usefixtures('db')
def test_clearing_all_tags():
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
searchtag.associate(userid_tag_adder, tags, charid=charid)
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
# Clear all tags now that they were initially set
empty_tags = set()
searchtag.associate(userid_tag_adder, empty_tags, submitid=submitid)
searchtag.associate(userid_tag_adder, empty_tags, charid=charid)
searchtag.associate(userid_tag_adder, empty_tags, journalid=journalid)
submitid_tags = searchtag.select(submitid=submitid)
assert submitid_tags == []
charid_tags = searchtag.select(charid=charid)
assert charid_tags == []
journalid_tags = searchtag.select(journalid=journalid)
assert journalid_tags == []
@pytest.mark.usefixtures('db')
def test_attempt_setting_tags_when_some_tags_have_been_restricted():
"""
Verify that tags are excluded from being added to a submission's tags if the tag is restricted
"""
userid_owner = db_utils.create_user()
userid_tag_adder = db_utils.create_user()
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
restricted_tag = searchtag.parse_restricted_tags("pearl")
searchtag.edit_user_tag_restrictions(userid_owner, restricted_tag)
searchtag.associate(userid_tag_adder, tags, submitid=submitid)
searchtag.associate(userid_tag_adder, tags, charid=charid)
searchtag.associate(userid_tag_adder, tags, journalid=journalid)
# Verify that the "pearl" tag was not added
submission_tags = searchtag.select(submitid=submitid)
assert tags_two == set(submission_tags)
character_tags = searchtag.select(charid=charid)
assert tags_two == set(character_tags)
journal_tags = searchtag.select(journalid=journalid)
assert tags_two == set(journal_tags)
@pytest.mark.usefixtures('db')
def test_moderators_and_above_can_add_restricted_tags_successfully(monkeypatch):
"""
Moderators (and admins, technical, and directors) can add restricted tags to content.
Developers are not included in this test, as they are for all intents and purposes just
normal user accounts.
"""
userid_owner = db_utils.create_user()
mod_tag_adder = db_utils.create_user()
monkeypatch.setattr(staff, 'MODS', frozenset([mod_tag_adder]))
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
submitid = db_utils.create_submission(userid_owner)
restricted_tag = searchtag.parse_restricted_tags("pearl")
searchtag.edit_user_tag_restrictions(userid_owner, restricted_tag)
searchtag.associate(mod_tag_adder, tags, submitid=submitid)
searchtag.associate(mod_tag_adder, tags, charid=charid)
searchtag.associate(mod_tag_adder, tags, journalid=journalid)
# Verify that all tags were added successfully. 'pearl' is restricted.
submission_tags = searchtag.select(submitid=submitid)
assert tags == set(submission_tags)
character_tags = searchtag.select(charid=charid)
assert tags == set(character_tags)
journal_tags = searchtag.select(journalid=journalid)
assert tags == set(journal_tags)
@pytest.mark.usefixtures('db')
def test_associate_return_values():
"""
    ``associate()`` returns a dict of the following format:
return {"add_failure_restricted_tags": add_failure_restricted_tags,
"remove_failure_owner_set_tags": remove_failure_owner_set_tags}
/OR/ None
    add_failure_restricted_tags is None if no tags failed to be added during the associate call
    because they were on the user's or the global restricted tags list. Otherwise, it contains
    a space-separated list of tags which failed to be added to the content item.
    remove_failure_owner_set_tags is None if no tags failed to be removed during the associate call.
    Otherwise, it contains a space-separated list of tags which the content owner added
    and has opted not to permit others to remove.
If neither element of the dict is set, ``associate()`` returns None.
"""
config = CharSettings({'disallow-others-tag-removal'}, {}, {})
userid_owner = db_utils.create_user(config=config)
userid_tag_adder = db_utils.create_user()
submitid = db_utils.create_submission(userid_owner)
journalid = db_utils.create_journal(userid_owner)
charid = db_utils.create_character(userid_owner)
""" Test the None result (no failures), then manually clear the tags afterwards. """
result = searchtag.associate(userid_tag_adder, tags, submitid=submitid)
assert result is None
result = searchtag.associate(userid_tag_adder, tags, journalid=journalid)
assert result is None
    result = searchtag.associate(userid_tag_adder, tags, charid=charid)
assert result is None
searchtag.associate(userid_tag_adder, set(), submitid=submitid)
searchtag.associate(userid_tag_adder, set(), journalid=journalid)
    searchtag.associate(userid_tag_adder, set(), charid=charid)
""" Test the result:None variant (restricted tags added, no tags removed) """
restricted_tag = searchtag.parse_restricted_tags("pearl")
searchtag.edit_user_tag_restrictions(userid_owner, restricted_tag)
result = searchtag.associate(userid_tag_adder, tags, submitid=submitid)
assert "pearl" in result["add_failure_restricted_tags"]
assert result["remove_failure_owner_set_tags"] is None
result = searchtag.associate(userid_tag_adder, tags, charid=charid)
assert "pearl" in result["add_failure_restricted_tags"]
assert result["remove_failure_owner_set_tags"] is None
result = searchtag.associate(userid_tag_adder, tags, journalid=journalid)
assert "pearl" in result["add_failure_restricted_tags"]
assert result["remove_failure_owner_set_tags"] is None
searchtag.associate(userid_owner, set(), submitid=submitid)
searchtag.associate(userid_owner, set(), charid=charid)
searchtag.associate(userid_owner, set(), journalid=journalid)
searchtag.edit_user_tag_restrictions(userid_owner, set())
"""Test the None:result variant (no restricted tags added, tag removal blocked)
- Submission items will return None in this case (different method of preventing tag removal)
- Character and journal items should return the None:result variant, as expected"""
searchtag.associate(userid_owner, tags, submitid=submitid)
searchtag.associate(userid_owner, tags, charid=charid)
searchtag.associate(userid_owner, tags, journalid=journalid)
result = searchtag.associate(userid_tag_adder, tags_two, submitid=submitid)
assert result is None
result = searchtag.associate(userid_tag_adder, tags_two, charid=charid)
assert result["add_failure_restricted_tags"] is None
assert "pearl" in result["remove_failure_owner_set_tags"]
result = searchtag.associate(userid_tag_adder, tags_two, journalid=journalid)
assert result["add_failure_restricted_tags"] is None
assert "pearl" in result["remove_failure_owner_set_tags"]
searchtag.associate(userid_owner, set(), submitid=submitid)
searchtag.associate(userid_owner, set(), charid=charid)
searchtag.associate(userid_owner, set(), journalid=journalid)
"""Test the result:result variant (restricted tags added, tag removal blocked)
- Submission items will behave in the result:None variant
- Character/Journal items will behave in the result:result manner"""
restricted_tag = searchtag.parse_restricted_tags("profanity")
searchtag.edit_user_tag_restrictions(userid_owner, restricted_tag)
searchtag.associate(userid_owner, tags, submitid=submitid)
searchtag.associate(userid_owner, tags, charid=charid)
searchtag.associate(userid_owner, tags, journalid=journalid)
# Effect upon adding this set: Remove user-set tag "pearl"; add restricted tag "profanity"
tags_three = tags_two | {"profanity"}
result = searchtag.associate(userid_tag_adder, tags_three, submitid=submitid)
assert "profanity" in result["add_failure_restricted_tags"]
assert result["remove_failure_owner_set_tags"] is None
result = searchtag.associate(userid_tag_adder, tags_three, charid=charid)
assert "profanity" in result["add_failure_restricted_tags"]
assert "pearl" in result["remove_failure_owner_set_tags"]
result = searchtag.associate(userid_tag_adder, tags_three, journalid=journalid)
assert "profanity" in result["add_failure_restricted_tags"]
assert "pearl" in result["remove_failure_owner_set_tags"]
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import throw, _
from frappe.utils import cstr
from frappe.model.document import Document
from jinja2 import TemplateSyntaxError
from frappe.utils.user import is_website_user
from frappe.model.naming import make_autoname
from frappe.core.doctype.dynamic_link.dynamic_link import deduplicate_dynamic_links
from six import iteritems, string_types
from past.builtins import cmp
import functools
class Address(Document):
def __setup__(self):
self.flags.linked = False
def autoname(self):
if not self.address_title:
if self.links:
self.address_title = self.links[0].link_name
if self.address_title:
self.name = (cstr(self.address_title).strip() + "-" + cstr(_(self.address_type)).strip())
if frappe.db.exists("Address", self.name):
self.name = make_autoname(cstr(self.address_title).strip() + "-" +
cstr(self.address_type).strip() + "-.#")
else:
throw(_("Address Title is mandatory."))
def validate(self):
self.link_address()
self.validate_reference()
deduplicate_dynamic_links(self)
def link_address(self):
"""Link address based on owner"""
if not self.links and not self.is_your_company_address:
contact_name = frappe.db.get_value("Contact", {"email_id": self.owner})
if contact_name:
contact = frappe.get_cached_doc('Contact', contact_name)
for link in contact.links:
self.append('links', dict(link_doctype=link.link_doctype, link_name=link.link_name))
return True
return False
def validate_reference(self):
if self.is_your_company_address:
if not [row for row in self.links if row.link_doctype == "Company"]:
frappe.throw(_("Company is mandatory, as it is your company address"))
# removing other links
to_remove = [row for row in self.links if row.link_doctype != "Company"]
[ self.remove(row) for row in to_remove ]
def get_display(self):
return get_address_display(self.as_dict())
def has_link(self, doctype, name):
for link in self.links:
if link.link_doctype==doctype and link.link_name== name:
return True
def has_common_link(self, doc):
reference_links = [(link.link_doctype, link.link_name) for link in doc.links]
for link in self.links:
if (link.link_doctype, link.link_name) in reference_links:
return True
return False
@frappe.whitelist()
def get_default_address(doctype, name, sort_key='is_primary_address'):
'''Returns default Address name for the given doctype, name'''
out = frappe.db.sql('''select
parent,
(select `{0}` from tabAddress a where a.name=dl.parent) as `{0}`
from
`tabDynamic Link` dl
where
link_doctype=%s and
link_name=%s and
parenttype = "Address"
'''.format(sort_key), (doctype, name))
if out:
return sorted(out, key = functools.cmp_to_key(lambda x,y: cmp(y[1], x[1])))[0][0]
else:
return None
@frappe.whitelist()
def get_address_display(address_dict):
if not address_dict:
return
if not isinstance(address_dict, dict):
address_dict = frappe.db.get_value("Address", address_dict, "*", as_dict=True, cache=True) or {}
name, template = get_address_templates(address_dict)
try:
return frappe.render_template(template, address_dict)
except TemplateSyntaxError:
frappe.throw(_("There is an error in your Address Template {0}").format(name))
def get_territory_from_address(address):
"""Tries to match city, state and country of address to existing territory"""
if not address:
return
if isinstance(address, string_types):
address = frappe.get_cached_doc("Address", address)
territory = None
for fieldname in ("city", "state", "country"):
if address.get(fieldname):
territory = frappe.db.get_value("Territory", address.get(fieldname))
if territory:
break
return territory
def get_list_context(context=None):
return {
"title": _("Addresses"),
"get_list": get_address_list,
"row_template": "templates/includes/address_row.html",
'no_breadcrumbs': True,
}
def get_address_list(doctype, txt, filters, limit_start, limit_page_length = 20, order_by = None):
from frappe.www.list import get_list
user = frappe.session.user
ignore_permissions = False
if is_website_user():
if not filters: filters = []
add_name = []
contact = frappe.db.sql("""
select
address.name
from
`tabDynamic Link` as link
join
`tabAddress` as address on link.parent = address.name
where
link.parenttype = 'Address' and
link_name in(
select
link.link_name from `tabContact` as contact
join
`tabDynamic Link` as link on contact.name = link.parent
where
contact.user = %s)""",(user))
for c in contact:
add_name.append(c[0])
filters.append(("Address", "name", "in", add_name))
ignore_permissions = True
return get_list(doctype, txt, filters, limit_start, limit_page_length, ignore_permissions=ignore_permissions)
def has_website_permission(doc, ptype, user, verbose=False):
"""Returns true if there is a related lead or contact related to this document"""
contact_name = frappe.db.get_value("Contact", {"email_id": frappe.session.user})
if contact_name:
contact = frappe.get_doc('Contact', contact_name)
return contact.has_common_link(doc)
lead_name = frappe.db.get_value("Lead", {"email_id": frappe.session.user})
if lead_name:
return doc.has_link('Lead', lead_name)
return False
def get_address_templates(address):
result = frappe.db.get_value("Address Template", \
{"country": address.get("country")}, ["name", "template"])
if not result:
result = frappe.db.get_value("Address Template", \
{"is_default": 1}, ["name", "template"])
if not result:
frappe.throw(_("No default Address Template found. Please create a new one from Setup > Printing and Branding > Address Template."))
else:
return result
@frappe.whitelist()
def get_shipping_address(company, address = None):
filters = [
["Dynamic Link", "link_doctype", "=", "Company"],
["Dynamic Link", "link_name", "=", company],
["Address", "is_your_company_address", "=", 1]
]
fields = ["*"]
if address and frappe.db.get_value('Dynamic Link',
{'parent': address, 'link_name': company}):
filters.append(["Address", "name", "=", address])
address = frappe.get_all("Address", filters=filters, fields=fields) or {}
if address:
address_as_dict = address[0]
name, address_template = get_address_templates(address_as_dict)
return address_as_dict.get("name"), frappe.render_template(address_template, address_as_dict)
def get_company_address(company):
ret = frappe._dict()
ret.company_address = get_default_address('Company', company)
ret.company_address_display = get_address_display(ret.company_address)
return ret
def address_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond
link_doctype = filters.pop('link_doctype')
link_name = filters.pop('link_name')
condition = ""
for fieldname, value in iteritems(filters):
condition += " and {field}={value}".format(
field=fieldname,
value=value
)
return frappe.db.sql("""select
`tabAddress`.name, `tabAddress`.city, `tabAddress`.country
from
`tabAddress`, `tabDynamic Link`
where
`tabDynamic Link`.parent = `tabAddress`.name and
`tabDynamic Link`.parenttype = 'Address' and
`tabDynamic Link`.link_doctype = %(link_doctype)s and
`tabDynamic Link`.link_name = %(link_name)s and
`tabAddress`.`{key}` like %(txt)s
{mcond} {condition}
order by
if(locate(%(_txt)s, `tabAddress`.name), locate(%(_txt)s, `tabAddress`.name), 99999),
`tabAddress`.idx desc, `tabAddress`.name
limit %(start)s, %(page_len)s """.format(
mcond=get_match_cond(doctype),
key=frappe.db.escape(searchfield),
condition=condition or ""),
{
'txt': "%%%s%%" % frappe.db.escape(txt),
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'link_name': link_name,
'link_doctype': link_doctype
})
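# Illustrative usage sketch (not part of this module; names are hypothetical):
#   addr_name = get_default_address("Customer", "CUST-00001")
#   html = get_address_display(addr_name)
# get_address_display accepts either an Address name or an address dict and
# renders it through the matching Address Template.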
|
|
from __future__ import division
from itertools import count
from math import ceil, sqrt
from functools import wraps
import bisect
import os
from toolz import (merge, partial, accumulate, unique, first, dissoc, valmap,
                   partition)
import toolz
from operator import getitem, setitem
from datetime import datetime
import pandas as pd
import numpy as np
import operator
import gzip
import bz2
import bcolz
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import async
from .. import threaded
from ..compatibility import unicode, apply
from ..utils import repr_long_list, IndexCallable, pseudorandom
from .utils import shard_df_on_index
from ..context import _globals
def _concat(args):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if len(args) == 1:
return args[0]
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
result = pd.concat(map(pd.Series, args))
result = type(args[0])(result.values)
result.name = args[0].name
return result
return args
def compute(*args, **kwargs):
""" Compute multiple dataframes at once """
if len(args) == 1 and isinstance(args[0], (tuple, list)):
args = args[0]
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return list(map(_concat, results))
tokens = ('-%d' % i for i in count(1))
class Scalar(object):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
def __init__(self, dsk, _name):
self.dask = dsk
self._name = _name
self.divisions = [None, None]
@property
def _args(self):
return (self.dask, self._name)
def _keys(self):
return [(self._name, 0)]
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
from .optimize import optimize
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
class _Frame(object):
""" Superclass for DataFrame and Series """
@property
def npartitions(self):
return len(self.divisions) - 1
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
from .optimize import optimize
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
def index(self):
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name, None, self.divisions)
@property
def known_divisions(self):
return len(self.divisions) > 0 and self.divisions[0] is not None
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + next(tokens)
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
dsk2 = dict((key, (getitem, cache, (tuple, list(key))))
for key in self._keys())
return type(self)(dsk2, name, self.column_info, self.divisions)
@wraps(pd.DataFrame.drop_duplicates)
def drop_duplicates(self):
chunk = lambda s: s.drop_duplicates()
return aca(self, chunk=chunk, aggregate=chunk, columns=self.columns)
def __len__(self):
return reduction(self, len, np.sum).compute()
def map_partitions(self, func, columns=None):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
"""
if columns is None:
columns = self.column_info
name = 'map_partitions' + next(tokens)
dsk = dict(((name, i), (func, (self._name, i)))
for i in range(self.npartitions))
return type(self)(merge(dsk, self.dask), name,
columns, self.divisions)
def random_split(self, p, seed=None):
""" Pseudorandomly split dataframe into different pieces row-wise
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent seed
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], seed=123) # doctest: +SKIP
"""
seeds = np.random.RandomState(seed).randint(0, np.iinfo(np.int32).max,
self.npartitions)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self.column_info,
self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=10, compute=True):
""" First n rows of the dataset
        Caveat: this only checks the first n rows of the first partition.
"""
name = 'head' + next(tokens)
dsk = {(name, 0): (head, (self._name, 0), n)}
result = type(self)(merge(self.dask, dsk), name,
self.column_info, self.divisions[:2])
if compute:
result = result.compute()
return result
def _loc(self, ind):
""" Helper function for the .loc accessor """
if isinstance(ind, Series):
return self._loc_series(ind)
elif isinstance(ind, slice):
return self._loc_slice(ind)
else:
return self._loc_element(ind)
def _loc_series(self, ind):
name = 'loc-series' + next(tokens)
if not self.divisions == ind.divisions:
raise ValueError("Partitions of dataframe and index not the same")
return map_partitions(lambda df, ind: df.loc[ind],
self.columns, self, ind)
def _loc_element(self, ind):
name = 'loc-element' + next(tokens)
part = _partition_of_index_value(self.divisions, ind)
dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}
return type(self)(merge(self.dask, dsk), name,
self.column_info, [ind, ind])
def _loc_slice(self, ind):
name = 'loc-slice' + next(tokens)
assert ind.step in (None, 1)
if ind.start:
start = _partition_of_index_value(self.divisions, ind.start)
else:
start = 0
if ind.stop is not None:
stop = _partition_of_index_value(self.divisions, ind.stop)
else:
stop = self.npartitions - 1
istart = _coerce_loc_index(self.divisions, ind.start)
istop = _coerce_loc_index(self.divisions, ind.stop)
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
divisions = [istart, istop]
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
divisions = ((max(istart, self.divisions[start])
if ind.start is not None
else self.divisions[0],) +
self.divisions[start+1:stop+1] +
(min(istop, self.divisions[stop+1])
if ind.stop is not None
else self.divisions[-1],))
assert len(divisions) == len(dsk) + 1
return type(self)(merge(self.dask, dsk),
name, self.column_info,
divisions)
@property
def loc(self):
return IndexCallable(self._loc)
@property
def iloc(self):
raise AttributeError("Dask Dataframe does not support iloc")
def repartition(self, divisions):
""" Repartition dataframe along new divisions
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
"""
return repartition(self, divisions)
def __getstate__(self):
return self.__dict__
    def __setstate__(self, state):
        self.__dict__ = state
@wraps(pd.Series.fillna)
def fillna(self, value):
func = getattr(self._partition_type, 'fillna')
return map_partitions(func, self.column_info, self, value)
def sample(self, frac):
""" Random sample of items
This only implements the ``frac`` option from pandas.
See Also:
pd.DataFrame.sample
"""
func = getattr(self._partition_type, 'sample')
return map_partitions(func, self.column_info, self, None, frac)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
def __init__(self, dsk, _name, name, divisions):
self.dask = dsk
self._name = _name
self.name = name
self.divisions = tuple(divisions)
self.dt = DatetimeAccessor(self)
self.str = StringAccessor(self)
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def dtype(self):
return self.head().dtype
@property
def column_info(self):
return self.name
@property
def columns(self):
return (self.name,)
def __repr__(self):
return ("dd.Series<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
def quantiles(self, q):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
return quantiles(self, q)
def __getitem__(self, key):
name = 'getitem' + next(tokens)
if isinstance(key, Series) and self.divisions == key.divisions:
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.inv, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
@wraps(pd.Series.sum)
def sum(self):
return reduction(self, pd.Series.sum, np.sum)
@wraps(pd.Series.max)
def max(self):
return reduction(self, pd.Series.max, np.max)
@wraps(pd.Series.min)
def min(self):
return reduction(self, pd.Series.min, np.min)
@wraps(pd.Series.count)
def count(self):
return reduction(self, pd.Series.count, np.sum)
@wraps(pd.Series.nunique)
def nunique(self):
return self.drop_duplicates().count()
@wraps(pd.Series.mean)
def mean(self):
def chunk(ser):
return (ser.sum(), ser.count())
def agg(seq):
sums, counts = list(zip(*seq))
return 1.0 * sum(sums) / sum(counts)
return reduction(self, chunk, agg)
@wraps(pd.Series.var)
def var(self, ddof=1):
def chunk(ser):
return (ser.sum(), (ser**2).sum(), ser.count())
def agg(seq):
x, x2, n = list(zip(*seq))
x = float(sum(x))
x2 = float(sum(x2))
n = sum(n)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
return reduction(self, chunk, agg)
@wraps(pd.Series.std)
def std(self, ddof=1):
name = 'std' + next(tokens)
df = self.var(ddof=ddof)
dsk = {(name, 0): (sqrt, (df._name, 0))}
return Scalar(merge(df.dask, dsk), name)
@wraps(pd.Series.value_counts)
def value_counts(self):
chunk = lambda s: s.value_counts()
agg = lambda s: s.groupby(level=0).sum()
return aca(self, chunk=chunk, aggregate=agg, columns=self.columns)
@wraps(pd.Series.isin)
def isin(self, other):
return elemwise(pd.Series.isin, self, other)
@wraps(pd.Series.map)
def map(self, arg, na_action=None):
return elemwise(pd.Series.map, self, arg, na_action, name=self.name)
@wraps(pd.Series.astype)
def astype(self, dtype):
return map_partitions(pd.Series.astype, self.name, self, dtype)
@wraps(pd.Series.dropna)
def dropna(self):
return map_partitions(pd.Series.dropna, self.name, self)
@wraps(pd.Series.between)
def between(self, left, right, inclusive=True):
return map_partitions(pd.Series.between, self.name, self, left, right,
inclusive)
@wraps(pd.Series.clip)
def clip(self, lower=None, upper=None):
return map_partitions(pd.Series.clip, self.name, self, lower, upper)
@wraps(pd.Series.notnull)
def notnull(self):
return map_partitions(pd.Series.notnull, self.name, self)
class Index(Series):
pass
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
This is a work in progress. It is buggy and far from complete.
Please do not use it yet.
Parameters
----------
dask: dict
The dask graph to compute this Dataframe
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of strings
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
def __init__(self, dask, name, columns, divisions):
self.dask = dask
self._name = name
self.columns = tuple(columns)
self.divisions = tuple(divisions)
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
def __getitem__(self, key):
if isinstance(key, (str, unicode)):
name = self._name + '.' + key
if key in self.columns:
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return Series(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, list):
name = '%s[%s]' % (self._name, str(key))
if all(k in self.columns for k in key):
dsk = dict(((name, i), (operator.getitem,
(self._name, i),
(list, key)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'slice-with-series' + next(tokens)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, key.dask, dsk), name,
self.columns, self.divisions)
raise NotImplementedError()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as e:
try:
return self[key]
except NotImplementedError:
raise e
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.columns)))
def __repr__(self):
return ("dd.DataFrame<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
@property
def dtypes(self):
return get(self.dask, self._keys()[0]).dtypes
def set_index(self, other, **kwargs):
return set_index(self, other, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See also:
set_index
"""
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
return self.columns
def groupby(self, key, **kwargs):
return GroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
return categorize(self, columns, **kwargs)
@wraps(pd.DataFrame.assign)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df = pd.DataFrame(columns=self.columns)
df2 = df.assign(**dict((k, []) for k in kwargs))
return elemwise(_assign, self, *pairs, columns=list(df2.columns))
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _partition_of_index_value(divisions, val):
""" In which partition does this value lie?
>>> _partition_of_index_value([0, 5, 10], 3)
0
>>> _partition_of_index_value([0, 5, 10], 8)
1
>>> _partition_of_index_value([0, 5, 10], 100)
1
>>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions
1
"""
if divisions[0] is None:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
val = _coerce_loc_index(divisions, val)
i = bisect.bisect_right(divisions, val)
return min(len(divisions) - 2, max(0, i - 1))
def _loc(df, start, stop, include_right_boundary=True):
"""
>>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])
>>> _loc(df, 2, None)
x
2 20
2 30
3 40
4 50
>>> _loc(df, 1, 3)
x
1 10
2 20
2 30
3 40
>>> _loc(df, 1, 3, include_right_boundary=False)
x
1 10
2 20
2 30
"""
result = df.loc[slice(start, stop)]
if not include_right_boundary:
# result = df[df.index != stop]
result = result.iloc[:result.index.get_slice_bound(stop, 'left',
result.index.inferred_type)]
return result
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o)
return o
def head(x, n):
""" First n elements of dask.Dataframe or dask.Series """
return x.head(n)
def consistent_name(names):
""" New name for series in elementwise operation
    If all truthy names are the same, choose that one; otherwise choose None.
"""
allnames = set()
for name in names:
if name is None:
continue
if isinstance(name, (tuple, list)):
allnames.update(name)
else:
allnames.add(name)
if len(allnames) == 1:
return first(allnames)
else:
return None
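# Illustrative behaviour of consistent_name:
#   consistent_name(['x', None, 'x'])  -> 'x'
#   consistent_name(['x', 'y'])        -> None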
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.get('columns', None)
name = kwargs.get('name', None)
_name = 'elemwise' + next(tokens)
dfs = [arg for arg in args if isinstance(arg, _Frame)]
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, _Frame)]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
assert all(df.divisions == dfs[0].divisions for df in dfs)
assert all(df.npartitions == dfs[0].npartitions for df in dfs)
dsk = dict(((_name, i), (op2,) + frs)
for i, frs in enumerate(zip(*[df._keys() for df in dfs])))
if columns is not None:
return DataFrame(merge(dsk, *[df.dask for df in dfs]),
_name, columns, dfs[0].divisions)
else:
column_name = name or consistent_name(n for df in dfs
for n in df.columns)
return Series(merge(dsk, *[df.dask for df in dfs]),
_name, column_name, dfs[0].divisions)
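# Note (illustrative): the Series operator methods above (__add__, __gt__, ...)
# all route through elemwise.  A call such as ``s + 1`` therefore adds one task
# per partition; the non-Frame operand (here ``1``) is captured via
# partial_by_order and applied when each partition is computed.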
def remove_empties(seq):
""" Remove items of length 0
>>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
[1, 2, 4, 5]
>>> remove_empties([('empty', np.nan)])
[nan]
>>> remove_empties([])
[]
"""
if not seq:
return seq
seq2 = [x for x in seq
if not (isinstance(x, tuple) and x and x[0] == 'empty')]
if seq2:
return seq2
else:
return [seq[0][1]]
def empty_safe(func, arg):
"""
>>> empty_safe(sum, [1, 2, 3])
6
>>> empty_safe(sum, [])
('empty', 0)
"""
if len(arg) == 0:
return ('empty', func(arg))
else:
return func(arg)
def reduction(x, chunk, aggregate):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
a = 'reduction-chunk' + next(tokens)
dsk = dict(((a, i), (empty_safe, chunk, (x._name, i)))
for i in range(x.npartitions))
b = 'reduction-aggregation' + next(tokens)
dsk2 = {(b, 0): (aggregate, (remove_empties,
[(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
def concat(dfs):
""" Concatenate dataframes along rows
Currently only supports unknown divisions
"""
if any(df.known_divisions for df in dfs):
# For this to work we need to add a final division for "maximum element"
raise NotImplementedError("Concat can't currently handle dataframes"
" with known divisions")
name = 'concat' + next(tokens)
dsk = dict()
i = 0
for df in dfs:
for key in df._keys():
dsk[(name, i)] = key
i += 1
divisions = [None] * (i + 1)
return DataFrame(merge(dsk, *[df.dask for df in dfs]), name,
dfs[0].columns, divisions)
class GroupBy(object):
def __init__(self, df, index=None, **kwargs):
self.df = df
self.index = index
self.kwargs = kwargs
if isinstance(index, list):
assert all(i in df.columns for i in index)
elif isinstance(index, Series):
assert index.divisions == df.divisions
else:
assert index in df.columns
def apply(self, func, columns=None):
if (isinstance(self.index, Series) and
self.index._name == self.df.index._name):
df = self.df
return df.map_partitions(lambda df: df.groupby(level=0).apply(func),
columns=columns)
else:
# df = set_index(self.df, self.index, **self.kwargs)
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(lambda df, ind: df.groupby(ind).apply(func),
columns or self.df.columns,
self.df, self.index)
def __getitem__(self, key):
if key in self.df.columns:
return SeriesGroupBy(self.df, self.index, key)
else:
raise KeyError()
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.df.columns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except KeyError:
raise AttributeError()
class SeriesGroupBy(object):
def __init__(self, df, index, key, **kwargs):
self.df = df
self.index = index
self.key = key
self.kwargs = kwargs
    def apply(self, func, columns=None):
# df = set_index(self.df, self.index, **self.kwargs)
if self.index._name == self.df.index._name:
df = self.df
return df.map_partitions(
lambda df: df.groupby(level=0)[self.key].apply(func),
columns=columns)
else:
df = shuffle(self.df, self.index, **self.kwargs)
return map_partitions(
lambda df, index: df.groupby(index).apply(func),
columns or self.df.columns,
self.df, self.index)
def sum(self):
chunk = lambda df, index: df.groupby(index)[self.key].sum()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def min(self):
chunk = lambda df, index: df.groupby(index)[self.key].min()
agg = lambda df: df.groupby(level=0).min()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def max(self):
chunk = lambda df, index: df.groupby(index)[self.key].max()
agg = lambda df: df.groupby(level=0).max()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def count(self):
chunk = lambda df, index: df.groupby(index)[self.key].count()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def mean(self):
def chunk(df, index):
g = df.groupby(index)
return g.agg({self.key: ['sum', 'count']})
def agg(df):
g = df.groupby(level=0)
x = g.agg({(self.key, 'sum'): 'sum',
(self.key, 'count'): 'sum'})
result = x[self.key]['sum'] / x[self.key]['count']
result.name = self.key
return result
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def nunique(self):
def chunk(df, index):
# we call set_index here to force a possibly duplicate index
# for our reduce step
return (df.groupby(index)
.apply(pd.DataFrame.drop_duplicates, subset=self.key)
.set_index(index))
def agg(df):
return df.groupby(level=0)[self.key].nunique()
return aca([self.df, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def apply_concat_apply(args, chunk=None, aggregate=None, columns=None):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args
if isinstance(arg, _Frame))
a = 'apply-concat-apply--first' + next(tokens)
dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args])))
for i in range(args[0].npartitions))
b = 'apply-concat-apply--second' + next(tokens)
dsk2 = {(b, 0): (aggregate,
(pd.concat,
(list, [(a, i) for i in range(args[0].npartitions)])))}
return type(args[0])(
merge(dsk, dsk2, *[a.dask for a in args
if isinstance(a, _Frame)]),
b, columns, [None, None])
def map_partitions(func, columns, *args):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
"""
assert all(not isinstance(arg, _Frame) or
arg.divisions == args[0].divisions
for arg in args)
name = 'map-partitions' + next(tokens)
dsk = dict(((name, i), (apply, func,
(tuple, [(arg._name, i)
if isinstance(arg, _Frame)
else arg
for arg in args])))
for i in range(args[0].npartitions))
return type(args[0])(merge(dsk, *[arg.dask for arg in args
if isinstance(arg, _Frame)]),
name, columns, args[0].divisions)
aca = apply_concat_apply
def categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals,
ordered=False, name=col)
return df
def categorize(df, columns=None, **kwargs):
"""
Convert columns of dataframe to category dtype
This aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = df.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [df[col].drop_duplicates() for col in columns]
values = compute(distincts, **kwargs)
func = partial(categorize_block, categories=dict(zip(columns, values)))
return df.map_partitions(func, columns=df.columns)
def quantiles(df, q, **kwargs):
""" Approximate quantiles of column
Parameters
----------
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
assert len(df.columns) == 1
if not len(q):
return da.zeros((0,), chunks=((0,),))
from dask.array.percentile import _percentile, merge_percentiles
name = 'quantiles-1' + next(tokens)
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), q))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2' + next(tokens)
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3' + next(tokens)
merge_dsk = {(name3, 0): (merge_percentiles, q, [q] * df.npartitions,
sorted(val_dsk),
sorted(len_dsk))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return da.Array(dsk, name3, chunks=((len(q),),))
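# Illustrative usage (the column name is hypothetical):
#   df.amount.quantiles([25, 50, 75])  # -> dask.array of three approximate quantiles
# Percentiles are given on a 0-100 scale, as noted in the docstring above.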
def get(dsk, keys, get=None, **kwargs):
""" Get function with optimizations specialized to dask.Dataframe """
from .optimize import optimize
dsk2 = optimize(dsk, keys, **kwargs)
get = get or _globals['get'] or threaded.get
return get(dsk2, keys, **kwargs) # use synchronous scheduler for now
def pd_split(df, p, seed=0):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], seed=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, seed)
return [df.iloc[index == i] for i in range(len(p))]
def repartition_divisions(a, b, name, out1, out2):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a: tuple
old divisions
b: tuple
new divisions
name: str
name of old dataframe
out1: str
name of temporary splits
out2: str
name of new dataframe
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),
('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
assert a[0] == b[0]
assert a[-1] == b[-1]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1
k = 0
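    # First pass: walk the old (a) and new (b) boundaries in lockstep, emitting
    # a _loc slice of the covering old partition for every interval between
    # consecutive boundaries.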
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k = k + 1
tup = d[(out1, k - 1)]
d[(out1, k - 1)] = tup[:-1] + (True,)
c.append(a[-1])
i, j = 0, 1
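    # Second pass: for each new division interval, reuse the single temporary
    # split directly or concatenate the splits that fall inside it into the
    # final output partition.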
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
d[(out2, j - 1)] = (pd.concat, (list, tmp))
j += 1
return d
def repartition(df, divisions):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
if isinstance(df, _Frame):
tmp = 'repartition-split' + next(tokens)
out = 'repartition-merge' + next(tokens)
dsk = repartition_divisions(df.divisions, divisions, df._name, tmp, out)
return type(df)(merge(df.dask, dsk), out, df.column_info, divisions)
elif isinstance(df, pd.core.generic.NDFrame):
name = 'repartition-dataframe' + next(tokens)
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
if isinstance(df, pd.DataFrame):
return DataFrame(dsk, name, df.columns, divisions)
if isinstance(df, pd.Series):
return Series(dsk, name, df.name, divisions)
class DatetimeAccessor(object):
""" Datetime functions
Examples
--------
>>> df.mydatetime.dt.microsecond # doctest: +SKIP
"""
def __init__(self, series):
self._series = series
def __dir__(self):
return sorted(set(dir(type(self)) + dir(pd.Series.dt)))
def _property_map(self, key):
return self._series.map_partitions(lambda s: getattr(s.dt, key))
def _function_map(self, key, *args):
func = lambda s: getattr(s.dt, key)(*args)
return self._series.map_partitions(func, *args)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(pd.Series.dt):
if isinstance(getattr(pd.Series.dt, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise
class StringAccessor(object):
""" String functions
Examples
--------
>>> df.name.lower() # doctest: +SKIP
"""
def __init__(self, series):
self._series = series
def __dir__(self):
return sorted(set(dir(type(self)) + dir(pd.Series.str)))
def _property_map(self, key):
return self._series.map_partitions(lambda s: getattr(s.str, key))
def _function_map(self, key, *args):
func = lambda s: getattr(s.str, key)(*args)
return self._series.map_partitions(func, *args)
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(pd.Series.str):
if isinstance(getattr(pd.Series.str, key), property):
return self._property_map(key)
else:
return partial(self._function_map, key)
else:
raise
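# Sketch of the dispatch rule shared by DatetimeAccessor and StringAccessor
# (illustrative only): attributes that are properties on the underlying pandas
# accessor are mapped over partitions immediately, while callables are wrapped
# in a partial so that their arguments can be supplied later.
def _example_accessor_dispatch():  # pragma: no cover - documentation sketch
    # 'date' is a property of pd.Series.dt, so __getattr__ goes through
    # _property_map; 'lower' is a method of pd.Series.str, so __getattr__
    # returns partial(self._function_map, 'lower') instead.
    assert isinstance(getattr(pd.Series.dt, 'date'), property)
    assert not isinstance(getattr(pd.Series.str, 'lower'), property)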
from .shuffle import set_index, set_partition, shuffle
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MigrationServiceGrpcTransport
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
class MigrationServiceClientMeta(type):
"""Metaclass for the MigrationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MigrationServiceTransport]]
_transport_registry["grpc"] = MigrationServiceGrpcTransport
_transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
"""A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MigrationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MigrationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
MigrationServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def annotated_dataset_path(
project: str, dataset: str, annotated_dataset: str,
) -> str:
"""Returns a fully-qualified annotated_dataset string."""
return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
project=project, dataset=dataset, annotated_dataset=annotated_dataset,
)
@staticmethod
def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
"""Parses a annotated_dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
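    # NOTE: dataset_path/parse_dataset_path (and model_path/parse_model_path)
    # are defined more than once below because migratable resources use several
    # different resource-name patterns; the definition that appears last in the
    # class body is the one that stays bound to the name.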
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def version_path(project: str, model: str, version: str,) -> str:
"""Returns a fully-qualified version string."""
return "projects/{project}/models/{model}/versions/{version}".format(
project=project, model=model, version=version,
)
@staticmethod
def parse_version_path(path: str) -> Dict[str, str]:
"""Parses a version path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MigrationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the migration service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, MigrationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MigrationServiceTransport):
# transport is a MigrationServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def search_migratable_resources(
self,
request: migration_service.SearchMigratableResourcesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchMigratableResourcesPager:
r"""Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Args:
request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
The request object. Request message for
[MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
parent (str):
Required. The location that the migratable resources
should be searched from. It's the Vertex AI location
that the resources can be migrated to, not the
resources' original location. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager:
Response message for
[MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a migration_service.SearchMigratableResourcesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, migration_service.SearchMigratableResourcesRequest):
request = migration_service.SearchMigratableResourcesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.search_migratable_resources
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchMigratableResourcesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def batch_migrate_resources(
self,
request: migration_service.BatchMigrateResourcesRequest = None,
*,
parent: str = None,
migrate_resource_requests: Sequence[
migration_service.MigrateResourceRequest
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Args:
request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest):
The request object. Request message for
[MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
parent (str):
                Required. The location that the migrated resource will
live in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
Required. The request messages
specifying the resources to migrate.
They must be in the same location as the
destination. Up to 50 resources can be
migrated in one batch.
This corresponds to the ``migrate_resource_requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
Response message for
[MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, migrate_resource_requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a migration_service.BatchMigrateResourcesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
request = migration_service.BatchMigrateResourcesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if migrate_resource_requests is not None:
request.migrate_resource_requests = migrate_resource_requests
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
migration_service.BatchMigrateResourcesResponse,
metadata_type=migration_service.BatchMigrateResourcesOperationMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("MigrationServiceClient",)
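# Illustrative sketch (not part of the generated surface): the *_path helpers
# are plain string builders and the parse_* helpers invert them, so they can
# be exercised without credentials or network access. The project and dataset
# names below are made up.
def _example_resource_paths():  # pragma: no cover - documentation sketch
    path = MigrationServiceClient.dataset_path("my-project", "my-dataset")
    assert path == "projects/my-project/datasets/my-dataset"
    assert MigrationServiceClient.parse_dataset_path(path) == {
        "project": "my-project",
        "dataset": "my-dataset",
    }
    # The default endpoint converts to its mTLS counterpart in the same way the
    # DEFAULT_MTLS_ENDPOINT class attribute is derived.
    mtls_endpoint = MigrationServiceClient._get_default_mtls_endpoint(
        "aiplatform.googleapis.com"
    )
    assert mtls_endpoint == "aiplatform.mtls.googleapis.com"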
|
|
#!/usr/bin/env python
import os
import sys
import imp
import uuid
import argparse
import subprocess
# ==============================================================================
def _find_files(path, recursive=True):
found_files = []
for root, folders, files in os.walk(path):
for file_name in files:
file_name = os.path.normcase(file_name)
if file_name.endswith('.py'):
found_files.append(os.path.join(root, file_name))
if recursive:
folders[:] = (folder for folder in folders
if not folder.startswith('.'))
else:
folders[:] = []
found_files.sort()
return found_files
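# ==============================================================================
# Quick self-check sketch for _find_files (illustrative only, uses a throwaway
# temporary directory): only *.py files are collected and dot-folders are
# pruned when walking recursively.
def _example_find_files():  # pragma: no cover - documentation sketch
    import shutil
    import tempfile
    base = tempfile.mkdtemp()
    try:
        open(os.path.join(base, 'a.py'), 'w').close()
        open(os.path.join(base, 'notes.txt'), 'w').close()
        os.mkdir(os.path.join(base, '.hidden'))
        open(os.path.join(base, '.hidden', 'b.py'), 'w').close()
        found = _find_files(base)
        assert [os.path.basename(f) for f in found] == ['a.py']
    finally:
        shutil.rmtree(base)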
# ==============================================================================
def _load_module(name, path):
fp, pathname, description = imp.find_module(name, [path])
return imp.load_module(uuid.uuid4().hex, fp, pathname, description)
# ==============================================================================
def _run_tests(core_dir, tests_dir, source_dir, with_coverage=True):
module = _load_module('run', tests_dir)
if with_coverage:
try:
import coverage
except Exception:
print("WARNING: Module 'coverage' has not been found")
cov = None
else:
cov = coverage.coverage(source=[source_dir])
else:
cov = None
if cov is not None:
cov.start()
sys.path[0:0] = [core_dir]
result = module.run([])
if cov is not None:
cov.stop()
cov.save()
if result:
sys.exit(result)
# ==============================================================================
def _run_flake8(source_files, ignore=None, complexity=-1):
try:
import flake8.main
except Exception:
print("WARNING: Module 'flake8' has not been found")
return
if not isinstance(source_files, (list, tuple, frozenset, set)):
source_files = (source_files,)
ignore_errors = ('F403', 'E241')
if ignore:
if isinstance(ignore, (list, tuple, frozenset, set)):
ignore = tuple(ignore)
else:
ignore = (ignore,)
ignore_errors += ignore
for source_file in source_files:
print("flake8 %s" % source_file)
result = flake8.main.check_file(source_file,
ignore=ignore_errors,
complexity=complexity)
if result:
sys.exit(result)
# ==============================================================================
def _run_cmd_status(cmd, path=None):
if path:
env = os.environ.copy()
env['PYTHONPATH'] = path
else:
env = None
print(cmd)
p = subprocess.Popen(cmd, env=env, shell=False)
return p.wait()
# ==============================================================================
def _run_cmd(cmd, path=None):
status = _run_cmd_status(cmd, path)
if status:
sys.exit(status)
# ==============================================================================
def _fetch_repo(cur_dir, repo_name, repo_dir=None):
if repo_dir:
return os.path.abspath(repo_dir)
repo_dir = os.path.join(cur_dir, repo_name)
default_branch = 'master'
branch = os.environ.get('TRAVIS_BRANCH')
if not branch:
branch = os.environ.get('APPVEYOR_REPO_BRANCH', default_branch)
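    # Prefer the branch under test (taken from the CI environment); if the
    # companion repository has no such branch, fall back to the default branch.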
cmd = ["git", "clone", "--depth", "1",
"https://github.com/aqualid/%s.git" % repo_name, repo_dir]
status = _run_cmd_status(cmd + ["-b", branch])
if status:
if branch == default_branch:
sys.exit(status)
_run_cmd(cmd + ["-b", default_branch])
return repo_dir
# ==============================================================================
def run(core_dir, tools_dir, examples_dir, run_tests=None):
tests_dir = os.path.join(tools_dir, 'tests')
source_dir = os.path.join(tools_dir, 'tools')
core_dir = _fetch_repo(tools_dir, 'aqualid', core_dir)
if (run_tests is None) or 'tests' in run_tests:
        with_coverage = __name__ == '__main__'
_run_tests(core_dir, tests_dir, source_dir, with_coverage)
if (run_tests is None) or 'flake8' in run_tests:
# check for PEP8 violations, max complexity and other standards
_run_flake8(_find_files(source_dir), complexity=9)
# check for PEP8 violations
_run_flake8(_find_files(tests_dir))
if (run_tests is None) or 'examples' in run_tests:
examples_dir = _fetch_repo(tools_dir, 'examples', examples_dir)
module = _load_module('run_ci', examples_dir)
module.run(core_dir, tools_dir, examples_dir)
# ==============================================================================
def _parse_args(choices):
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--skip', action='append', choices=choices,
dest='skip_tests',
help="Skip specific tests")
args_parser.add_argument('--run', action='append', choices=choices,
dest='run_tests',
help="Run specific tests")
args_parser.add_argument('--core-dir', action='store',
dest='core_dir', metavar='PATH',
help="Aqualid core directory. "
"By default it will be fetched from GitHub.")
args_parser.add_argument('--examples-dir', action='store',
dest='examples_dir', metavar='PATH',
help="Aqualid examples directory. "
"By default it will be fetched from GitHub.")
return args_parser.parse_args()
# ==============================================================================
def main():
choices = ['tests', 'flake8', 'examples']
args = _parse_args(choices)
if args.run_tests is None:
run_tests = set(choices)
else:
run_tests = set(args.run_tests)
if args.skip_tests:
run_tests.difference_update(args.skip_tests)
tools_dir = os.path.abspath(os.path.dirname(__file__))
run(args.core_dir, tools_dir, args.examples_dir, run_tests)
# ==============================================================================
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
flask.sessions
~~~~~~~~~~~~~~
Implements cookie based sessions based on itsdangerous.
:copyright: (c) 2016 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from .helpers import total_seconds
from itsdangerous import URLSafeTimedSerializer, BadSignature
class SessionMixin(object):
"""Expands a basic dictionary with an accessors that are expected
by Flask extensions and users for the session.
"""
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
#: this reflects the ``'_permanent'`` key in the dict.
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
#: some session backends can tell you if a session is new, but that is
#: not necessarily guaranteed. Use with caution. The default mixin
#: implementation just hardcodes ``False`` in.
new = False
#: for some backends this will always be ``True``, but some backends will
#: default this to false and detect changes in the dictionary for as
#: long as changes do not happen on mutable structures in the session.
#: The default mixin implementation just hardcodes ``True`` in.
modified = True
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif isinstance(value, bytes):
return {' b': b64encode(value).decode('ascii')}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
from flask.debughelpers import UnexpectedUnicodeError
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
class TaggedJSONSerializer(object):
"""A customized JSON serializer that supports a few extra types that
we take for granted when serializing (tuples, markup objects, datetime).
"""
def dumps(self, value):
return json.dumps(_tag(value), separators=(',', ':'))
def loads(self, value):
def object_hook(obj):
if len(obj) != 1:
return obj
the_key, the_value = next(iteritems(obj))
if the_key == ' t':
return tuple(the_value)
elif the_key == ' u':
return uuid.UUID(the_value)
elif the_key == ' b':
return b64decode(the_value)
elif the_key == ' m':
return Markup(the_value)
elif the_key == ' d':
return parse_date(the_value)
return obj
return json.loads(value, object_hook=object_hook)
session_json_serializer = TaggedJSONSerializer()
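# Roundtrip sketch (illustrative only; assumes flask.json can serialize without
# an active application context): tuples, bytes and UUIDs survive a dumps/loads
# cycle because _tag wraps them in single-key dicts that object_hook unwraps.
def _example_tagged_roundtrip():  # pragma: no cover - documentation sketch
    original = {'pair': (1, 2), 'blob': b'\x00\x01', 'id': uuid.uuid4()}
    restored = session_json_serializer.loads(
        session_json_serializer.dumps(original))
    assert restored == original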
class SecureCookieSession(CallbackDict, SessionMixin):
"""Base class for sessions based on signed cookies."""
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class NullSession(SecureCookieSession):
"""Class used to generate nicer error messages if sessions are not
available. Will still allow read-only access to the empty session
but fail on setting.
"""
def _fail(self, *args, **kwargs):
raise RuntimeError('The session is unavailable because no secret '
'key was set. Set the secret_key on the '
'application to something unique and secret.')
__setitem__ = __delitem__ = clear = pop = popitem = \
update = setdefault = _fail
del _fail
class SessionInterface(object):
"""The basic interface you have to implement in order to replace the
default session interface which uses werkzeug's securecookie
implementation. The only methods you have to implement are
:meth:`open_session` and :meth:`save_session`, the others have
useful defaults which you don't need to change.
The session object returned by the :meth:`open_session` method has to
provide a dictionary like interface plus the properties and methods
from the :class:`SessionMixin`. We recommend just subclassing a dict
and adding that mixin::
class Session(dict, SessionMixin):
pass
If :meth:`open_session` returns ``None`` Flask will call into
:meth:`make_null_session` to create a session that acts as replacement
if the session support cannot work because some requirement is not
fulfilled. The default :class:`NullSession` class that is created
will complain that the secret key was not set.
To replace the session interface on an application all you have to do
is to assign :attr:`flask.Flask.session_interface`::
app = Flask(__name__)
app.session_interface = MySessionInterface()
.. versionadded:: 0.8
"""
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app):
"""Creates a null session which acts as a replacement object if the
real session support could not be loaded due to a configuration
error. This mainly aids the user experience because the job of the
        null session is to still support lookups without complaining, while
        modifications are answered with a helpful error message about what
failed.
This creates an instance of :attr:`null_session_class` by default.
"""
return self.null_session_class()
def is_null_session(self, obj):
"""Checks if a given object is a null session. Null sessions are
not asked to be saved.
This checks if the object is an instance of :attr:`null_session_class`
by default.
"""
return isinstance(obj, self.null_session_class)
def get_cookie_domain(self, app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop off the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
path = self.get_cookie_path(app)
if path != '/':
rv = rv.lstrip('.')
return rv
def get_cookie_path(self, app):
"""Returns the path for which the cookie should be valid. The
default implementation uses the value from the ``SESSION_COOKIE_PATH``
config var if it's set, and falls back to ``APPLICATION_ROOT`` or
uses ``/`` if it's ``None``.
"""
return app.config['SESSION_COOKIE_PATH'] or \
app.config['APPLICATION_ROOT'] or '/'
def get_cookie_httponly(self, app):
"""Returns True if the session cookie should be httponly. This
currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
config var.
"""
return app.config['SESSION_COOKIE_HTTPONLY']
def get_cookie_secure(self, app):
"""Returns True if the cookie should be secure. This currently
just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
"""
return app.config['SESSION_COOKIE_SECURE']
def get_expiration_time(self, app, session):
"""A helper method that returns an expiration date for the session
or ``None`` if the session is linked to the browser session. The
default implementation returns now + the permanent session
lifetime configured on the application.
"""
if session.permanent:
return datetime.utcnow() + app.permanent_session_lifetime
def should_set_cookie(self, app, session):
"""Indicates whether a cookie should be set now or not. This is
used by session backends to figure out if they should emit a
set-cookie header or not. The default behavior is controlled by
the ``SESSION_REFRESH_EACH_REQUEST`` config variable. If
        it's set to ``False``, then a cookie is only set if the session is
        modified; if set to ``True``, it's always set if the session is
permanent.
This check is usually skipped if sessions get deleted.
.. versionadded:: 1.0
"""
if session.modified:
return True
save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
return save_each and session.permanent
def open_session(self, app, request):
"""This method has to be implemented and must either return ``None``
in case the loading failed because of a configuration error or an
instance of a session object which implements a dictionary like
interface + the methods and attributes on :class:`SessionMixin`.
"""
raise NotImplementedError()
def save_session(self, app, session, response):
"""This is called for actual sessions returned by :meth:`open_session`
at the end of the request. This is still called during a request
context so if you absolutely need access to the request you can do
that.
"""
raise NotImplementedError()
class SecureCookieSessionInterface(SessionInterface):
"""The default session interface that stores sessions in signed cookies
through the :mod:`itsdangerous` module.
"""
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = 'cookie-session'
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(hashlib.sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = 'hmac'
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app):
if not app.secret_key:
return None
signer_kwargs = dict(
key_derivation=self.key_derivation,
digest_method=self.digest_method
)
return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
serializer=self.serializer,
signer_kwargs=signer_kwargs)
def open_session(self, app, request):
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = total_seconds(app.permanent_session_lifetime)
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
# Delete case. If there is no session we bail early.
# If the session was modified to be empty we remove the
# whole cookie.
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,
domain=domain, path=path)
return
# Modification case. There are upsides and downsides to
# emitting a set-cookie header each request. The behavior
# is controlled by the :meth:`should_set_cookie` method
# which performs a quick check to figure out if the cookie
# should be set or not. This is controlled by the
# SESSION_REFRESH_EACH_REQUEST config flag as well as
# the permanent flag on the session itself.
if not self.should_set_cookie(app, session):
return
httponly = self.get_cookie_httponly(app)
secure = self.get_cookie_secure(app)
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=httponly,
domain=domain, path=path, secure=secure)
|
|
from common_fixtures import * # NOQA
from test_shared_volumes import add_storage_pool
def test_inactive_agent(super_client, new_context):
host = super_client.reload(new_context.host)
agent = host.agent()
c = new_context.create_container()
assert c.state == 'running'
agent = super_client.wait_success(agent.deactivate())
assert agent.state == 'inactive'
c = new_context.create_container_no_success()
assert c.transitioning == 'error'
assert c.transitioningMessage == \
'Scheduling failed: No healthy hosts with sufficient ' \
'resources available'
assert c.state == 'error'
def test_allocation_with_shared_storage_pool(super_client, new_context):
count = 3
client = new_context.client
host2 = register_simulated_host(client)
register_simulated_host(client)
hosts = [new_context.host, host2]
hosts = wait_all_success(super_client, hosts)
sp = add_storage_pool(new_context, [new_context.host.uuid, host2.uuid])
sp_name = sp.name
for h in hosts:
assert h.state == 'active'
assert h.agent().state == 'active'
assert len(h.storagePools()) == 2
assert h.storagePools()[0].state == 'active'
assert h.storagePools()[1].state == 'active'
# Create a volume with a driver that points to a storage pool
v1 = client.create_volume(name=random_str(), driver=sp_name)
v1 = client.wait_success(v1)
assert v1.state == 'inactive'
data_volume_mounts = {'/con/path': v1.id}
containers = []
for _ in range(len(hosts) * count):
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
containers.append(c)
time.sleep(1) # Sleep makes the test faster as it reduces contention
wait_all_success(super_client, containers, timeout=60)
for c in containers:
new_context.wait_for_state(c, 'running')
def test_allocate_to_host_with_pool(new_context, super_client):
# If a volumeDriver is specified that maps to an existing pool, restrict
# allocation to hosts in that pool
client = new_context.client
host = new_context.host
host2 = register_simulated_host(client)
sp = add_storage_pool(new_context)
sp_name = sp.name
assert len(host.storagePools()) == 2
assert len(host2.storagePools()) == 1
# Fail to schedule because requested host is not in pool
c = new_context.create_container_no_success(
imageUuid=new_context.image_uuid,
volumeDriver=sp_name,
requestedHostId=host2.id,
dataVolume=['vol1:/con/path'])
c = super_client.reload(c)
assert c.state == 'error'
assert c.transitioning == 'error'
assert c.transitioningMessage.startswith(
'Scheduling failed: valid host(s) [')
def test_host_vnet_association(super_client, new_context):
account = new_context.project
image_uuid = new_context.image_uuid
host1 = new_context.host
register_simulated_host(new_context.client)
register_simulated_host(new_context.client)
network = super_client.create_network(accountId=account.id)
vnet = super_client.create_vnet(accountId=account.id,
networkId=network.id,
uri='sim://')
vnet = super_client.wait_success(vnet)
assert vnet.state == 'active'
subnet1 = super_client.create_subnet(accountId=account.id,
networkAddress='192.168.0.0',
cidrSize='16',
networkId=network.id,
startAddress='192.168.0.3',
endAddress='192.168.0.5')
subnet1 = super_client.wait_success(subnet1)
subnet2 = super_client.create_subnet(accountId=account.id,
networkAddress='192.168.2.0',
cidrSize='16',
networkId=network.id,
startAddress='192.168.2.3',
endAddress='192.168.3.5')
subnet2 = super_client.wait_success(subnet2)
subnet_map1 = super_client.create_subnet_vnet_map(accountId=account.id,
subnetId=subnet1.id,
vnetId=vnet.id)
subnet_map1 = super_client.wait_success(subnet_map1)
assert subnet_map1.state == 'active'
subnet_map2 = super_client.create_subnet_vnet_map(accountId=account.id,
subnetId=subnet2.id,
vnetId=vnet.id)
subnet_map2 = super_client.wait_success(subnet_map2)
assert subnet_map2.state == 'active'
vnet_map1 = super_client.create_host_vnet_map(accountId=account.id,
hostId=host1.id,
vnetId=vnet.id)
vnet_map1 = super_client.wait_success(vnet_map1)
assert vnet_map1.state == 'active'
hosts = set()
for _ in range(3):
vm = super_client.create_virtual_machine(accountId=account.id,
subnetIds=[subnet1.id],
imageUuid=image_uuid)
vm = super_client.wait_success(vm)
assert vm.state == 'running'
hosts.add(vm.hosts()[0].id)
for _ in range(3):
vm = super_client.create_virtual_machine(accountId=account.id,
subnetIds=[subnet2.id],
imageUuid=image_uuid)
vm = super_client.wait_success(vm)
assert vm.state == 'running'
hosts.add(vm.hosts()[0].id)
assert len(hosts) == 1
assert host1.id in hosts
def test_allocation_stay_associated_to_host(super_client, context):
c = context.create_container()
c = context.client.wait_success(c.stop())
assert c.state == 'stopped'
assert len(c.hosts()) == 1
def test_vnet_stickiness(super_client, new_context):
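    # Containers created with an explicit vnetId should be scheduled onto the
    # host already associated with that vnet and inherit its subnet/network.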
account_id = new_context.project.id
network = super_client.list_network(accountId=account_id,
kind='hostOnlyNetwork')[0]
subnet = super_client.list_subnet(accountId=account_id)[0]
image_uuid = new_context.image_uuid
host1 = new_context.host
host2 = register_simulated_host(new_context.client)
host3 = register_simulated_host(new_context.client)
valid_hosts = [host1.id, host2.id, host3.id]
containers = []
for i in range(0, 3):
c = super_client.reload(new_context.create_container(
requestedHostId=valid_hosts[i]))
containers.append(c)
actual_hosts = set()
for i in containers:
assert i.state == 'running'
actual_hosts.add(i.hosts()[0].id)
assert actual_hosts == set(valid_hosts)
assert len(network.vnets()) == 3
assert len(subnet.vnets()) == 3
c1_host_id = c.hosts()[0].id
c1_nic = c.nics()[0]
for _ in range(3):
c = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
vnetIds=[c1_nic.vnetId])
c = super_client.wait_success(c)
assert c.hosts()[0].id == c1_host_id
nic = c.nics()[0]
assert nic.subnetId == c1_nic.subnetId
assert nic.vnetId == c1_nic.vnetId
assert nic.networkId == c1_nic.networkId
for _ in range(3):
c = super_client.create_container(accountId=account_id,
imageUuid=image_uuid,
networkIds=[network.id],
vnetIds=[c1_nic.vnetId])
c = super_client.wait_success(c)
assert c.hosts()[0].id == c1_host_id
nic = c.nics()[0]
assert nic.subnetId == c1_nic.subnetId
assert nic.vnetId == c1_nic.vnetId
assert nic.networkId == c1_nic.networkId
def test_port_constraint(new_context):
host1 = new_context.host
client = new_context.client
image_uuid = new_context.image_uuid
containers = []
try:
c = client.wait_success(
client.create_container(imageUuid=image_uuid,
requestedHostId=host1.id,
ports=['8081:81/tcp']))
containers.append(c)
# try to deploy another container with same public port + protocol
c2 = client.wait_transitioning(
client.create_container(imageUuid=image_uuid,
ports=['8081:81/tcp']))
assert c2.transitioning == 'error'
assert c2.transitioningMessage == \
'Scheduling failed: host needs ports 8081/tcp available'
assert c2.state == 'error'
# try different public port
c3 = new_context.super_create_container(imageUuid=image_uuid,
ports=['8082:81/tcp'])
containers.append(c3)
# try different protocol
c4 = client.wait_success(
client.create_container(imageUuid=image_uuid,
ports=['8081:81/udp']))
containers.append(c4)
# UDP is now taken
c5 = client.wait_transitioning(
client.create_container(imageUuid=image_uuid,
ports=['8081:81/udp']))
assert c5.transitioning == 'error'
assert c5.transitioningMessage == \
'Scheduling failed: host needs ports 8081/udp available'
assert c5.state == 'error'
# try different bind IP
c6 = client.wait_success(
client.create_container(imageUuid=image_uuid,
requestedHostId=host1.id,
ports=['127.2.2.2:8081:81/tcp']))
containers.append(c6)
# Bind IP is now taken
c7 = client.wait_transitioning(
client.create_container(imageUuid=image_uuid,
ports=['127.2.2.2:8081:81/tcp']))
assert c7.transitioning == 'error'
assert c7.transitioningMessage == \
'Scheduling failed: host needs ports 8081/tcp available'
assert c7.state == 'error'
# increase host pool and check whether allocator picks other host
host2 = register_simulated_host(new_context.client)
c8 = client.wait_success(
client.create_container(imageUuid=image_uuid,
ports=['8081:81/tcp']))
assert c8.hosts()[0].id == host2.id
containers.append(c8)
finally:
for c in containers:
if c is not None:
new_context.delete(c)
def test_conflicting_ports_in_deployment_unit(new_context):
client = new_context.client
image_uuid = new_context.image_uuid
client.wait_success(client.create_container(name='reset',
imageUuid=image_uuid))
env = client.create_stack(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid, "ports": ['5555:6666']}
secondary_lc = {"imageUuid": image_uuid,
"name": "secondary", "ports": ['5555:6666']}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(client, svc, env)
assert 'Port 5555/tcp requested more than once.' in c.transitioningMessage
def test_simultaneous_port_allocation(new_context):
# This test ensures if two containers are allocated simultaneously, only
# one will get the port and the other will fail to allocate.
    # By nature, this test exercises a race condition, so it isn't perfect.
client = new_context.client
image_uuid = new_context.image_uuid
env = client.create_stack(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": image_uuid, "ports": ['5555:6666']}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=2)
svc = client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(client, svc, env)
assert 'host needs ports 5555/tcp available' in c.transitioningMessage
def _wait_for_compose_instance_error(client, service, env):
name = env.name + "-" + service.name + "%"
wait_for(
lambda: len(client.list_container(name_like=name, state='error')) > 0
)
return client.list_container(name_like=name, state='error')[0]
def test_request_host_override(new_context):
host = new_context.host
c = None
c2 = None
try:
c = new_context.super_create_container(validHostIds=[host.id],
ports=['8081:81/tcp'])
# try to deploy another container with same public port + protocol
# however, explicitly specify requestedHostId
c2 = new_context.super_create_container(requestedHostId=host.id,
ports=['8081:81/tcp'])
finally:
if c is not None:
new_context.delete(c)
if c2 is not None:
new_context.delete(c2)
def test_host_affinity(super_client, new_context):
host = new_context.host
host2 = register_simulated_host(new_context)
host = super_client.update(host, labels={'size': 'huge',
'latency': 'long'})
host2 = super_client.update(host2, labels={'size': 'tiny',
'latency': 'short'})
containers = []
try:
# test affinity
c = new_context.create_container(
environment={'constraint:size==huge': ''})
assert c.hosts()[0].id == host.id
containers.append(c)
c = new_context.create_container(
labels={'io.rancher.scheduler.affinity:host_label': 'size=huge'})
assert c.hosts()[0].id == host.id
containers.append(c)
# test anti-affinity
c = new_context.create_container(
environment={'constraint:size!=huge': ''})
assert c.hosts()[0].id == host2.id
containers.append(c)
c = new_context.create_container(
labels={'io.rancher.scheduler.affinity:host_label_ne':
'size=huge'})
assert c.hosts()[0].id == host2.id
containers.append(c)
# test soft affinity.
# prefer size==huge, but latency==~short if possible
c = new_context.create_container(
environment={
'constraint:size==huge': '',
'constraint:latency==~short': ''
})
assert c.hosts()[0].id == host.id
containers.append(c)
c = new_context.create_container(
labels={
'io.rancher.scheduler.affinity:host_label': 'size=huge',
'io.rancher.scheduler.affinity:host_label_soft_ne':
'latency=short'
})
assert c.hosts()[0].id == host.id
containers.append(c)
# test soft anti-affinity
c = new_context.create_container(
environment={'constraint:latency!=~long': ''})
assert c.hosts()[0].id == host2.id
containers.append(c)
c = new_context.create_container(
labels={'io.rancher.scheduler.affinity:host_label_soft_ne':
'latency=long'})
assert c.hosts()[0].id == host2.id
containers.append(c)
finally:
for c in containers:
new_context.delete(c)
def test_container_affinity(new_context):
# Two hosts
register_simulated_host(new_context)
containers = []
try:
name1 = 'affinity' + random_str()
c1 = new_context.create_container(
name=name1)
containers.append(c1)
c2 = new_context.create_container(
environment={'affinity:container==' + name1: ''})
containers.append(c2)
# check c2 is on same host as c1
assert c2.hosts()[0].id == c1.hosts()[0].id
c3 = new_context.create_container(
labels={'io.rancher.scheduler.affinity:container': name1})
containers.append(c3)
# check c3 is on same host as c1
assert c3.hosts()[0].id == c1.hosts()[0].id
c4 = new_context.create_container(
environment={'affinity:container==' + c1.uuid: ''})
containers.append(c4)
# check c4 is on same host as c1
assert c4.hosts()[0].id == c1.hosts()[0].id
c5 = new_context.create_container(
labels={
'io.rancher.scheduler.affinity:container': c1.uuid})
containers.append(c5)
# check c5 is on same host as c1
assert c5.hosts()[0].id == c1.hosts()[0].id
c6 = new_context.create_container(
environment={'affinity:container!=' + name1: ''})
containers.append(c6)
# check c6 is not on same host as c1
assert c6.hosts()[0].id != c1.hosts()[0].id
c7 = new_context.create_container(
labels={'io.rancher.scheduler.affinity:container_ne': name1})
containers.append(c7)
# check c7 is not on same host as c1
assert c7.hosts()[0].id != c1.hosts()[0].id
finally:
for c in containers:
new_context.delete(c)
def test_container_label_affinity(new_context):
# Two hosts
register_simulated_host(new_context)
containers = []
try:
c1_label = random_str()
c1 = new_context.create_container(
labels={'foo': c1_label}
)
containers.append(c1)
c2 = new_context.create_container(
environment={'affinity:foo==' + c1_label: ''})
containers.append(c2)
# check c2 is on same host as c1
assert c2.hosts()[0].id == c1.hosts()[0].id
c3 = new_context.create_container(
labels={
'io.rancher.scheduler.affinity:container_label':
'foo=' + c1_label}
)
containers.append(c3)
# check c3 is on same host as c1
assert c3.hosts()[0].id == c1.hosts()[0].id
c4_label = random_str()
c4 = new_context.create_container(
environment={'affinity:foo!=' + c1_label: ''},
labels={'foo': c4_label}
)
containers.append(c4)
# check c4 is not on same host as c1
assert c4.hosts()[0].id != c1.hosts()[0].id
c5 = new_context.create_container(
environment={
'affinity:foo!=' + c1_label: '',
'affinity:foo!=~' + c4_label: ''
})
containers.append(c5)
# since we just specified a soft anti-affinity to c4,
# check c5 is on same host as c4
assert c5.hosts()[0].id == c4.hosts()[0].id
c6 = new_context.create_container(
environment={
'affinity:foo!=' + c1_label: '',
},
labels={
'io.rancher.scheduler.affinity:container_label_soft_ne':
'foo=' + c4_label
}
)
containers.append(c6)
assert c6.hosts()[0].id == c4.hosts()[0].id
finally:
for c in containers:
new_context.delete(c)
def test_volumes_from_constraint(new_context):
# Three hosts
register_simulated_host(new_context)
register_simulated_host(new_context)
containers = []
try:
# nominal condition. start c1 before c2
c1 = new_context.create_container_no_success(startOnCreate=False)
c2 = new_context.create_container_no_success(startOnCreate=False,
dataVolumesFrom=[c1.id])
c1 = c1.start()
c2 = c2.start()
c1 = new_context.wait_for_state(c1, 'running')
c2 = new_context.wait_for_state(c2, 'running')
containers.append(c1)
containers.append(c2)
assert c1.hosts()[0].id == c2.hosts()[0].id
# less than ideal situation. start c4 before c3
c3 = new_context.create_container_no_success(startOnCreate=False)
c4 = new_context.create_container_no_success(startOnCreate=False,
dataVolumesFrom=[c3.id])
c4 = c4.start()
c4 = new_context.client.wait_transitioning(c4)
assert c4.transitioning == 'error'
assert c4.transitioningMessage == 'volumeFrom instance is not ' \
'running : Dependencies readiness' \
' error'
finally:
for c in containers:
new_context.delete(c)
def test_network_mode_constraint(new_context):
client = new_context.client
# Three hosts
register_simulated_host(new_context)
register_simulated_host(new_context)
containers = []
try:
c1 = new_context.create_container(startOnCreate=False)
c2 = new_context.create_container(startOnCreate=False,
networkMode='container',
networkContainerId=c1.id)
c1 = client.wait_success(c1.start())
c2 = client.wait_success(c2.start())
assert c1.state == 'running'
containers.append(c1)
        assert c2.state == 'running'
containers.append(c2)
assert c1.hosts()[0].id == c2.hosts()[0].id
finally:
for c in containers:
new_context.delete(c)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
# FIXME(dzh) : old API integer_value(10) may have range check.
    # currently we don't have a user-configured check.
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
dtype='float32',
size=[USR_DICT_SIZE, 32],
param_attr='user_table',
is_sparse=IS_SPARSE)
usr_fc = layers.fc(input=usr_emb, size=32)
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
size=[USR_GENDER_DICT_SIZE, 16],
param_attr='gender_table',
is_sparse=IS_SPARSE)
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
size=[USR_AGE_DICT_SIZE, 16],
is_sparse=IS_SPARSE,
param_attr='age_table')
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
size=[USR_JOB_DICT_SIZE, 16],
param_attr='job_table',
is_sparse=IS_SPARSE)
usr_job_fc = layers.fc(input=usr_job_emb, size=16)
concat_embed = layers.concat(
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return usr_combined_features
def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
dtype='float32',
size=[MOV_DICT_SIZE, 32],
param_attr='movie_table',
is_sparse=IS_SPARSE)
mov_fc = layers.fc(input=mov_emb, size=32)
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id', shape=[1], dtype='int64', lod_level=1)
mov_categories_emb = layers.embedding(
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_categories_hidden = layers.sequence_pool(
input=mov_categories_emb, pool_type="sum")
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title', shape=[1], dtype='int64', lod_level=1)
mov_title_emb = layers.embedding(
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_title_conv = nets.sequence_conv_pool(
input=mov_title_emb,
num_filters=32,
filter_size=3,
act="tanh",
pool_type="sum")
concat_embed = layers.concat(
input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
# FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return mov_combined_features
def inference_program():
usr_combined_features = get_usr_combined_features()
mov_combined_features = get_mov_combined_features()
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
scale_infer = layers.scale(x=inference, scale=5.0)
return scale_infer
def train_program():
scale_infer = inference_program()
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(square_cost)
return [avg_cost, scale_infer]
def optimizer_func():
return fluid.optimizer.SGD(learning_rate=0.2)
def train(use_cuda, train_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_func=train_program, place=place, optimizer_func=optimizer_func)
feed_order = [
'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
'movie_title', 'score'
]
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
avg_cost_set = trainer.test(
reader=test_reader, feed_order=feed_order)
# get avg cost
avg_cost = np.array(avg_cost_set).mean()
print("avg_cost: %s" % avg_cost)
if float(avg_cost) < 4: # Smaller value to increase CI speed
trainer.save_params(params_dirname)
trainer.stop()
else:
print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
float(avg_cost)))
if math.isnan(float(avg_cost)):
sys.exit("got NaN loss, training failed.")
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.movielens.train(), buf_size=8192),
batch_size=BATCH_SIZE)
trainer.train(
num_epochs=1,
event_handler=event_handler,
reader=train_reader,
feed_order=feed_order)
def infer(use_cuda, inference_program, params_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(
inference_program, param_path=params_dirname, place=place)
# Use the first data from paddle.dataset.movielens.test() as input.
# Use create_lod_tensor(data, recursive_sequence_lengths, place) API
# to generate LoD Tensor where `data` is a list of sequences of index
# numbers, `recursive_sequence_lengths` is the length-based level of detail
# (lod) info associated with `data`.
# For example, data = [[10, 2, 3], [2, 3]] means that it contains
# two sequences of indexes, of length 3 and 2, respectively.
# Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
# level of detail info, indicating that `data` consists of two sequences
# of length 3 and 2, respectively.
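    # Illustrative sketch (editor's addition, not used below): the two-sequence
    # example from the comment above would be built as
    #     fluid.create_lod_tensor([[10, 2, 3], [2, 3]], [[3, 2]], place)
    # whereas each input below holds a single sequence, so its
    # recursive_sequence_lengths is simply [[sequence_length]].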
user_id = fluid.create_lod_tensor([[1]], [[1]], place)
gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
age_id = fluid.create_lod_tensor([[0]], [[1]], place)
job_id = fluid.create_lod_tensor([[10]], [[1]], place)
movie_id = fluid.create_lod_tensor([[783]], [[1]], place)
category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)
movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], [[5]],
place)
results = inferencer.infer(
{
'user_id': user_id,
'gender_id': gender_id,
'age_id': age_id,
'job_id': job_id,
'movie_id': movie_id,
'category_id': category_id,
'movie_title': movie_title
},
return_numpy=False)
print("infer results: ", np.array(results[0]))
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
params_dirname = "recommender_system.inference.model"
train(
use_cuda=use_cuda,
train_program=train_program,
params_dirname=params_dirname)
infer(
use_cuda=use_cuda,
inference_program=inference_program,
params_dirname=params_dirname)
if __name__ == '__main__':
main(USE_GPU)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `PREDICT`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
class EstimatorSpec(
collections.namedtuple('EstimatorSpec', [
'predictions', 'loss', 'train_op', 'eval_metric_ops',
'export_outputs', 'training_chief_hooks', 'training_hooks',
'scaffold'
])):
"""Ops and objects returned from a `model_fn` and passed to `Estimator`.
`EstimatorSpec` fully defines the model to be run by `Estimator`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
export_outputs=None,
training_chief_hooks=None,
training_hooks=None,
scaffold=None):
"""Creates a validated `EstimatorSpec` instance.
Depending on the value of `mode`, different arguments are required. Namely
* For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
    * For `mode == ModeKeys.EVAL`: required field is `loss`.
    * For `mode == ModeKeys.PREDICT`: required field is `predictions`.
model_fn can populate all arguments independent of mode. In this case, some
arguments will be ignored by `Estimator`. E.g. `train_op` will be ignored
in eval and infer modes. Example:
```python
def my_model_fn(mode, features, labels):
predictions = ...
loss = ...
train_op = ...
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Alternatively, model_fn can just populate the arguments appropriate to the
given mode. Example:
```python
def my_model_fn(mode, features, labels):
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
loss = ...
else:
loss = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = ...
else:
train_op = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = ...
else:
predictions = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Args:
mode: A `ModeKeys`. Specifies if this is training, evaluation or
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
export_outputs: Describes the output signature to be exported to
`SavedModel` and used during serving.
A dict `{name: (signature_method_name, predictions)}` where:
* name: An arbitrary name for this output.
* signature_method_name: One of the *_METHOD_NAME constants defined in
`signature_constants`, such as
`tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME`. Describes
the type of `SignatureDef` to be exported.
        * predictions: Predictions `Tensor` or dict of `Tensor`.
Single-headed models only need to specify one entry in this dictionary.
Multi-headed models should specify one entry for each head.
training_chief_hooks: A list of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
      training_hooks: A list of `tf.train.SessionRunHook` objects to run on
all workers during training.
scaffold: A `tf.train.Scaffold` object that can be used to set
initialization, saver, and more to be used in training.
Returns:
A validated `EstimatorSpec` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
# Validate train_op.
if train_op is None:
if mode == ModeKeys.TRAIN:
raise ValueError('Missing train_op.')
else:
_check_is_tensor_or_operation(train_op, 'train_op')
# Validate loss.
if loss is None:
if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
raise ValueError('Missing loss.')
else:
loss = _check_is_tensor(loss, 'loss')
loss_shape = loss.get_shape()
if loss_shape.num_elements() not in (None, 1):
raise ValueError('Loss must be scalar, given: {}'.format(loss))
if not loss_shape.is_compatible_with(tensor_shape.scalar()):
loss = array_ops.reshape(loss, [])
# Validate predictions.
if predictions is None:
if mode == ModeKeys.PREDICT:
raise ValueError('Missing predictions.')
predictions = {}
else:
if isinstance(predictions, dict):
predictions = {
k: _check_is_tensor(v, 'predictions[{}]'.format(k))
for k, v in six.iteritems(predictions)
}
else:
predictions = _check_is_tensor(predictions, 'predictions')
# Validate eval_metric_ops.
if eval_metric_ops is None:
eval_metric_ops = {}
else:
if not isinstance(eval_metric_ops, dict):
raise TypeError(
'eval_metric_ops must be a dict, given: {}'.format(eval_metric_ops))
for key, metric_value in six.iteritems(eval_metric_ops):
if (not isinstance(metric_value, tuple) or
len(metric_value) != 2):
raise TypeError(
'Values of eval_metric_ops must be (metric_tensor, update_op) '
'tuples, given: {} for key: {}'.format(metric_value, key))
_check_is_tensor_or_operation(metric_value[0],
'eval_metric_ops[{}]'.format(key))
_check_is_tensor_or_operation(metric_value[1],
'eval_metric_ops[{}]'.format(key))
# Validate export_outputs.
if export_outputs is not None:
if not isinstance(export_outputs, dict):
raise TypeError('export_outputs must be dict, given: {}'.format(
export_outputs))
for v in six.itervalues(export_outputs):
if not isinstance(v, tuple) or len(v) != 2:
raise TypeError(
'Values in export_outputs must be 2-tuple, given: {}'.format(
export_outputs))
if v[0] not in (
signature_constants.CLASSIFY_METHOD_NAME,
signature_constants.PREDICT_METHOD_NAME,
signature_constants.REGRESS_METHOD_NAME):
raise ValueError(
'Invalid signature_method_name in export_outputs, '
'given: {}'.format(export_outputs))
# Validate that all tensors and ops are from the default graph.
default_graph = ops.get_default_graph()
for value in _prediction_values(predictions):
if value.graph is not default_graph:
raise ValueError('prediction values must be from the default graph.')
if loss is not None and loss.graph is not default_graph:
raise ValueError('loss must be from the default graph.')
if train_op is not None and train_op.graph is not default_graph:
raise ValueError('train_op must be from the default graph.')
for value in _eval_metric_ops_values(eval_metric_ops):
if value.graph is not default_graph:
raise ValueError(
'eval_metric_ops values must be from the default graph.')
# Validate hooks.
if training_chief_hooks is None:
training_chief_hooks = []
if training_hooks is None:
training_hooks = []
for hook in training_hooks + training_chief_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be SessionRunHook instances, given: {}'.format(
hook))
scaffold = scaffold or monitored_session.Scaffold()
# Validate scaffold.
if not isinstance(scaffold, monitored_session.Scaffold):
raise TypeError(
'scaffold must be tf.train.Scaffold. Given: {}'.format(scaffold))
return super(EstimatorSpec, cls).__new__(
cls,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
training_chief_hooks=training_chief_hooks,
training_hooks=training_hooks,
scaffold=scaffold)
def _check_is_tensor_or_operation(x, name):
if not (isinstance(x, ops.Operation) or isinstance(x, ops.Tensor)):
raise TypeError('{} must be Operation or Tensor, given: {}'.format(name, x))
def _check_is_tensor(x, tensor_name):
"""Returns `x` if it is a `Tensor`, raises TypeError otherwise."""
if not isinstance(x, ops.Tensor):
raise TypeError('{} must be Tensor, given: {}'.format(tensor_name, x))
return x
def _prediction_values(predictions):
"""Returns the values of the given predictions dict or `Tensor`."""
if predictions is None:
return []
if isinstance(predictions, dict):
return list(six.itervalues(predictions))
return [predictions]
def _eval_metric_ops_values(eval_metric_ops):
"""Returns the values of the given eval_metric_ops dict."""
if eval_metric_ops is None:
return []
result = []
for value_tuple in six.itervalues(eval_metric_ops):
result.append(value_tuple[0])
result.append(value_tuple[1])
return result
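# Hedged usage sketch (editor's addition, not part of the original module): a
# minimal EVAL-mode `EstimatorSpec` illustrating the validation rules above.
# A `[1]`-shaped loss is accepted and reshaped to a scalar, and every
# `eval_metric_ops` value must be a `(metric_tensor, update_op)` pair.  The
# tensor values are arbitrary placeholders.
def _example_eval_spec():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import control_flow_ops
  loss = constant_op.constant([7.0])  # shape [1]; reshaped to scalar by __new__
  predictions = {'y': constant_op.constant([0.0, 1.0])}
  metric = (constant_op.constant(0.5), control_flow_ops.no_op())
  return EstimatorSpec(
      mode=ModeKeys.EVAL,
      predictions=predictions,
      loss=loss,
      eval_metric_ops={'mean_y': metric})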
|
|
"""
Glue's fitting classes are designed to be easily subclassed for performing
custom model fitting in Glue.
See the guide on :ref:`writing custom fit plugins <fit_plugins>` for
help with using custom fitting utilities in Glue.
"""
import numpy as np
from .simpleforms import IntOption, Option
__all__ = ['BaseFitter1D',
'PolynomialFitter',
'AstropyFitter1D',
'SimpleAstropyGaussianFitter',
'BasicGaussianFitter']
class BaseFitter1D(object):
"""
Base class for 1D fitters.
    This abstract class must be subclassed.
"""
label = "Fitter"
"""A short label for the fit, used by the GUI"""
param_names = []
"""list of parameter names that support restrictions"""
def __init__(self, **params):
self._constraints = {}
for k, v in params.items():
if k in self.param_names:
self.set_constraint(k, value=v)
else:
setattr(self, k, v)
def plot(self, fit_result, axes, x):
"""
Plot the result of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
result = axes.plot(x, y, '#4daf4a',
lw=3, alpha=0.8,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
:key bounds: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
        :param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
def build_and_fit(self, x, y, dy=None):
"""
Method which builds the arguments to fit, and calls that method
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
        *This must be overridden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of :attr:`constraints`
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
        Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
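# Hedged illustration (editor's addition, not part of glue): a minimal custom
# fitter that follows the BaseFitter1D contract described above.  It fits a
# straight line with numpy.polyfit, reusing the same weighting helper as the
# PolynomialFitter defined below; the class name is an editorial placeholder.
class _ExampleLineFitter(BaseFitter1D):
    label = "Line (example)"
    def fit(self, x, y, dy, constraints, **options):
        # dy is converted to weights exactly as in PolynomialFitter.fit
        return np.polyfit(x, y, 1, w=self._sigma_to_weights(dy))
    def predict(self, fit_result, x):
        return np.polyval(fit_result, x)
    def summarize(self, fit_result, x, y, dy=None):
        slope, intercept = fit_result
        return "slope = %e\nintercept = %e" % (slope, intercept)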
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
    Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def parameter_guesses(self, x, y, dy):
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X - values of the data
:type x: :class:`numpy.ndarray`
:param y: Y - values of the data
:type y: :class:`numpy.ndarray`
        :param dy: Uncertainties on Y (assumed to be 1 sigma)
        :type dy: :class:`numpy.ndarray`
        :returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
def _gaussian_parameter_estimates(x, y, dy):
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
@staticmethod
def eval(x, amplitude, mean, stddev):
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def fit(self, x, y, dy, constraints):
from scipy import optimize
init_values = _gaussian_parameter_estimates(x, y, dy)
init_values = [init_values[p] for p in ['amplitude', 'mean', 'stddev']]
farg = (x, y, dy)
dfunc = None
        fitparams, cov_x, infodict, mesg, ierr = optimize.leastsq(
self._errorfunc, init_values, args=farg, Dfun=dfunc,
full_output=True)
return fitparams
def predict(self, fit_result, x):
return self.eval(x, *fit_result)
def summarize(self, fit_result, x, y, dy=None):
return ("amplitude = %e\n"
"mean = %e\n"
"stddev = %e" % tuple(fit_result))
GaussianFitter = BasicGaussianFitter
try:
from astropy.modeling import models, fitting
class SimpleAstropyGaussianFitter(AstropyFitter1D):
"""
        Gaussian fitter using astropy.modeling.
"""
model_cls = models.Gaussian1D
fitting_cls = fitting.NonLinearLSQFitter
label = "Gaussian"
parameter_guesses = staticmethod(_gaussian_parameter_estimates)
GaussianFitter = SimpleAstropyGaussianFitter
except ImportError:
pass
class PolynomialFitter(BaseFitter1D):
"""
A polynomial model.
The degree of the polynomial is specified by :attr:`degree`
"""
label = "Polynomial"
degree = IntOption(min=0, max=5, default=3, label="Polynomial Degree")
def fit(self, x, y, dy, constraints, degree=2):
"""
Fit a ``degree``-th order polynomial to the data.
"""
w = self._sigma_to_weights(dy)
return np.polyfit(x, y, degree, w=w)
def predict(self, fit_result, x):
return np.polyval(fit_result, x)
def summarize(self, fit_result, x, y, dy=None):
return "Coefficients:\n" + "\n".join("%e" % coeff
for coeff in fit_result.tolist())
def _report_fitter(fitter):
if "nfev" in fitter.fit_info:
return "Converged in %i iterations" % fitter.fit_info['nfev']
return 'Converged'
__FITTERS__ = [PolynomialFitter, GaussianFitter]
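# Hedged usage sketch (editor's addition): running one of the fitters above on
# made-up data; only executed when the module is run directly.
if __name__ == '__main__':
    _x = np.linspace(-1, 1, 50)
    _y = 2 * _x ** 2 - _x + 0.05 * np.random.randn(50)
    _fitter = PolynomialFitter(degree=2)
    _result = _fitter.build_and_fit(_x, _y)
    print(_fitter.summarize(_result, _x, _y))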
|
|
__author__ = 'leif'
from ifind.seeker.list_reader import ListReader
from ifind.search.engine import Engine
from ifind.search.response import Response
from ifind.search.exceptions import EngineConnectionException, QueryParamException
from whoosh.index import open_dir
from whoosh.query import *
from whoosh.qparser import QueryParser
from whoosh.qparser import OrGroup, AndGroup
from whoosh import scoring
from whoosh.qparser import MultifieldParser
from whoosh import highlight
# , HtmlFormatter, ContextFragmenter, PinpointFragmenter, WholeFragmenter
import logging
log = logging.getLogger('ifind.search.engines.whooshtrec')
class Whooshtrec(Engine):
"""
Whoosh based search engine.
"""
def __init__(self, whoosh_index_dir='', stopwords_file='', model=1, implicit_or=False, **kwargs):
"""
Whoosh engine constructor.
Kwargs:
See Engine.
Usage:
See EngineFactory.
"""
Engine.__init__(self, **kwargs)
self.whoosh_index_dir = whoosh_index_dir
if not self.whoosh_index_dir:
raise EngineConnectionException(self.name, "'whoosh_index_dir=' keyword argument not specified")
self.stopwords_file = stopwords_file
if self.stopwords_file:
self.stopwords = ListReader(self.stopwords_file) # Open the stopwords file, read into a ListReader
self.snippet_size = 3
        self.implicit_or = implicit_or
try:
            # This creates a static docIndex shared by ALL instances of Whooshtrec.
# This will not work if you want indexes from multiple sources.
# As this currently is not the case, this is a suitable fix.
if not hasattr(Whooshtrec, 'docIndex'):
Whooshtrec.docIndex = open_dir(whoosh_index_dir)
log.debug("Whoosh Document index open: {0}".format(whoosh_index_dir))
log.debug("Documents in index: {0}".format( self.docIndex.doc_count()))
self._field = 'content'
if 'alltext' in self.docIndex.schema:
self._field = 'alltext'
log.debug("Using all text field")
if self.implicit_or:
self.parser = QueryParser(self._field, self.docIndex.schema, group=OrGroup)
log.debug("OR Query parser created")
else:
self.parser = QueryParser(self._field, self.docIndex.schema, group=AndGroup)
log.debug("AND Query parser created")
self.analyzer = self.docIndex.schema[self.parser.fieldname].analyzer
self.set_fragmenter()
#self.formatter = highlight.HtmlFormatter()
self.set_model(model)
        except Exception:
msg = "Could not open Whoosh index at: " + whoosh_index_dir
raise EngineConnectionException(self.name, msg)
def set_fragmenter(self, frag_type=0, max_chars=200, surround=20):
def make_context_frag(max_chars, surround):
log.debug("Context Fragmenter with max_chars:{0} surround:{1}".format(max_chars,surround))
return highlight.ContextFragmenter(max_chars, surround)
def make_sentence_frag(max_chars, surround):
log.debug("Sentence Fragmenter with max_chars:{0} surround:{1}".format(max_chars,surround))
return highlight.SentenceFragmenter(max_chars)
def make_pinpoint_frag(max_chars, surround):
log.debug("Pinpoint Fragmenter with max_chars:{0} surround:{1}".format(max_chars,surround))
return highlight.PinpointFragmenter(max_chars, surround, True)
frags = {0: make_context_frag,
1: make_sentence_frag,
2: make_pinpoint_frag}
if frag_type in frags:
self.fragmenter = frags[frag_type](max_chars, surround)
else:
self.fragmenter = frags[0](max_chars, surround)
def set_model(self, model, pval=None):
self.scoring_model = scoring.BM25F(B=0.75)
engine_name = "BM25F B={0}".format(0.75)
# Use the BM25F scoring module (B=0.75 is default for Whoosh)
if model == 0:
engine_name = "TFIDF"
self.scoring_model = scoring.TF_IDF() # Use the TFIDF scoring module
if model == 2:
c = 10.0
if pval:
c = pval
engine_name = "PL2 c={0}".format(c)
self.scoring_model = scoring.PL2(c=c) # Use PL2
if model == 1:
B = 0.75
if pval:
B = pval
engine_name = "BM25F B={0}".format(B)
self.scoring_model = scoring.BM25F(B=B) # Use BM25
self.searcher = self.docIndex.searcher(weighting=self.scoring_model)
log.debug("Engine Created with: {0} retrieval model".format(engine_name))
def _search(self, query):
"""
Concrete method of Engine's interface method 'search'.
Performs a search and retrieves the results as an ifind Response.
Args:
query (ifind Query): object encapsulating details of a search query.
Query Kwargs:
            top (int): specifies the maximum number of results to return, with no minimum guarantee
        Returns:
            ifind Response: object encapsulating a search request's results.
Raises:
EngineException
Usage:
Private method.
"""
self.__parse_query_terms(query)
return self._request(query)
def __parse_query_terms(self, query):
if not query.top:
query.top = 10
if query.top < 1:
query.top = 10
query.terms = query.terms.strip()
query.terms = unicode(query.terms)
query.parsed_terms = self.parser.parse(query.terms)
def _request(self, query):
"""
Issues a single request to Whoosh Index and returns the result as
an ifind Response.
Args:
query (ifind Query): object encapsulating details of a search query.
Returns:
ifind Response: object encapsulating a search request's results.
Raises:
EngineException
Usage:
Private method.
"""
#try:
response = None
page = query.skip
pagelen = query.top
log.debug("Query Issued: {0} Page: {1} Page Length: {2}".format(query.parsed_terms, page, pagelen))
search_page = self.searcher.search_page(query.parsed_terms, page, pagelen=pagelen)
setattr(search_page, 'actual_page', page)
response = self._parse_whoosh_response(query, search_page, self._field, self.fragmenter, self.snippet_size)
return response
@staticmethod
def _parse_whoosh_response(query, search_page, field, fragmenter, snippet_size):
"""
Parses Whoosh's response and returns as an ifind Response.
Args:
query (ifind Query): object encapsulating details of a search query.
results : requests library response object containing search results.
Returns:
ifind Response: object encapsulating a search request's results.
Usage:
Private method.
"""
response = Response(query.terms)
r = 0
search_page.results.fragmenter = fragmenter
for result in search_page:
title = result["title"]
if title:
title = title.strip()
else:
title = "Untitled"
if title == '':
title = "Untitled"
rank = result.rank + 1
url = "/treconomics/" + str(result.docnum)
            summary = result.highlights(field, top=snippet_size)
content = result[field]
trecid = result["docid"]
trecid = trecid.strip()
source = result["source"]
response.add_result(title=title,
url=url,
summary=summary,
docid=trecid,
source=source,
rank=rank,
whooshid=result.docnum,
score=result.score,
content=content)
response.result_total = len(search_page)
# Add the total number of pages from the results object as an attribute of our response object.
# We also add the total number of results shown on the page.
setattr(response, 'total_pages', search_page.pagecount)
setattr(response, 'results_on_page', search_page.pagelen)
setattr(response, 'actual_page', search_page.actual_page)
return response
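# Hedged usage sketch (editor's addition, not part of the module): constructing
# the engine and switching the retrieval model / fragmenter via the methods
# above.  The index path is a placeholder and must point to an existing Whoosh
# index.
if __name__ == '__main__':
    engine = Whooshtrec(whoosh_index_dir='/path/to/whoosh/index',
                        implicit_or=True)
    engine.set_model(2, pval=10.0)  # PL2 with c=10.0
    engine.set_fragmenter(frag_type=1, max_chars=300)  # sentence fragmenter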
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Open Targets
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Microsoft OpenPAI Job wrapper for Luigi.
"OpenPAI is an open source platform that provides complete AI model training and resource management capabilities,
it is easy to extend and supports on-premise, cloud and hybrid environments in various scale."
For more information about OpenPAI : https://github.com/Microsoft/pai/, this task is tested against OpenPAI 0.7.1
Requires:
- requests: ``pip install requests``
Written and maintained by Liu, Dongqing (@liudongqing).
"""
import time
import logging
import luigi
import luigi.contrib.hdfs
import abc
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import json
logger = logging.getLogger('luigi-interface')
try:
import requests as rs
from requests.exceptions import HTTPError
except ImportError:
logger.warning('requests is not installed. PaiTask requires requests.')
def slot_to_dict(o):
o_dict = {}
for key in o.__slots__:
if not key.startswith('__'):
value = getattr(o, key, None)
if value is not None:
o_dict[key] = value
return o_dict
class PaiJob(object):
"""
The Open PAI job definition.
Refer to here https://github.com/Microsoft/pai/blob/master/docs/job_tutorial.md
::
{
"jobName": String,
"image": String,
"authFile": String,
"dataDir": String,
"outputDir": String,
"codeDir": String,
"virtualCluster": String,
"taskRoles": [
{
"name": String,
"taskNumber": Integer,
"cpuNumber": Integer,
"memoryMB": Integer,
"shmMB": Integer,
"gpuNumber": Integer,
"portList": [
{
"label": String,
"beginAt": Integer,
"portNumber": Integer
}
],
"command": String,
"minFailedTaskCount": Integer,
"minSucceededTaskCount": Integer
}
],
"gpuType": String,
"retryCount": Integer
}
"""
__slots__ = (
'jobName', 'image', 'authFile', 'dataDir', 'outputDir', 'codeDir', 'virtualCluster',
'taskRoles', 'gpuType', 'retryCount'
)
def __init__(self, jobName, image, tasks):
"""
Initialize a Job with required fields.
        :param jobName: Name for the job, needs to be unique
:param image: URL pointing to the Docker image for all tasks in the job
:param tasks: List of taskRole, one task role at least
"""
self.jobName = jobName
self.image = image
if isinstance(tasks, list) and len(tasks) != 0:
self.taskRoles = tasks
else:
            raise TypeError('you must specify at least one task.')
class Port(object):
__slots__ = ('label', 'beginAt', 'portNumber')
def __init__(self, label, begin_at=0, port_number=1):
"""
The Port definition for TaskRole
:param label: Label name for the port type, required
:param begin_at: The port to begin with in the port type, 0 for random selection, required
:param port_number: Number of ports for the specific type, required
"""
self.label = label
self.beginAt = begin_at
self.portNumber = port_number
class TaskRole(object):
__slots__ = (
'name', 'taskNumber', 'cpuNumber', 'memoryMB', 'shmMB', 'gpuNumber', 'portList', 'command',
'minFailedTaskCount', 'minSucceededTaskCount'
)
def __init__(self, name, command, taskNumber=1, cpuNumber=1, memoryMB=2048, shmMB=64, gpuNumber=0, portList=[]):
"""
The TaskRole of PAI
        :param name: Name for the task role, needs to be unique among task roles, required
:param command: Executable command for tasks in the task role, can not be empty, required
:param taskNumber: Number of tasks for the task role, no less than 1, required
:param cpuNumber: CPU number for one task in the task role, no less than 1, required
:param shmMB: Shared memory for one task in the task role, no more than memory size, required
:param memoryMB: Memory for one task in the task role, no less than 100, required
:param gpuNumber: GPU number for one task in the task role, no less than 0, required
:param portList: List of portType to use, optional
"""
self.name = name
self.command = command
self.taskNumber = taskNumber
self.cpuNumber = cpuNumber
self.memoryMB = memoryMB
self.shmMB = shmMB
self.gpuNumber = gpuNumber
self.portList = portList
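# Hedged illustration (editor's addition, not part of the original module):
# building a one-role job and serialising it the same way PaiTask.run() does.
# The job name, image and command below are placeholders.
def _example_job_json():
    role = TaskRole('main', command='python train.py', gpuNumber=1)
    job = PaiJob('example_job', 'openpai/pai.example.sklearn', [role])
    return json.dumps(job, default=slot_to_dict)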
class OpenPai(luigi.Config):
pai_url = luigi.Parameter(
default='http://127.0.0.1:9186',
description='rest server url, default is http://127.0.0.1:9186')
username = luigi.Parameter(
default='admin',
description='your username')
password = luigi.Parameter(
default=None,
description='your password')
expiration = luigi.IntParameter(
default=3600,
description='expiration time in seconds')
class PaiTask(luigi.Task):
__POLL_TIME = 5
@abc.abstractproperty
def name(self):
"""Name for the job, need to be unique, required"""
return 'SklearnExample'
@abc.abstractproperty
def image(self):
"""URL pointing to the Docker image for all tasks in the job, required"""
return 'openpai/pai.example.sklearn'
@abc.abstractproperty
def tasks(self):
"""List of taskRole, one task role at least, required"""
return []
@property
def auth_file_path(self):
"""Docker registry authentication file existing on HDFS, optional"""
return None
@property
def data_dir(self):
"""Data directory existing on HDFS, optional"""
return None
@property
def code_dir(self):
"""Code directory existing on HDFS, should not contain any data and should be less than 200MB, optional"""
return None
@property
def output_dir(self):
"""Output directory on HDFS, $PAI_DEFAULT_FS_URI/$jobName/output will be used if not specified, optional"""
return '$PAI_DEFAULT_FS_URI/{0}/output'.format(self.name)
@property
def virtual_cluster(self):
"""The virtual cluster job runs on. If omitted, the job will run on default virtual cluster, optional"""
return 'default'
@property
def gpu_type(self):
"""Specify the GPU type to be used in the tasks. If omitted, the job will run on any gpu type, optional"""
return None
@property
def retry_count(self):
"""Job retry count, no less than 0, optional"""
return 0
def __init_token(self):
self.__openpai = OpenPai()
request_json = json.dumps({'username': self.__openpai.username, 'password': self.__openpai.password,
'expiration': self.__openpai.expiration})
logger.debug('Get token request {0}'.format(request_json))
response = rs.post(urljoin(self.__openpai.pai_url, '/api/v1/token'),
headers={'Content-Type': 'application/json'}, data=request_json)
logger.debug('Get token response {0}'.format(response.text))
if response.status_code != 200:
msg = 'Get token request failed, response is {}'.format(response.text)
logger.error(msg)
raise Exception(msg)
else:
self.__token = response.json()['token']
def __init__(self, *args, **kwargs):
"""
:param pai_url: The rest server url of PAI clusters, default is 'http://127.0.0.1:9186'.
:param token: The token used to auth the rest server of PAI.
"""
super(PaiTask, self).__init__(*args, **kwargs)
self.__init_token()
def __check_job_status(self):
response = rs.get(urljoin(self.__openpai.pai_url, '/api/v1/jobs/{0}'.format(self.name)))
logger.debug('Check job response {0}'.format(response.text))
if response.status_code == 404:
msg = 'Job {0} is not found'.format(self.name)
logger.debug(msg)
raise HTTPError(msg, response=response)
elif response.status_code != 200:
msg = 'Get job request failed, response is {}'.format(response.text)
logger.error(msg)
raise HTTPError(msg, response=response)
job_state = response.json()['jobStatus']['state']
if job_state in ['UNKNOWN', 'WAITING', 'RUNNING']:
logger.debug('Job {0} is running in state {1}'.format(self.name, job_state))
return False
else:
msg = 'Job {0} finished in state {1}'.format(self.name, job_state)
logger.info(msg)
if job_state == 'SUCCEED':
return True
else:
raise RuntimeError(msg)
def run(self):
job = PaiJob(self.name, self.image, self.tasks)
job.virtualCluster = self.virtual_cluster
job.authFile = self.auth_file_path
job.codeDir = self.code_dir
job.dataDir = self.data_dir
job.outputDir = self.output_dir
job.retryCount = self.retry_count
job.gpuType = self.gpu_type
request_json = json.dumps(job, default=slot_to_dict)
logger.debug('Submit job request {0}'.format(request_json))
response = rs.post(urljoin(self.__openpai.pai_url, '/api/v1/jobs'),
headers={'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(self.__token)}, data=request_json)
logger.debug('Submit job response {0}'.format(response.text))
# 202 is success for job submission, see https://github.com/Microsoft/pai/blob/master/docs/rest-server/API.md
if response.status_code != 202:
msg = 'Submit job failed, response code is {0}, body is {1}'.format(response.status_code, response.text)
logger.error(msg)
raise HTTPError(msg, response=response)
while not self.__check_job_status():
time.sleep(self.__POLL_TIME)
def output(self):
        return luigi.contrib.hdfs.HdfsTarget(self.output_dir)
def complete(self):
try:
return self.__check_job_status()
except HTTPError:
return False
except RuntimeError:
return False
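# Hedged sketch (editor's addition, not part of the original module): a minimal
# concrete PaiTask.  The job name, Docker image and command are placeholders; a
# real deployment would also configure the OpenPai section (pai_url, username,
# password) via luigi configuration.
class _ExamplePaiTask(PaiTask):
    @property
    def name(self):
        return 'sklearn_example'
    @property
    def image(self):
        return 'openpai/pai.example.sklearn'
    @property
    def tasks(self):
        return [TaskRole('main', 'python -m sklearn_example.train')]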
|
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=invalid-name
"""Tests for notifications for models with assignable mixin."""
from datetime import datetime
from freezegun import freeze_time
from mock import patch
from sqlalchemy import and_
from ggrc import db
from ggrc.models import Assessment
from ggrc.models import Notification
from ggrc.models import NotificationType
from ggrc.models import Revision
from integration.ggrc import TestCase
from integration.ggrc import api_helper
class TestAssignableNotification(TestCase):
"""Test setting notifications for assignable mixin."""
def setUp(self):
super(TestAssignableNotification, self).setUp()
self.client.get("/login")
self._fix_notification_init()
self.api_helper = api_helper.Api()
def _fix_notification_init(self):
"""Fix Notification object init function.
    This fix is needed to get a correct created_at field when using freezegun.
    By default the created_at field is left empty and filled in by the
    database, which uses the system time rather than the fake date set by the
    freezegun plugin. This fix makes sure that objects created inside a
    freeze_time block have all dates set to the faked date and time.
"""
def init_decorator(init):
"""Wrapper for Notification init function."""
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
@classmethod
def _get_notifications(cls, sent=False, notif_type=None):
"""Get a notification query.
Args:
sent (boolean): flag to filter out only notifications that have been
sent.
notif_type (string): name of the notification type.
Returns:
sqlalchemy query for selected notifications.
"""
if sent:
notif_filter = Notification.sent_at.isnot(None)
else:
notif_filter = Notification.sent_at.is_(None)
if notif_type:
notif_filter = and_(notif_filter, NotificationType.name == notif_type)
return db.session.query(Notification).join(NotificationType).filter(
notif_filter
)
@patch("ggrc.notifications.common.send_email")
def test_assessment_without_verifiers(self, _):
"""Test setting notification entries for simple assessments.
This function tests that each assessment gets an entry in the
notifications table after it's been created.
    The second part of the test makes sure that assessment status changes
    do not add any new notification entries if the assessment
    does not have a verifier.
"""
with freeze_time("2015-04-01"):
self.assertEqual(self._get_notifications().count(), 0)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
notifications = self._get_notifications().all()
self.assertEqual(len(notifications), 6)
revisions = Revision.query.filter(
Revision.resource_type == 'Notification',
Revision.resource_id.in_([notif.id for notif in notifications])
).count()
self.assertEqual(revisions, 6)
self.api_helper.delete(asmts["A 1"])
self.api_helper.delete(asmts["A 6"])
self.assertEqual(self._get_notifications().count(), 4)
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt = Assessment.query.get(asmts["A 5"].id)
self.api_helper.modify_object(asmt, {"status": Assessment.FINAL_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt, {"status": Assessment.START_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt, {"status": Assessment.FINAL_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 0)
@patch("ggrc.notifications.common.send_email")
def test_assessment_with_verifiers(self, _):
"""Test notifications entries for declined assessments.
This tests makes sure there are extra notification entries added when a
assessment has been declined.
"""
with freeze_time("2015-04-01"):
self.assertEqual(self._get_notifications().count(), 0)
self.import_file("assessment_with_templates.csv")
asmts = {asmt.slug: asmt for asmt in Assessment.query}
notifications = self._get_notifications().all()
self.assertEqual(len(notifications), 6)
revisions = Revision.query.filter(
Revision.resource_type == 'Notification',
Revision.resource_id.in_([notif.id for notif in notifications])
).count()
self.assertEqual(revisions, 6)
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
asmt1 = Assessment.query.get(asmts["A 5"].id)
# start and finish assessment 1
self.api_helper.modify_object(asmt1,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt1, {"status": Assessment.DONE_STATE})
self.assertEqual(self._get_notifications().count(), 0)
# decline assessment 1
self.api_helper.modify_object(asmt1,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 1)
self.api_helper.modify_object(asmt1, {"status": Assessment.DONE_STATE})
self.assertEqual(self._get_notifications().count(), 1)
# decline assessment 1 the second time
self.api_helper.modify_object(asmt1,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 1)
asmt6 = Assessment.query.get(asmts["A 6"].id)
# start and finish assessment 6
self.api_helper.modify_object(asmt6,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 1)
self.api_helper.modify_object(asmt6, {"status": Assessment.DONE_STATE})
self.assertEqual(self._get_notifications().count(), 1)
# decline assessment 6
self.api_helper.modify_object(asmt6,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 2)
# send all notifications
self.client.get("/_notifications/send_daily_digest")
self.assertEqual(self._get_notifications().count(), 0)
# Refresh the object because of the lost session due to the get call.
asmt6 = Assessment.query.get(asmts["A 6"].id)
self.api_helper.modify_object(asmt6,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt6,
{"status": Assessment.DONE_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt6,
{"status": Assessment.VERIFIED_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt6,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 0)
# decline assessment 6
self.api_helper.modify_object(asmt6, {"status": Assessment.DONE_STATE})
self.assertEqual(self._get_notifications().count(), 0)
self.api_helper.modify_object(asmt6,
{"status": Assessment.PROGRESS_STATE})
self.assertEqual(self._get_notifications().count(), 1)
|