Dataset columns (type and value statistics):

column           | type    | stats
-----------------|---------|-----------------------------------------------
repo_name        | string  | lengths 5 to 92
path             | string  | lengths 4 to 221
copies           | string  | 19 classes
size             | string  | lengths 4 to 6
content          | string  | lengths 766 to 896k
license          | string  | 15 classes
hash             | int64   | -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean        | float64 | 6.51 to 99.9
line_max         | int64   | 32 to 997
alpha_frac       | float64 | 0.25 to 0.96
autogenerated    | bool    | 1 class
ratio            | float64 | 1.5 to 13.6
config_test      | bool    | 2 classes
has_no_keywords  | bool    | 2 classes
few_assignments  | bool    | 1 class

repo_name: wpoely86/vsc-base
path: lib/vsc/utils/run.py
copies: 2
size: 31071
content:
#
# Copyright 2009-2013 Ghent University
#
# This file is part of vsc-base,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-base
#
# vsc-base is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2 of
# the License, or (at your option) any later version.
#
# vsc-base is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
#
"""
Python module to execute a command

Historical overview of existing equivalent code

 - EasyBuild filetools module
    - C{run_cmd(cmd, log_ok=True, log_all=False, simple=False, inp=None, regexp=True, log_output=False, path=None)}
    - C{run_cmd_qa(cmd, qa, no_qa=None, log_ok=True, log_all=False, simple=False, regexp=True, std_qa=None, path=None)}
        - Executes a command cmd
            - looks for questions and tries to answer based on qa dictionary
            - returns exitcode and stdout+stderr (mixed)
            - no input through stdin
            - if C{log_ok} or C{log_all} are set -> will C{log.error} if non-zero exit-code
            - if C{simple} is C{True} -> instead of returning a tuple (output, ec) it will just
              return C{True} or C{False} signifying success
            - C{regexp} -> regex used to check the output for errors. If C{True} will use the
              default (see C{parselogForError})
            - if log_output is True -> all output of command will be logged to a tempfile
            - path is the path run_cmd should chdir to before doing anything
        - Q&A: support reading stdout asynchronously and replying to a question through stdin

 - Manage C{managecommands} module C{Command} class
    - C{run} method

 - python-package-vsc-utils run module Command class
    - C{run} method

 - C{mympirun} (old)
    - C{runrun(self, cmd, returnout=False, flush=False, realcmd=False)}:
        - C{runrunnormal(self, cmd, returnout=False, flush=False)}
        - C{runrunfile(self, cmd, returnout=False, flush=False)}

 - C{hanything} commands/command module
    - C{run} method
    - fake pty support

@author: Stijn De Weirdt (Ghent University)
"""

import errno
import logging
import os
import pty
import re
import signal
import sys
import time

from vsc.utils.fancylogger import getLogger, getAllExistingLoggers

PROCESS_MODULE_ASYNCPROCESS_PATH = 'vsc.utils.asyncprocess'
PROCESS_MODULE_SUBPROCESS_PATH = 'subprocess'

RUNRUN_TIMEOUT_OUTPUT = ''
RUNRUN_TIMEOUT_EXITCODE = 123
RUNRUN_QA_MAX_MISS_EXITCODE = 124

BASH = '/bin/bash'
SHELL = BASH


class DummyFunction(object):
    def __getattr__(self, name):
        def dummy(*args, **kwargs):
            pass
        return dummy


class Run(object):
    """Base class for static run method"""
    INIT_INPUT_CLOSE = True
    USE_SHELL = True
    SHELL = SHELL  # set the shell via the module constant

    @classmethod
    def run(cls, cmd, **kwargs):
        """static method
            return (exitcode, output)
        """
        r = cls(cmd, **kwargs)
        return r._run()

    def __init__(self, cmd=None, **kwargs):
        """
        Handle initialisation
            @param cmd: command to run
            @param input: set "simple" input
            @param startpath: directory to change to before executing command
            @param disable_log: use fake logger (won't log anything)
            @param use_shell: use the subshell
            @param shell: change the shell
        """
        self.input = kwargs.pop('input', None)
        self.startpath = kwargs.pop('startpath', None)
        self.use_shell = kwargs.pop('use_shell', self.USE_SHELL)
        self.shell = kwargs.pop('shell', self.SHELL)
        if kwargs.pop('disable_log', None):
            self.log = DummyFunction()  # no logging
        if not hasattr(self, 'log'):
            self.log = getLogger(self._get_log_name())

        self.cmd = cmd  # actual command

        self._cwd_before_startpath = None

        self._process_module = None
        self._process = None

        self.readsize = 1024  # number of bytes to read blocking

        self._shellcmd = None
        self._popen_named_args = None

        self._process_exitcode = None
        self._process_output = None

        self._post_exitcode_log_failure = self.log.error

        super(Run, self).__init__(**kwargs)

    def _get_log_name(self):
        """Set the log name"""
        return self.__class__.__name__

    def _prep_module(self, modulepath=None, extendfromlist=None):
        # these will provide the required Popen, PIPE and STDOUT
        if modulepath is None:
            modulepath = PROCESS_MODULE_SUBPROCESS_PATH
        fromlist = ['Popen', 'PIPE', 'STDOUT']
        if extendfromlist is not None:
            fromlist.extend(extendfromlist)

        self._process_modulepath = modulepath
        self._process_module = __import__(self._process_modulepath, globals(), locals(), fromlist)

    def _run(self):
        """actual method

        Structure
         - pre
            - convert command to shell command - DONE
            - chdir before start - DONE
            - start C{Popen} - DONE
                - support async and subprocess - DONE
                - support for
                    - filehandle
                    - PIPE - DONE
                    - pty - DONE
         - main
            - should capture exitcode and output
            - features
                - separate stdout and stderr ?
                - simple single run
                    - no timeout/waiting - DONE
                - flush to
                    - stdout
                    - logger - DONE
                    - both stdout and logger
                - process intermediate output
                    - qa
                - input
                    - qa
                    - from file ?
                    - text - DONE
         - post
            - parse with regexp
                - raise/log error on match
            - return
                - return output
                    - log output
                    - write to file
                    - return in string - DONE
                - on C{ec > 0}
                    - error - DONE
                    - raiseException
                - simple
                    - just return True/False
        """
        self._run_pre()
        self._wait_for_process()
        return self._run_post()

    def _run_pre(self):
        """Non-blocking start"""
        if self._process_module is None:
            self._prep_module()

        if self.startpath is not None:
            self._start_in_path()

        if self._shellcmd is None:
            self._make_shell_command()

        if self._popen_named_args is None:
            self._make_popen_named_args()

        self._init_process()

        self._init_input()

    def _run_post(self):
        self._cleanup_process()

        self._post_exitcode()

        self._post_output()

        if self.startpath is not None:
            self._return_to_previous_start_in_path()

        return self._run_return()

    def _start_in_path(self):
        """Change path before the run"""
        if self.startpath is None:
            self.log.debug("_start_in_path: no startpath set")
            return

        if os.path.exists(self.startpath):
            if os.path.isdir(self.startpath):
                try:
                    self._cwd_before_startpath = os.getcwd()  # store it so one can return to it
                    os.chdir(self.startpath)
                except:
                    self.log.raiseException("_start_in_path: failed to change path from %s to startpath %s" %
                                            (self._cwd_before_startpath, self.startpath))
            else:
                self.log.raiseException("_start_in_path: provided startpath %s exists but is no directory" %
                                        self.startpath)
        else:
            self.log.raiseException("_start_in_path: startpath %s does not exist" % self.startpath)

    def _return_to_previous_start_in_path(self):
        """Change to original path before the change to startpath"""
        if self._cwd_before_startpath is None:
            self.log.warning("_return_to_previous_start_in_path: previous cwd is empty. Not trying anything")
            return

        if os.path.exists(self._cwd_before_startpath):
            if os.path.isdir(self._cwd_before_startpath):
                try:
                    currentpath = os.getcwd()
                    if not currentpath == self.startpath:
                        self.log.warning(("_return_to_previous_start_in_path: current directory %s does not match "
                                          "startpath %s") % (currentpath, self.startpath))
                    os.chdir(self._cwd_before_startpath)
                except:
                    self.log.raiseException(("_return_to_previous_start_in_path: failed to change path from current "
                                             "%s to previous path %s") % (currentpath, self._cwd_before_startpath))
            else:
                self.log.raiseException(("_return_to_previous_start_in_path: provided previous cwd path %s exists "
                                         "but is no directory") % self._cwd_before_startpath)
        else:
            self.log.raiseException("_return_to_previous_start_in_path: previous cwd path %s does not exist" %
                                    self._cwd_before_startpath)

    def _make_popen_named_args(self, others=None):
        """Create the named args for Popen"""
        self._popen_named_args = {
            'stdout': self._process_module.PIPE,
            'stderr': self._process_module.STDOUT,
            'stdin': self._process_module.PIPE,
            'close_fds': True,
            'shell': self.use_shell,
            'executable': self.shell,
        }
        if others is not None:
            self._popen_named_args.update(others)

        self.log.debug("_popen_named_args %s" % self._popen_named_args)

    def _make_shell_command(self):
        """Convert cmd into shell command"""
        if self.cmd is None:
            self.log.raiseException("_make_shell_command: no cmd set.")

        if isinstance(self.cmd, basestring):
            self._shellcmd = self.cmd
        elif isinstance(self.cmd, (list, tuple,)):
            self._shellcmd = " ".join(self.cmd)
        else:
            self.log.raiseException("Failed to convert cmd %s (type %s) into shell command" %
                                    (self.cmd, type(self.cmd)))

    def _init_process(self):
        """Initialise the self._process"""
        try:
            self._process = self._process_module.Popen(self._shellcmd, **self._popen_named_args)
        except OSError, err:
            self.log.raiseException("_init_process: init Popen shellcmd %s failed: %s" % (self._shellcmd, err))

    def _init_input(self):
        """Handle input, if any, in a simple way"""
        if self.input is not None:  # allow empty string (whatever it may mean)
            try:
                self._process.stdin.write(self.input)
            except:
                self.log.raiseException("_init_input: failed to write input %s to process" % self.input)

        if self.INIT_INPUT_CLOSE:
            self._process.stdin.close()
            self.log.debug("_init_input: process stdin closed")
        else:
            self.log.debug("_init_input: process stdin NOT closed")

    def _wait_for_process(self):
        """The main loop
            This one has the most simple loop
        """
        try:
            self._process_exitcode = self._process.wait()
            self._process_output = self._read_process(-1)  # -1 is read all
        except:
            self.log.raiseException("_wait_for_process: problem during wait exitcode %s output %s" %
                                    (self._process_exitcode, self._process_output))

    def _cleanup_process(self):
        """Cleanup any leftovers from the process"""
        pass

    def _read_process(self, readsize=None):
        """Read from process, return out"""
        if readsize is None:
            readsize = self.readsize
        if readsize is None:
            readsize = -1  # read all

        self.log.debug("_read_process: going to read with readsize %s" % readsize)
        out = self._process.stdout.read(readsize)
        return out

    def _post_exitcode(self):
        """Postprocess the exitcode in self._process_exitcode"""
        if not self._process_exitcode == 0:
            self._post_exitcode_log_failure("_post_exitcode: problem occurred with cmd %s: output %s" %
                                            (self.cmd, self._process_output))
        else:
            self.log.debug("_post_exitcode: success cmd %s: output %s" % (self.cmd, self._process_output))

    def _post_output(self):
        """Postprocess the output in self._process_output"""
        pass

    def _run_return(self):
        """What to return"""
        return self._process_exitcode, self._process_output

    def _killtasks(self, tasks=None, sig=signal.SIGKILL, kill_pgid=False):
        """
        Kill all tasks
            @param tasks: list of process ids
            @param sig: signal to use to kill
            @param kill_pgid: send kill to group
        """
        if tasks is None:
            self.log.error("killtasks no tasks passed")
        elif isinstance(tasks, basestring):
            try:
                tasks = [int(tasks)]
            except:
                self.log.error("killtasks failed to convert tasks string %s to int" % tasks)

        for pid in tasks:
            pgid = os.getpgid(pid)
            try:
                os.kill(int(pid), sig)
                if kill_pgid:
                    os.killpg(pgid, sig)
                self.log.debug("Killed %s with signal %s" % (pid, sig))
            except OSError, err:
                # ESRCH is no such process, so no issue
                if not err.errno == errno.ESRCH:
                    self.log.error("Failed to kill %s: %s" % (pid, err))
            except Exception, err:
                self.log.error("Failed to kill %s: %s" % (pid, err))

    def stop_tasks(self):
        """Cleanup current run"""
        self._killtasks(tasks=[self._process.pid])
        try:
            os.waitpid(-1, os.WNOHANG)
        except:
            pass


class RunNoWorries(Run):
    """When the exitcode is > 0, log.debug instead of log.error"""
    def __init__(self, cmd, **kwargs):
        super(RunNoWorries, self).__init__(cmd, **kwargs)
        self._post_exitcode_log_failure = self.log.debug


class RunLoopException(Exception):
    def __init__(self, code, output):
        self.code = code
        self.output = output

    def __str__(self):
        return "%s code %s output %s" % (self.__class__.__name__, self.code, self.output)


class RunLoop(Run):
    """Main process is a while loop which reads the output in blocks;
        one needs to read from time to time, otherwise the stdout/stderr buffer
        gets filled and it all stops working
    """
    LOOP_TIMEOUT_INIT = 0.1
    LOOP_TIMEOUT_MAIN = 1

    def __init__(self, cmd, **kwargs):
        super(RunLoop, self).__init__(cmd, **kwargs)
        self._loop_count = None
        self._loop_continue = None  # initial state, change this to break out of the loop

    def _wait_for_process(self):
        """Loop through the process in timesteps
            collected output is run through _loop_process_output
        """
        # these are initialised outside the function (cannot be forgotten, but can be overwritten)
        self._loop_count = 0  # internal counter
        self._loop_continue = True
        self._process_output = ''

        # further initialisation
        self._loop_initialise()

        time.sleep(self.LOOP_TIMEOUT_INIT)
        ec = self._process.poll()
        try:
            while self._loop_continue and ec < 0:
                output = self._read_process()
                self._process_output += output

                # process after updating the self._process_ vars
                self._loop_process_output(output)

                if len(output) == 0:
                    time.sleep(self.LOOP_TIMEOUT_MAIN)
                ec = self._process.poll()
                self._loop_count += 1

            self.log.debug("_wait_for_process: loop stopped after %s iterations (ec %s loop_continue %s)" %
                           (self._loop_count, ec, self._loop_continue))

            # read remaining data (all of it)
            output = self._read_process(-1)
            self._process_output += output
            self._process_exitcode = ec

            # process after updating the self._process_ vars
            self._loop_process_output_final(output)
        except RunLoopException, err:
            self.log.debug('RunLoopException %s' % err)
            self._process_output = err.output
            self._process_exitcode = err.code

    def _loop_initialise(self):
        """Initialisation before the loop starts"""
        pass

    def _loop_process_output(self, output):
        """Process the output that is read in blocks
            simplest form: do nothing
        """
        pass

    def _loop_process_output_final(self, output):
        """Process the remaining output that is read
            simplest form: do the same as _loop_process_output
        """
        self._loop_process_output(output)


class RunLoopLog(RunLoop):
    LOOP_LOG_LEVEL = logging.INFO

    def _wait_for_process(self):
        # initialise the info logger
        self.log.info("Going to run cmd %s" % self._shellcmd)
        super(RunLoopLog, self)._wait_for_process()

    def _loop_process_output(self, output):
        """Process the output that is read in blocks
            send it to the logger. The logger needs to be stream-like
        """
        self.log.streamLog(self.LOOP_LOG_LEVEL, output)
        super(RunLoopLog, self)._loop_process_output(output)


class RunLoopStdout(RunLoop):

    def _loop_process_output(self, output):
        """Process the output that is read in blocks
            send it to stdout
        """
        sys.stdout.write(output)
        sys.stdout.flush()
        super(RunLoopStdout, self)._loop_process_output(output)


class RunAsync(Run):
    """Async process class"""

    def _prep_module(self, modulepath=None, extendfromlist=None):
        # these will provide the required Popen, PIPE and STDOUT
        if modulepath is None:
            modulepath = PROCESS_MODULE_ASYNCPROCESS_PATH
        if extendfromlist is None:
            extendfromlist = ['send_all', 'recv_some']
        super(RunAsync, self)._prep_module(modulepath=modulepath, extendfromlist=extendfromlist)

    def _read_process(self, readsize=None):
        """Read from async process, return out"""
        if readsize is None:
            readsize = self.readsize

        if self._process.stdout is None:
            # nothing yet/anymore
            return ''

        try:
            if readsize is not None and readsize < 0:
                # read all blocking (it's not why we should use async)
                out = self._process.stdout.read()
            else:
                # non-blocking read (readsize is a maximum to return!)
                out = self._process_module.recv_some(self._process, maxread=readsize)
            return out
        except (IOError, Exception):
            # recv_some may throw Exception
            self.log.exception("_read_process: read failed")
            return ''


class RunFile(Run):
    """Popen to filehandle"""

    def __init__(self, cmd, **kwargs):
        self.filename = kwargs.pop('filename', None)
        self.filehandle = None
        super(RunFile, self).__init__(cmd, **kwargs)

    def _make_popen_named_args(self, others=None):
        if others is None:
            if os.path.exists(self.filename):
                if os.path.isfile(self.filename):
                    self.log.warning("_make_popen_named_args: going to overwrite existing file %s" % self.filename)
                elif os.path.isdir(self.filename):
                    self.log.raiseException(("_make_popen_named_args: writing to filename %s impossible. Path exists "
                                             "and is a directory.") % self.filename)
                else:
                    self.log.raiseException("_make_popen_named_args: path exists and is not a file or directory %s" %
                                            self.filename)
            else:
                dirname = os.path.dirname(self.filename)
                if dirname and not os.path.isdir(dirname):
                    try:
                        os.makedirs(dirname)
                    except:
                        self.log.raiseException(("_make_popen_named_args: dirname %s for file %s does not exist. "
                                                 "Creating it failed.") % (dirname, self.filename))

            try:
                self.filehandle = open(self.filename, 'w')
            except:
                self.log.raiseException("_make_popen_named_args: failed to open filehandle for file %s" %
                                        self.filename)

            others = {
                'stdout': self.filehandle,
            }

        super(RunFile, self)._make_popen_named_args(others=others)

    def _cleanup_process(self):
        """Close the filehandle"""
        try:
            self.filehandle.close()
        except:
            self.log.raiseException("_cleanup_process: failed to close filehandle for filename %s" % self.filename)

    def _read_process(self, readsize=None):
        """Meaningless for filehandle"""
        return ''


class RunPty(Run):
    """Pty support (eg for screen sessions)"""
    def _read_process(self, readsize=None):
        """This does not work for pty"""
        return ''

    def _make_popen_named_args(self, others=None):
        if others is None:
            (master, slave) = pty.openpty()
            others = {
                'stdin': slave,
                'stdout': slave,
                'stderr': slave,
            }
        super(RunPty, self)._make_popen_named_args(others=others)


class RunTimeout(RunLoop, RunAsync):
    """Run for at most timeout seconds, then kill the process"""

    def __init__(self, cmd, **kwargs):
        timeout = kwargs.pop('timeout', None)
        # allow a missing timeout; it is checked in _loop_process_output
        self.timeout = float(timeout) if timeout is not None else None
        self.start = time.time()
        super(RunTimeout, self).__init__(cmd, **kwargs)

    def _loop_process_output(self, output):
        time_passed = time.time() - self.start
        if self.timeout is not None and time_passed > self.timeout:
            self.log.debug("Time passed %s > timeout %s." % (time_passed, self.timeout))
            self.stop_tasks()

            # go out of loop
            raise RunLoopException(RUNRUN_TIMEOUT_EXITCODE, RUNRUN_TIMEOUT_OUTPUT)
        super(RunTimeout, self)._loop_process_output(output)


class RunQA(RunLoop, RunAsync):
    """Question/Answer processing"""
    LOOP_MAX_MISS_COUNT = 20
    INIT_INPUT_CLOSE = False
    CYCLE_ANSWERS = True

    def __init__(self, cmd, **kwargs):
        """
        Add question and answer style running
            @param qa: dict with exact questions and answers
            @param qa_reg: dict with (named) regex-questions and answers
                           (answers can contain named string templates)
            @param no_qa: list of regexes that can block the output but are not seen as questions

        Regular expressions are compiled, just pass the (raw) text.
        """
        qa = kwargs.pop('qa', {})
        qa_reg = kwargs.pop('qa_reg', {})
        no_qa = kwargs.pop('no_qa', [])

        self._loop_miss_count = None  # counter for number of misses
        self._loop_previous_output_length = None  # track length of output through loop

        super(RunQA, self).__init__(cmd, **kwargs)

        self.qa, self.qa_reg, self.no_qa = self._parse_qa(qa, qa_reg, no_qa)

    def _parse_qa(self, qa, qa_reg, no_qa):
        """
        Process the QandA dictionary
            - given initial set of Q and A (in dict), return dict of reg. exp. and A
            - make regular expression that matches the string with
                - replace whitespace
                - replace newline
            - qa_reg: question is compiled as is, and whitespace+ending is added
            - provided answers can be either strings or lists of strings (which will be used iteratively)
        """

        def escape_special(string):
            specials = '.*+?(){}[]|\$^'
            return re.sub(r"([%s])" % ''.join(['\%s' % x for x in specials]), r"\\\1", string)

        SPLIT = '[\s\n]+'
        REG_SPLIT = re.compile(r"" + SPLIT)

        def process_answers(answers):
            """Construct list of newline-terminated answers (as strings)."""
            if isinstance(answers, basestring):
                answers = [answers]
            elif isinstance(answers, list):
                # list is manipulated when answering matching question, so take a copy
                answers = answers[:]
            else:
                msg_tmpl = "Invalid type for answer, not a string or list: %s (%s)"
                self.log.raiseException(msg_tmpl % (type(answers), answers), exception=TypeError)

            # add optional split at the end
            for i in [idx for idx, a in enumerate(answers) if not a.endswith('\n')]:
                answers[i] += '\n'

            return answers

        def process_question(question):
            """Convert string question to regex."""
            split_q = [escape_special(x) for x in REG_SPLIT.split(question)]
            reg_q_txt = SPLIT.join(split_q) + SPLIT.rstrip('+') + "*$"
            reg_q = re.compile(r"" + reg_q_txt)
            if reg_q.search(question):
                return reg_q
            else:
                # this is just a sanity check on the created regex, can this actually occur?
                msg_tmpl = "_parse_qa process_question: question %s converted in %s does not match itself"
                self.log.raiseException(msg_tmpl % (question, reg_q_txt), exception=ValueError)

        new_qa = {}
        self.log.debug("new_qa: ")
        for question, answers in qa.items():
            reg_q = process_question(question)
            new_qa[reg_q] = process_answers(answers)
            self.log.debug("new_qa[%s]: %s" % (reg_q.pattern.__repr__(), answers))

        new_qa_reg = {}
        self.log.debug("new_qa_reg: ")
        for question, answers in qa_reg.items():
            reg_q = re.compile(r"" + question + r"[\s\n]*$")
            new_qa_reg[reg_q] = process_answers(answers)
            self.log.debug("new_qa_reg[%s]: %s" % (reg_q.pattern.__repr__(), answers))

        # simple statements, can contain wildcards
        new_no_qa = [re.compile(r"" + x + r"[\s\n]*$") for x in no_qa]
        self.log.debug("new_no_qa: %s" % [x.pattern.__repr__() for x in new_no_qa])

        return new_qa, new_qa_reg, new_no_qa

    def _loop_initialise(self):
        """Initialisation before the loop starts"""
        self._loop_miss_count = 0
        self._loop_previous_output_length = 0

    def _loop_process_output(self, output):
        """Process the output that is read in blocks
            check the output against the available questions and answer on a match
        """
        hit = False

        self.log.debug('output %s all_output %s' % (output, self._process_output))

        # qa first and then qa_reg
        nr_qa = len(self.qa)
        for idx, (question, answers) in enumerate(self.qa.items() + self.qa_reg.items()):
            res = question.search(self._process_output)
            if output and res:
                answer = answers[0] % res.groupdict()
                if len(answers) > 1:
                    prev_answer = answers.pop(0)
                    if self.CYCLE_ANSWERS:
                        answers.append(prev_answer)
                    self.log.debug("New answers list for question %s: %s" % (question.pattern, answers))
                self.log.debug("_loop_process_output: answer %s question %s (std: %s) out %s" %
                               (answer, question.pattern, idx >= nr_qa, self._process_output[-50:]))
                self._process_module.send_all(self._process, answer)
                hit = True
                break

        if not hit:
            curoutlen = len(self._process_output)
            if curoutlen > self._loop_previous_output_length:
                # still progress in output, just continue (but don't reset miss counter either)
                self._loop_previous_output_length = curoutlen
            else:
                noqa = False
                for r in self.no_qa:
                    if r.search(self._process_output):
                        self.log.debug("_loop_process_output: no_qa found for out %s" % self._process_output[-50:])
                        noqa = True
                if not noqa:
                    self._loop_miss_count += 1
        else:
            self._loop_miss_count = 0  # reset miss counter on hit

        if self._loop_miss_count > self.LOOP_MAX_MISS_COUNT:
            self.log.debug("loop_process_output: max misses LOOP_MAX_MISS_COUNT %s reached. End of output: %s" %
                           (self.LOOP_MAX_MISS_COUNT, self._process_output[-500:]))
            self.stop_tasks()

            # go out of loop
            raise RunLoopException(RUNRUN_QA_MAX_MISS_EXITCODE, self._process_output)

        super(RunQA, self)._loop_process_output(output)


class RunAsyncLoop(RunLoop, RunAsync):
    """Async read in loop"""
    pass


class RunAsyncLoopLog(RunLoopLog, RunAsync):
    """Async read, log to logger"""
    pass


class RunQALog(RunLoopLog, RunQA):
    """Async loop QA with LoopLog"""
    pass


class RunQAStdout(RunLoopStdout, RunQA):
    """Async loop QA with LoopLogStdout"""
    pass


class RunAsyncLoopStdout(RunLoopStdout, RunAsync):
    """Async read, flush to stdout"""
    pass


# convenient names
# eg: from vsc.utils.run import trivial
run_simple = Run.run
run_simple_noworries = RunNoWorries.run
run_async = RunAsync.run
run_asyncloop = RunAsyncLoop.run
run_timeout = RunTimeout.run
run_to_file = RunFile.run
run_async_to_stdout = RunAsyncLoopStdout.run

run_qa = RunQA.run
run_qalog = RunQALog.run
run_qastdout = RunQAStdout.run

if __name__ == "__main__":
    run_simple('echo ok')

license: lgpl-2.1
hash: 4259829277435143
line_mean: 35.813981
line_max: 132
alpha_frac: 0.568408
autogenerated: false
ratio: 4.003479
config_test: false
has_no_keywords: false
few_assignments: false
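
For orientation, a minimal usage sketch of the convenience wrappers exported at the bottom of run.py; it assumes vsc-base is installed and Python 2 (the module uses basestring and old-style except syntax):

```python
# Hedged sketch, not part of the dataset row above.
from vsc.utils.run import run_simple, run_asyncloop, run_timeout

ec, out = run_simple('echo hello')   # blocking wait; returns (exitcode, output)
ec, out = run_asyncloop('ls -l')     # reads output in a polling loop, avoiding pipe deadlock
ec, out = run_timeout('sleep 10', timeout=2)  # RUNRUN_TIMEOUT_EXITCODE (123) on timeout
```
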
repo_name: madoodia/codeLab
path: python/Built-in_Functions.py
copies: 1
size: 15493
content:
######################################### Built-in Functions #########################################

abs(x)
# Return the absolute value of a number
# .........................................
all(iterable)
# Return True if all elements of the iterable are true
# equivalent to:
def all(iterable):
    for element in iterable:
        if not element:
            return False
    return True
# .........................................
any(iterable)
# Return True if any element of the iterable is true
def any(iterable):
    for element in iterable:
        if element:
            return True
    return False
# .........................................
basestring()
# This abstract type is the superclass for str and unicode
obj = 'hello'
isinstance(obj, basestring)  # Return True
# .........................................
bin(x)
# Convert an integer number to a binary string
# .........................................
bool([x])
# Convert a value to a Boolean, using the standard truth testing procedure
# .........................................
bytearray([source[, encoding[, errors]]])
# Return a new array of bytes
# .........................................
callable(object)
# Return True if the object argument appears callable, False if not
def test():
    pass
callable(test)  # Return True

class A:
    pass
a = A()
callable(A)  # Return True
callable(a)  # Return False

class B:
    def __call__(self):
        pass
b = B()
callable(B)  # Return True
callable(b)  # Return True
# .........................................
chr(i)
# Return a string of one character whose ASCII code is the integer i
# .........................................
classmethod(function)
# Return a class method for function.
class C(object):
    @classmethod
    def f(cls, arg1, arg2, ...): ...
# The @classmethod form is a function decorator
# It can be called either on the class (such as C.f()) or on an instance (such as C().f()).
# The instance is ignored except for its class. If a class method is called for a derived class,
# the derived class object is passed as the implied first argument.
# .........................................
cmp(x, y)
# Compare the two objects x and y and return an integer according to the outcome
# .........................................
compile(source, filename, mode[, flags[, dont_inherit]])
# Compile the source into a code or AST object
# .........................................
complex([real[, imag]])
# Create a complex number with the value real + imag*j or convert a string or number to a complex number
# .........................................
delattr(object, name)
# This is a relative of setattr(). The arguments are an object and a string
# .........................................
dict(**kwarg)
dict(mapping, **kwarg)
dict(iterable, **kwarg)
# Create a new dictionary. The dict object is the dictionary class
# .........................................
dir([object])
# Without arguments, return the list of names in the current local scope
class Shape(object):
    def __dir__(self):
        return ['area', 'perimeter', 'location']
s = Shape()
dir(s)  # ['area', 'perimeter', 'location']
# .........................................
divmod(a, b)
# Take two (non complex) numbers as arguments and return a pair of numbers consisting of their
# quotient and remainder when using long division
# .........................................
enumerate(sequence, start=0)
# Return an enumerate object. sequence must be a sequence, an iterator, or some other object
# which supports iteration
seasons = ['Spring', 'Summer', 'Fall', 'Winter']
list(enumerate(seasons))
# [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]
list(enumerate(seasons, start=1))
# [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')]
# Equivalent to:
def enumerate(sequence, start=0):
    n = start
    for elem in sequence:
        yield n, elem
        n += 1
# .........................................
eval(expression[, globals[, locals]])
# The arguments are a Unicode or Latin-1 encoded string and optional globals and locals.
# If provided, globals must be a dictionary. If provided, locals can be any mapping object.
# .........................................
execfile(filename[, globals[, locals]])
# This function is similar to the exec statement, but parses a file instead of a string
# .........................................
file(name[, mode[, buffering]])
# Constructor function for the file type, described further in section File Objects
isinstance(f, file)
# .........................................
filter(function, iterable)
# Construct a list from those elements of iterable for which function returns true
# Note that filter(function, iterable) is equivalent to [item for item in iterable if function(item)]
# .........................................
float([x])
# Convert a string or a number to floating point
# .........................................
format(value[, format_spec])
# Convert a value to a "formatted" representation, as controlled by format_spec
# .........................................
frozenset([iterable])
# Return a new frozenset object, optionally with elements taken from iterable
# .........................................
getattr(object, name[, default])
# Return the value of the named attribute of object
# .........................................
globals()
# Return a dictionary representing the current global symbol table
# .........................................
hasattr(object, name)
# The arguments are an object and a string
# .........................................
hash(object)
# Return the hash value of the object (if it has one). Hash values are integers
# .........................................
help([object])
# Invoke the built-in help system
# .........................................
hex(x)
# Convert an integer number (of any size) to a hexadecimal string
# .........................................
id(object)
# Return the "identity" of an object
# .........................................
input([prompt])
# Equivalent to eval(raw_input(prompt)).
# .........................................
int(x=0)
int(x, base=10)
# Convert a number or string x to an integer, or return 0 if no arguments are given
# .........................................
isinstance(object, classinfo)
# Return true if the object argument is an instance of the classinfo argument
# .........................................
issubclass(class, classinfo)
# Return true if class is a subclass (direct, indirect or virtual) of classinfo
# .........................................
iter(o[, sentinel])
# Return an iterator object
with open('mydata.txt') as fp:
    for line in iter(fp.readline, ''):
        process_line(line)
# .........................................
len(s)
# Return the length (the number of items) of an object
# .........................................
list([iterable])
# Return a list whose items are the same and in the same order as iterable's items
# .........................................
locals()
# Update and return a dictionary representing the current local symbol table
# .........................................
long(x=0)
long(x, base=10)
# Convert a string or number to a long integer.
# .........................................
map(function, iterable, ...)
# Apply function to every item of iterable and return a list of the results
def adder(a, b):
    return a + b
numbers1 = [2, 4, 6, 8, 1, 10, 8, 9]
numbers2 = [4, 6, 8, 1, 10, 8, 9, 1]
mapper = map(adder, numbers1, numbers2)
# Result: [6, 10, 14, 9, 11, 18, 17, 10]
# .........................................
max(iterable[, key])
max(arg1, arg2, *args[, key])
# Return the largest item in an iterable or the largest of two or more arguments.
# .........................................
memoryview(obj)
# Return a "memory view" object created from the given argument
# .........................................
min(iterable[, key])
min(arg1, arg2, *args[, key])
# Return the smallest item in an iterable or the smallest of two or more arguments.
# .........................................
next(iterator[, default])
# Retrieve the next item from the iterator by calling its next() method
# .........................................
object()
# Return a new featureless object
# .........................................
oct(x)
# Convert an integer number (of any size) to an octal string
# .........................................
open(name[, mode[, buffering]])
# Open a file, returning an object of the file type described in section File Objects.
# If the file cannot be opened, IOError is raised
# .........................................
ord(c)
# Given a string of length one, return an integer representing the Unicode code point of the
# character when the argument is a unicode object
# .........................................
pow(x, y[, z])
# Return x to the power y
# pow(x, y) is equivalent to using the power operator: x**y
# To disable the statement and use the print() function, use this future
# statement at the top of your module:
from __future__ import print_function
# .........................................
property([fget[, fset[, fdel[, doc]]]])
# Return a property attribute for new-style classes (classes that derive from object).
class C(object):
    def __init__(self):
        self._x = None

    def getx(self):
        return self._x

    def setx(self, value):
        self._x = value

    def delx(self):
        del self._x

    x = property(getx, setx, delx, "I'm the 'x' property.")
# If then c is an instance of C, c.x will invoke the getter, c.x = value will invoke the setter
# and del c.x the deleter.

class Parrot(object):
    def __init__(self):
        self._voltage = 100000

    @property
    def voltage(self):
        """Get the current voltage."""
        return self._voltage
# turns the voltage() method into a "getter" for a read-only attribute with the same name

class C(object):
    def __init__(self):
        self._x = None

    @property
    def x(self):
        """I'm the 'x' property."""
        return self._x

    @x.setter
    def x(self, value):
        self._x = value

    @x.deleter
    def x(self):
        del self._x
# .........................................
range(stop)
range(start, stop[, step])
# This is a versatile function to create lists containing arithmetic progressions
range(10)
range(1, 11)
range(0, 30, 5)
range(0, 10, 3)
range(0, -10, -1)
range(0)
range(1, 0)
# .........................................
raw_input([prompt])
# If the prompt argument is present, it is written to standard output without a trailing newline
s = raw_input('--> ')
# -->
# .........................................
reduce(function, iterable[, initializer])
# Apply function of two arguments cumulatively to the items of iterable, from left to right,
# so as to reduce the iterable to a single value
def reduce(function, iterable, initializer=None):
    it = iter(iterable)
    if initializer is None:
        try:
            initializer = next(it)
        except StopIteration:
            raise TypeError('reduce() of empty sequence with no initial value')
    accum_value = initializer
    for x in it:
        accum_value = function(accum_value, x)
    return accum_value
# .........................................
reload(module)
# Reload a previously imported module
# .........................................
repr(object)
# Return a string containing a printable representation of an object. This is the same value
# yielded by conversions
# A class can control what this function returns for its instances by defining a __repr__() method.
# .........................................
reversed(seq)
# Return a reverse iterator.
# .........................................
round(number[, ndigits])
# Return the floating point value number rounded to ndigits digits after the decimal point
# .........................................
set([iterable])
# Return a new set object, optionally with elements taken from iterable
# .........................................
setattr(object, name, value)
# This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value
# For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123.
# .........................................
slice(stop)
slice(start, stop[, step])
# Return a slice object representing the set of indices specified by range(start, stop, step)
# .........................................
sorted(iterable[, cmp[, key[, reverse]]])
# Return a new sorted list from the items in iterable.
cmp=lambda x,y: cmp(x.lower(), y.lower())
# .........................................
staticmethod(function)
# Return a static method for function.
class C(object):
    @staticmethod
    def f(arg1, arg2, ...): ...
# It can be called either on the class (such as C.f()) or on an instance (such as C().f()).
# .........................................
str(object='')
# Return a string containing a nicely printable representation of an object.
# .........................................
sum(iterable[, start])
# Sums start and the items of an iterable from left to right and returns the total
# .........................................
super(type[, object-or-type])
# Return a proxy object that delegates method calls to a parent or sibling class of type
# Note: super() only works for new-style classes.
class C(B):
    def method(self, arg):
        super(C, self).method(arg)
# .........................................
tuple([iterable])
# Return a tuple whose items are the same and in the same order as iterable's items
# .........................................
type(object)
# With one argument, return the type of an object. The return value is a type object
type(name, bases, dict)
# With three arguments, return a new type object
# This is essentially a dynamic form of the class statement.
class X(object):
    a = 1
X = type('X', (object,), dict(a=1))
# .........................................
unichr(i)
# Return the Unicode string of one character whose Unicode code is the integer i
# .........................................
unicode(object='')
unicode(object[, encoding[, errors]])
# Return the Unicode string version of object
# .........................................
vars([object])
# Return the __dict__ attribute for a module, class, instance, or any other object with a
# __dict__ attribute.
# Without an argument, vars() acts like locals().
# .........................................
xrange(stop)
xrange(start, stop[, step])
# This function is very similar to range(), but returns an xrange object instead of a list
# .........................................
zip([iterable, ...])
# This function returns a list of tuples, where the i-th tuple contains the i-th element from
# each of the argument sequences or iterables
# zip() is similar to map() with an initial argument of None
x = [1, 2, 3]
y = [4, 5, 6]
zipped = zip(x, y)
zipped
# [(1, 4), (2, 5), (3, 6)]
x2, y2 = zip(*zipped)
x == list(x2) and y == list(y2)
# True
# .........................................
__import__(name[, globals[, locals[, fromlist[, level]]]])
# Note: This is an advanced function that is not needed in everyday Python programming
# This function is invoked by the import statement
# .........................................

license: mit
hash: 4427158441983250000
line_mean: 46.904025
line_max: 217
alpha_frac: 0.545208
autogenerated: false
ratio: 4.106423
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: pmrowla/goonbcs
path: goonbcs/models.py
copies: 1
size: 3450
content:
# Copyright (c) 2013 Peter Rowlands

from __future__ import absolute_import

from flask.ext.security import UserMixin, RoleMixin

from . import db


class Conference(db.Model):
    """A college football conference"""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    subdivision_id = db.Column(db.Integer, db.ForeignKey('subdivision.id'))
    teams = db.relationship('Team', backref='conference', lazy='dynamic')
    divisions = db.relationship('Division', backref='conference', lazy='dynamic')


class Division(db.Model):
    """A conference division (i.e. the SEC East)"""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    conference_id = db.Column(db.Integer, db.ForeignKey('conference.id'))
    teams = db.relationship('Team', backref='division', lazy='dynamic')


class Poll(db.Model):
    """A single user's poll for a single week"""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    week_id = db.Column(db.Integer, db.ForeignKey('week.id'))
    moon_poll = db.Column(db.Boolean, default=False)
    votes = db.relationship('Vote', backref='poll', lazy='dynamic')


class Season(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    year = db.Column(db.Integer, unique=True)
    weeks = db.relationship('Week', backref='season', lazy='dynamic')


class Subdivision(db.Model):
    """A college football subdivision (i.e. FBS)"""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    conferences = db.relationship('Conference', backref='subdivision', lazy='dynamic')


class Team(db.Model):
    """A college football team"""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    school = db.Column(db.String(255), unique=True)
    conference_id = db.Column(db.Integer, db.ForeignKey('conference.id'))
    division_id = db.Column(db.Integer, db.ForeignKey('division.id'))


class Vote(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    poll_id = db.Column(db.Integer, db.ForeignKey('poll.id'))
    team_id = db.Column(db.Integer, db.ForeignKey('team.id'))
    rank = db.Column(db.Integer)
    db.UniqueConstraint('poll_id', 'team_id', name='uidx_poll_team')


class Week(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    num = db.Column(db.Integer)
    season_id = db.Column(db.Integer, db.ForeignKey('season.id'))


#######################
# Flask-security models
#######################

roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))


class Role(db.Model, RoleMixin):
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))


class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    polls = db.relationship('Poll', backref='user', lazy='dynamic')

license: mit
hash: -7676578578878434000
line_mean: 33.848485
line_max: 75
alpha_frac: 0.648406
autogenerated: false
ratio: 3.317308
config_test: false
has_no_keywords: false
few_assignments: false
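
A short, hypothetical sketch of how these models compose; it assumes `db` is bound to a configured Flask app and `db.create_all()` has been run (the goonbcs import path matches the row's path, the rest is illustrative):

```python
# Illustrative only; app wiring and session setup are assumed.
from goonbcs import db
from goonbcs.models import Season, Week

season = Season(year=2013)
db.session.add(season)
db.session.commit()

# Week.season_id is the foreign key; season.weeks is the dynamic backref.
week = Week(num=1, season_id=season.id)
db.session.add(week)
db.session.commit()
print(season.weeks.count())  # 1
```
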
repo_name: charany1/googlecl
path: src/debug_util.py
copies: 2
size: 2144
content:
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities that should not be distributed with source."""

__author__ = '[email protected] (Tom Miller)'

import atom
import inspect

dull_types = [str, unicode, dict, list, type(None)]


def walk_attributes(myobject, object_name, tabitem='=', step=True, tablevel=0):
    """Walk through attributes of an instance.

    Just flat out prints varying values of dir() for instances and their
    attributes.

    Args:
      myobject: instance to walk through
      object_name: Name of the instance being walked through
      tabitem: String to show depth into myobject. Set to '' to disable.
      step: bool Use raw_input('') after printing each attribute
      tablevel: Depth into myobject (starts at 0)

    Returns:
      NATHING!
    """
    print tabitem*tablevel + 'Object: ' + object_name
    print tabitem*tablevel + 'Type: ' + str(type(myobject))
    attr_list = [attr for attr in dir(myobject)
                 if not attr.startswith('_') and
                 not inspect.ismethod(getattr(myobject, attr))]
    print tabitem*tablevel + 'Attributes: '
    print tabitem*tablevel + str(attr_list)
    dull_attr = [attr for attr in attr_list
                 if type(getattr(myobject, attr)) in dull_types]
    if dull_attr:
        print tabitem*tablevel + '(basic attributes: ' + str(dull_attr) + ')'
    loopable_attr = [attr for attr in attr_list
                     if not type(getattr(myobject, attr)) in dull_types]
    for attr_name in loopable_attr:
        new_object = getattr(myobject, attr_name)
        if step:
            raw_input('')
        walk_attributes(new_object, attr_name, tablevel=tablevel+1)

license: mit
hash: 1641560307784319000
line_mean: 34.733333
line_max: 79
alpha_frac: 0.695429
autogenerated: false
ratio: 3.702936
config_test: false
has_no_keywords: false
few_assignments: false
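
walk_attributes recurses into every attribute whose type is not in dull_types; a toy call under Python 2 (assuming the `atom` package imported by the module is installed, since importing debug_util pulls it in):

```python
# Toy stand-in for the gdata/atom objects this helper was written to inspect;
# step=False disables the raw_input() pause between attributes.
import debug_util

class Inner(object):
    text = 'hello'       # "dull" str attribute: listed, not recursed into

class Outer(object):
    name = 'example'     # dull
    inner = Inner()      # non-dull, so walk_attributes recurses into it

debug_util.walk_attributes(Outer(), 'outer', step=False)
```
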
repo_name: heromod/migrid
path: mig/edpickle.py
copies: 1
size: 2501
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# edpickle - a simple pickled object editor.
# Copyright (C) 2009 Jonas Bardino
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""Edit pickled objects on disk"""

import os
import sys

from shared.serial import pickle

if len(sys.argv) < 2:
    print 'Usage: %s PATH' % sys.argv[0]
    print 'Edit pickled object in file PATH'
    sys.exit(1)

dirty = False
path = sys.argv[1]
print "opening pickle in %s" % path
pickle_fd = open(path, 'rb+')
obj = pickle.load(pickle_fd)
print "pickled object loaded as 'obj'"
while True:
    command = raw_input("Enter command: ")
    command = command.lower().strip()
    if command in ['o', 'open']:
        path = raw_input("Path to open: ")
        pickle_fd = open(path, 'rb+')
        obj = pickle.load(pickle_fd)
    elif command in ['h', 'help']:
        print "Valid commands include:"
        print "(d)isplay to display the opened pickled object"
        print "(e)dit to edit the opened pickled object"
        print "(o)pen to open a new pickle file"
        print "(c)lose to close the opened pickled object"
        print "(q)uit to quit pickle editor"
    elif command in ['d', 'display']:
        print obj
    elif command in ['e', 'edit']:
        edit = raw_input("Edit command: ")
        # eval(edit)
        eval(compile(edit, 'command-line', 'single'))
        dirty = True
    elif command in ['c', 'close', 'q', 'quit']:
        if dirty:
            flush = raw_input("Modified object not saved - save now?: ")
            if flush.lower() in ('y', 'yes'):
                pickle_fd.seek(0)
                pickle.dump(obj, pickle_fd)
        pickle_fd.close()
        obj = None
        if command in ('q', 'quit'):
            print "Closing"
            break
    else:
        print "unknown command '%s'" % command

license: gpl-2.0
hash: -489058328799537700
line_mean: 32.346667
line_max: 81
alpha_frac: 0.628149
autogenerated: false
ratio: 3.727273
config_test: false
has_no_keywords: false
few_assignments: false

repo_name: NewAcropolis/api
path: app/routes/venues/rest.py
copies: 1
size: 3126
content:
import os

from flask import (
    Blueprint,
    current_app,
    jsonify,
    request
)
from flask_jwt_extended import jwt_required

from app.dao.venues_dao import (
    dao_create_venue,
    dao_get_venues,
    dao_update_venue,
    dao_get_venue_by_id
)

from app.errors import register_errors

from app.routes.venues.schemas import (
    post_create_venue_schema,
    post_create_venues_schema,
    post_import_venues_schema,
    post_update_venue_schema
)
from app.models import Venue
from app.schema_validation import validate

venues_blueprint = Blueprint('venues', __name__)
venue_blueprint = Blueprint('venue', __name__)

register_errors(venues_blueprint)
register_errors(venue_blueprint)


@venues_blueprint.route('/venues')
@jwt_required
def get_venues():
    venues = [e.serialize() if e else None for e in dao_get_venues()]
    return jsonify(venues)


@venue_blueprint.route('/venue/<uuid:venue_id>', methods=['GET'])
def get_venue_by_id(venue_id):
    current_app.logger.info('get_venue: {}'.format(venue_id))
    venue = dao_get_venue_by_id(venue_id)
    return jsonify(venue.serialize())


@venue_blueprint.route('/venue', methods=['POST'])
def create_venue():
    data = request.get_json(force=True)

    validate(data, post_create_venue_schema)

    venue = Venue(**data)

    dao_create_venue(venue)

    return jsonify(venue.serialize()), 201


@venues_blueprint.route('/venues', methods=['POST'])
@jwt_required
def create_venues():
    data = request.get_json(force=True)

    validate(data, post_create_venues_schema)

    venues = []
    for item in data:
        venue = Venue.query.filter(Venue.name == item['name']).first()
        if not venue:
            venue = Venue(**item)
            venues.append(venue)
            dao_create_venue(venue)
        else:
            current_app.logger.info('venue already exists: {}'.format(venue.name))
    return jsonify([v.serialize() for v in venues]), 201


@venues_blueprint.route('/venues/import', methods=['POST'])
@jwt_required
def import_venues():
    data = request.get_json(force=True)

    validate(data, post_import_venues_schema)

    venues = []
    for item in data:
        if not item["name"]:
            item["name"] = "Head branch"
        venue = Venue.query.filter(Venue.old_id == item['id']).first()
        if not venue:
            venue = Venue(
                old_id=item['id'],
                name=item['name'],
                address=item['address'],
                directions="<div>Bus: {bus}</div><div>Train: {train}</div>".format(
                    bus=item['bus'], train=item['tube'])
            )
            venues.append(venue)
            dao_create_venue(venue)
        else:
            current_app.logger.info('venue already exists: {}'.format(venue.name))
    return jsonify([v.serialize() for v in venues]), 201


@venue_blueprint.route('/venue/<uuid:venue_id>', methods=['POST'])
def update_venue(venue_id):
    data = request.get_json()

    validate(data, post_update_venue_schema)

    fetched_venue = dao_get_venue_by_id(venue_id)
    dao_update_venue(venue_id, **data)

    return jsonify(fetched_venue.serialize()), 200

license: mit
hash: 2526085522634169000
line_mean: 25.948276
line_max: 119
alpha_frac: 0.636916
autogenerated: false
ratio: 3.332623
config_test: false
has_no_keywords: false
few_assignments: false
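
A hedged sketch of exercising the unauthenticated venue endpoints with Flask's test client; the `create_app` factory and the exact fields accepted by the schemas are assumptions, as neither is shown in this file:

```python
# Assumptions: an app factory exists and the venue schema accepts name/address.
from app import create_app

client = create_app().test_client()

resp = client.post('/venue', json={'name': 'Main hall', 'address': '1 High St'})
assert resp.status_code == 201

venue_id = resp.get_json()['id']  # assumes serialize() exposes the id
resp = client.post('/venue/{}'.format(venue_id), json={'name': 'Renamed hall'})
assert resp.status_code == 200
```
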
repo_name: klahnakoski/jx-sqlite
path: vendor/mo_collections/unique_index.py
copies: 1
size: 5570
content:
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#

from __future__ import absolute_import, division, unicode_literals

from mo_dots import is_data, is_sequence, tuplewrap, unwrap, wrap
from mo_dots.objects import datawrap
from mo_future import PY2, iteritems, Set, Mapping, Iterable
from mo_logs import Log
from mo_logs.exceptions import suppress_exception

DEBUG = False


class UniqueIndex(Set, Mapping):
    """
    DEFINE A SET OF ATTRIBUTES THAT UNIQUELY IDENTIFIES EACH OBJECT IN A list.
    THIS ALLOWS set-LIKE COMPARISIONS (UNION, INTERSECTION, DIFFERENCE, ETC)
    WHILE STILL MAINTAINING list-LIKE FEATURES
    KEYS CAN BE DOT-DELIMITED PATHS TO DEEP INNER OBJECTS
    """

    def __init__(self, keys, data=None, fail_on_dup=True):
        self._data = {}
        self._keys = tuplewrap(keys)
        self.count = 0
        self.fail_on_dup = fail_on_dup
        if data:
            for d in data:
                self.add(d)

    def __getitem__(self, key):
        try:
            _key = value2key(self._keys, key)
            if len(self._keys) == 1 or len(_key) == len(self._keys):
                d = self._data.get(_key)
                return wrap(d)
            else:
                output = wrap([
                    d
                    for d in self._data.values()
                    if all(wrap(d)[k] == v for k, v in _key.items())
                ])
                return output
        except Exception as e:
            Log.error("something went wrong", e)

    def __setitem__(self, key, value):
        Log.error("Use add() to add to an index")
        # try:
        #     key = value2key(self._keys, key)
        #     d = self._data.get(key)
        #     if d != None:
        #         Log.error("key already filled")
        #     self._data[key] = unwrap(value)
        #     self.count += 1
        #
        # except Exception as e:
        #     Log.error("something went wrong", e)

    def keys(self):
        return self._data.keys()

    def pop(self):
        output = iteritems(self._data).next()[1]
        self.remove(output)
        return wrap(output)

    def add(self, val):
        val = datawrap(val)
        key = value2key(self._keys, val)
        if key == None:
            Log.error("Expecting key to be not None")

        try:
            d = self._data.get(key)
        except Exception as e:
            key = value2key(self._keys, val)

        if d is None:
            self._data[key] = unwrap(val)
            self.count += 1
        elif d is not val:
            if self.fail_on_dup:
                Log.error("{{new|json}} with key {{key|json}} already filled with {{old|json}}",
                          key=key, new=val, old=self[val])
            elif DEBUG:
                Log.warning(
                    "key {{key|json}} already filled\nExisting\n{{existing|json|indent}}\nValue\n{{value|json|indent}}",
                    key=key,
                    existing=d,
                    value=val
                )

    def extend(self, values):
        for v in values:
            self.add(v)

    def remove(self, val):
        key = value2key(self._keys, datawrap(val))
        if key == None:
            Log.error("Expecting key to not be None")

        d = self._data.get(key)
        if d is None:
            # ALREADY GONE
            return
        else:
            del self._data[key]
            self.count -= 1

    def __contains__(self, key):
        return self[key] != None

    if PY2:
        def __iter__(self):
            return (wrap(v) for v in self._data.itervalues())
    else:
        def __iter__(self):
            return (wrap(v) for v in self._data.values())

    def __sub__(self, other):
        output = UniqueIndex(self._keys, fail_on_dup=self.fail_on_dup)
        for v in self:
            if v not in other:
                output.add(v)
        return output

    def __and__(self, other):
        output = UniqueIndex(self._keys)
        for v in self:
            if v in other:
                output.add(v)
        return output

    def __or__(self, other):
        output = UniqueIndex(self._keys)
        for v in self:
            output.add(v)
        for v in other:
            with suppress_exception:
                output.add(v)
        return output

    def __ior__(self, other):
        for v in other:
            with suppress_exception:
                self.add(v)
        return self

    def __xor__(self, other):
        if not isinstance(other, Iterable):
            Log.error("Expecting other to be iterable")
        other = UniqueIndex(keys=self._keys, data=other, fail_on_dup=False)
        return (self - other) | (other - self)

    def __len__(self):
        if self.count == 0:
            for d in self:
                self.count += 1
        return self.count

    def subtract(self, other):
        return self.__sub__(other)

    def intersect(self, other):
        return self.__and__(other)


def value2key(keys, val):
    if len(keys) == 1:
        if is_data(val):
            return val[keys[0]]
        elif is_sequence(val):
            return val[0]
        else:
            return val
    else:
        if is_data(val):
            return datawrap({k: val[k] for k in keys})
        elif is_sequence(val):
            return datawrap(dict(zip(keys, val)))
        else:
            Log.error("do not know what to do here")

license: mpl-2.0
hash: -1573903108648493300
line_mean: 28.315789
line_max: 129
alpha_frac: 0.523878
autogenerated: false
ratio: 3.760972
config_test: false
has_no_keywords: false
few_assignments: false
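
A minimal usage sketch of UniqueIndex; it assumes the mo-* packages vendored alongside this file are installed, and relies on lookups returning mo-dots-wrapped values so attribute access works on the rows:

```python
from mo_collections.unique_index import UniqueIndex

people = UniqueIndex(keys="id", data=[{"id": 1, "name": "ann"},
                                      {"id": 2, "name": "bob"}])
bobs = UniqueIndex(keys="id", data=[{"id": 2, "name": "bob"}])

print(people[1].name)      # "ann": lookup by the single "id" key
print(len(people - bobs))  # 1: set difference keyed on "id"
print(2 in people)         # True: __contains__ goes through __getitem__
```
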
repo_name: gdetor/SI-RF-Structure
path: Statistics/clear_data.py
copies: 1
size: 5369
content:
# Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and # Nicolas P. Rougier ([email protected]) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # This file is part of the source code accompany the peer-reviewed article: # [1] "Structure of Receptive Fields in a Computational Model of Area 3b of # Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier, # Frontiers in Computational Neuroscience, 2014. # # This script applies all the filters and cleaning techniques to the ncRFs. You # have to use this script before any further statistical analysis to the data. import numpy as np from matplotlib import rc import matplotlib.pylab as plt from scipy.stats.stats import pearsonr from scipy.stats.mstats import gmean from scipy.ndimage import gaussian_filter def locate_noise( input ): n = input.shape[0] data = input.copy() count = 0 for i in range( 1,n-1 ): for j in range( 1,n-1 ): if data[i,j] != 0: if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]): count += 1 if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]): count += 1 if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]): count += 1 if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]): count += 1 if count < 2: data[i,j] = 0 count = 0 return data # Computing the area of the receptive fields according to Dicarlo's # protocol described in article "Structure of Receptive Fields in area 3b... def clear_data( RFs, n ): p = 25 Z, T = [], [] Noise = np.load( 'noise.npy' ).reshape(n*n,p,p) cRFs = np.zeros((n*n,p,p)) for i in range( n ): for j in range( n ): RF = RFs[i,j,...] # WARNING : Centering the RF s0,s1 = np.unravel_index(np.argmax(RF),RF.shape) RF = np.roll(RF,13-s0,axis=0) RF = np.roll(RF,13-s1,axis=1) # WARNING : Centering the RF # RF += Noise[i*n+j] # RF = gaussian_filter( RF, sigma=2.2 ) RF += 1.5*Noise[i*n+j] RF = gaussian_filter( RF, sigma=1.5 ) abs_max = np.max( np.abs( RF ) ) RF[np.where( ( ( RF < +0.10*abs_max ) & (RF>0) ) | ( ( RF > -0.10*abs_max ) & (RF < 0) ) ) ]=0 RF = locate_noise( RF ) cRFs[i*n+j,...] 
= RF exc = 50.0 * ( RF > 0).sum()/( p * p ) inh = 50.0 * ( RF < 0).sum()/( p * p ) Z.append([exc,inh]) Z = np.nan_to_num(np.array(Z)) print '------ Excitatory ------- Inhibitory -------' print 'Minimum :', Z[:,0].min(), Z[:,1].min() print 'Maximum :', Z[:,0].max(), Z[:,1].max() print 'Mean :', np.mean( Z[:,0] ), np.mean( Z[:,1] ) print 'LogMean :', np.mean( np.log10(Z[:,0]) ), np.mean( np.log10(Z[:,1]) ) print 'SD : ', np.std( np.log10(Z[:,0]) ), np.std( np.log10(Z[:,1]) ) print 'GMean :', gmean( Z[:,0] ), gmean( Z[:,1] ) print "Pearson cor: ", pearsonr( Z[:,0], np.abs(Z[:,1]) ) return Z, cRFs # Computing the SNR of the receptive fields. def snr( signal, sigma ): k = signal.shape[0] # Filtering the input signal filtered_s = gaussian_filter( signal, sigma ) # Computing background noise noise = signal - filtered_s # Computing noise variance noise_var = np.var( noise ) # Computing signal and noise power signalPow = np.sum( signal**2 )/k noisePow = np.sum( noise**2 )/k # Computing snr and noise index snr = 10.0 * np.log10( signalPow/noisePow ) noise_index = noise_var/np.abs(signal).max() *100.0 return snr, noise_index, filtered_s # Main :p if __name__=='__main__': np.random.seed(137) RFs = np.load('real-rfs-ref.npy').reshape(32,32,25,25) n, size, bins = RFs.shape[0], RFs.shape[2], 70 Z, cRFs = clear_data( RFs, n ) np.save('areas-ref', Z) np.save('cleared-rfs', cRFs)
gpl-3.0
-1,296,385,003,780,873,000
37.625899
106
0.631589
false
3.092742
false
false
false
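The clear_data.py record above thresholds each receptive field at 10% of its absolute maximum before counting excitatory and inhibitory pixels; for nonzero entries, its two-sided mask reduces to |RF| < 0.10 * abs_max. A minimal standalone sketch of that step (the array rf below is a hypothetical stand-in for one 25x25 field, not data from the record):

import numpy as np

def threshold_rf(rf, fraction=0.10):
    # Zero every value whose magnitude is below `fraction` of the field's
    # absolute maximum -- equivalent to the +/-10% mask in clear_data().
    abs_max = np.max(np.abs(rf))
    out = rf.copy()
    out[np.abs(out) < fraction * abs_max] = 0.0
    return out

rf = np.random.randn(25, 25)   # illustrative stand-in for one ncRF
cleaned = threshold_rf(rf)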
cheral/orange3
doc/development/source/orange-demo/orangedemo/OWLearningCurveB.py
2
13882
import sys from collections import OrderedDict from functools import reduce import numpy import sklearn.cross_validation from PyQt4.QtGui import QTableWidget, QTableWidgetItem import Orange.data import Orange.classification from Orange.widgets import widget, gui, settings from Orange.evaluation.testing import Results class OWLearningCurveB(widget.OWWidget): name = "Learning Curve (B)" description = ("Takes a data set and a set of learners and shows a " "learning curve in a table") icon = "icons/LearningCurve.svg" priority = 1010 # [start-snippet-1] inputs = [("Data", Orange.data.Table, "set_dataset", widget.Default), ("Test Data", Orange.data.Table, "set_testdataset"), ("Learner", Orange.classification.Learner, "set_learner", widget.Multiple + widget.Default)] # [end-snippet-1] #: cross validation folds folds = settings.Setting(5) #: points in the learning curve steps = settings.Setting(10) #: index of the selected scoring function scoringF = settings.Setting(0) #: compute curve on any change of parameters commitOnChange = settings.Setting(True) def __init__(self): super().__init__() # sets self.curvePoints, self.steps equidistant points from # 1/self.steps to 1 self.updateCurvePoints() self.scoring = [ ("Classification Accuracy", Orange.evaluation.scoring.CA), ("AUC", Orange.evaluation.scoring.AUC), ("Precision", Orange.evaluation.scoring.Precision), ("Recall", Orange.evaluation.scoring.Recall) ] #: input data on which to construct the learning curve self.data = None #: optional test data self.testdata = None #: A {input_id: Learner} mapping of current learners from input channel self.learners = OrderedDict() #: A {input_id: List[Results]} mapping of input id to evaluation #: results list, one for each curve point self.results = OrderedDict() #: A {input_id: List[float]} mapping of input id to learning curve #: point scores self.curves = OrderedDict() # GUI box = gui.widgetBox(self.controlArea, "Info") self.infoa = gui.widgetLabel(box, 'No data on input.') self.infob = gui.widgetLabel(box, 'No learners.') gui.separator(self.controlArea) box = gui.widgetBox(self.controlArea, "Evaluation Scores") gui.comboBox(box, self, "scoringF", items=[x[0] for x in self.scoring], callback=self._invalidate_curves) gui.separator(self.controlArea) box = gui.widgetBox(self.controlArea, "Options") gui.spin(box, self, 'folds', 2, 100, step=1, label='Cross validation folds: ', keyboardTracking=False, callback=lambda: self._invalidate_results() if self.commitOnChange else None ) gui.spin(box, self, 'steps', 2, 100, step=1, label='Learning curve points: ', keyboardTracking=False, callback=[self.updateCurvePoints, lambda: self._invalidate_results() if self.commitOnChange else None]) gui.checkBox(box, self, 'commitOnChange', 'Apply setting on any change') self.commitBtn = gui.button(box, self, "Apply Setting", callback=self._invalidate_results, disabled=True) gui.rubber(self.controlArea) # table widget self.table = gui.table(self.mainArea, selectionMode=QTableWidget.NoSelection) ########################################################################## # slots: handle input signals def set_dataset(self, data): """Set the input train dataset.""" # Clear all results/scores for id in list(self.results): self.results[id] = None for id in list(self.curves): self.curves[id] = None self.data = data if data is not None: self.infoa.setText('%d instances in input data set' % len(data)) else: self.infoa.setText('No data on input.') self.commitBtn.setEnabled(self.data is not None) def set_testdataset(self, testdata): """Set a 
separate test dataset.""" # Clear all results/scores for id in list(self.results): self.results[id] = None for id in list(self.curves): self.curves[id] = None self.testdata = testdata def set_learner(self, learner, id): """Set the input learner for channel id.""" if id in self.learners: if learner is None: # remove a learner and corresponding results del self.learners[id] del self.results[id] del self.curves[id] else: # update/replace a learner on a previously connected link self.learners[id] = learner # invalidate the cross-validation results and curve scores # (will be computed/updated in `_update`) self.results[id] = None self.curves[id] = None else: if learner is not None: self.learners[id] = learner # initialize the cross-validation results and curve scores # (will be computed/updated in `_update`) self.results[id] = None self.curves[id] = None if len(self.learners): self.infob.setText("%d learners on input." % len(self.learners)) else: self.infob.setText("No learners.") self.commitBtn.setEnabled(len(self.learners)) def handleNewSignals(self): if self.data is not None: self._update() self._update_curve_points() self._update_table() def _invalidate_curves(self): if self.data is not None: self._update_curve_points() self._update_table() def _invalidate_results(self): for id in self.learners: self.curves[id] = None self.results[id] = None if self.data is not None: self._update() self._update_curve_points() self._update_table() def _update(self): assert self.data is not None # collect all learners for which results have not yet been computed need_update = [(id, learner) for id, learner in self.learners.items() if self.results[id] is None] if not need_update: return learners = [learner for _, learner in need_update] self.progressBarInit() if self.testdata is None: # compute the learning curve result for all learners in one go results = learning_curve( learners, self.data, folds=self.folds, proportions=self.curvePoints, callback=lambda value: self.progressBarSet(100 * value) ) else: results = learning_curve_with_test_data( learners, self.data, self.testdata, times=self.folds, proportions=self.curvePoints, callback=lambda value: self.progressBarSet(100 * value) ) self.progressBarFinished() # split the combined result into per learner/model results results = [list(Results.split_by_model(p_results)) for p_results in results] for i, (id, learner) in enumerate(need_update): self.results[id] = [p_results[i] for p_results in results] def _update_curve_points(self): for id in self.learners: curve = [self.scoring[self.scoringF][1](x)[0] for x in self.results[id]] self.curves[id] = curve def _update_table(self): self.table.setRowCount(0) self.table.setRowCount(len(self.curvePoints)) self.table.setColumnCount(len(self.learners)) self.table.setHorizontalHeaderLabels( [learner.name for _, learner in self.learners.items()]) self.table.setVerticalHeaderLabels( ["{:.2f}".format(p) for p in self.curvePoints]) if self.data is None: return for column, curve in enumerate(self.curves.values()): for row, point in enumerate(curve): self.table.setItem( row, column, QTableWidgetItem("{:.5f}".format(point))) for i in range(len(self.learners)): sh = self.table.sizeHintForColumn(i) cwidth = self.table.columnWidth(i) self.table.setColumnWidth(i, max(sh, cwidth)) def updateCurvePoints(self): self.curvePoints = [(x + 1.)/self.steps for x in range(self.steps)] def learning_curve(learners, data, folds=10, proportions=None, random_state=None, callback=None): if proportions is None: proportions = numpy.linspace(0.0, 1.0, 10 
+ 1, endpoint=True)[1:] def select_proportion_preproc(data, p, rstate=None): assert 0 < p <= 1 rstate = numpy.random.RandomState(None) if rstate is None else rstate indices = rstate.permutation(len(data)) n = int(numpy.ceil(len(data) * p)) return data[indices[:n]] if callback is not None: parts_count = len(proportions) callback_wrapped = lambda part: \ lambda value: callback(value / parts_count + part / parts_count) else: callback_wrapped = lambda part: None results = [ Orange.evaluation.CrossValidation( data, learners, k=folds, preprocessor=lambda data, p=p: select_proportion_preproc(data, p), callback=callback_wrapped(i) ) for i, p in enumerate(proportions) ] return results def learning_curve_with_test_data(learners, traindata, testdata, times=10, proportions=None, random_state=None, callback=None): if proportions is None: proportions = numpy.linspace(0.0, 1.0, 10 + 1, endpoint=True)[1:] def select_proportion_preproc(data, p, rstate=None): assert 0 < p <= 1 rstate = numpy.random.RandomState(None) if rstate is None else rstate indices = rstate.permutation(len(data)) n = int(numpy.ceil(len(data) * p)) return data[indices[:n]] if callback is not None: parts_count = len(proportions) * times callback_wrapped = lambda part: \ lambda value: callback(value / parts_count + part / parts_count) else: callback_wrapped = lambda part: None results = [ [Orange.evaluation.TestOnTestData( traindata, testdata, learners, preprocessor=lambda data, p=p: select_proportion_preproc(data, p), callback=callback_wrapped(i * times + t)) for t in range(times)] for i, p in enumerate(proportions) ] results = [reduce(results_add, res, Orange.evaluation.Results()) for res in results] return results def results_add(x, y): def is_empty(res): return (getattr(res, "models", None) is None and getattr(res, "row_indices", None) is None) if is_empty(x): return y elif is_empty(y): return x assert x.data is y.data assert x.domain is y.domain assert x.predicted.shape[0] == y.predicted.shape[0] row_indices = numpy.hstack((x.row_indices, y.row_indices)) predicted = numpy.hstack((x.predicted, y.predicted)) actual = numpy.hstack((x.actual, y.actual)) xprob = getattr(x, "probabilities", None) yprob = getattr(y, "probabilities", None) if xprob is None and yprob is None: prob = None elif xprob is not None and yprob is not None: prob = numpy.concatenate((xprob, yprob), axis=1) else: raise ValueError() res = Orange.evaluation.Results() res.data = x.data res.domain = x.domain res.row_indices = row_indices res.actual = actual res.predicted = predicted res.folds = None if prob is not None: res.probabilities = prob if x.models is not None and y.models is not None: res.models = [xm + ym for xm, ym in zip(x.models, y.models)] nmodels = predicted.shape[0] xfailed = getattr(x, "failed", None) or [False] * nmodels yfailed = getattr(y, "failed", None) or [False] * nmodels assert len(xfailed) == len(yfailed) res.failed = [xe or ye for xe, ye in zip(xfailed, yfailed)] return res def main(argv=sys.argv): from PyQt4.QtGui import QApplication app = QApplication(argv) argv = app.argv() if len(argv) > 1: filename = argv[1] else: filename = "iris" data = Orange.data.Table(filename) indices = numpy.random.permutation(len(data)) traindata = data[indices[:-20]] testdata = data[indices[-20:]] ow = OWLearningCurveB() ow.show() ow.raise_() ow.set_dataset(traindata) ow.set_testdataset(testdata) l1 = Orange.classification.NaiveBayesLearner() l1.name = 'Naive Bayes' ow.set_learner(l1, 1) l2 = Orange.classification.LogisticRegressionLearner() l2.name = 'Logistic 
Regression' ow.set_learner(l2, 2) l4 = Orange.classification.SklTreeLearner() l4.name = "Decision Tree" ow.set_learner(l4, 3) ow.handleNewSignals() app.exec_() ow.set_dataset(None) ow.set_testdataset(None) ow.set_learner(None, 1) ow.set_learner(None, 2) ow.set_learner(None, 3) ow.handleNewSignals() return 0 if __name__=="__main__": sys.exit(main())
bsd-2-clause
929,211,082,219,131,100
33.02451
96
0.590693
false
3.962889
true
false
false
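learning_curve() in the OWLearningCurveB record builds each curve point by training on a random fraction p of the data, keeping the first ceil(p * n) rows of a permutation. A plain-NumPy sketch of that sampling with no Orange dependency (select_proportion and X are illustrative names, not part of the record):

import numpy as np

def select_proportion(X, p, rng=None):
    # Keep the first ceil(p * n) rows of a random permutation,
    # mirroring select_proportion_preproc() above.
    assert 0 < p <= 1
    rng = np.random.RandomState(0) if rng is None else rng
    indices = rng.permutation(len(X))
    n = int(np.ceil(len(X) * p))
    return X[indices[:n]]

proportions = np.linspace(0.0, 1.0, 11)[1:]   # 0.1, 0.2, ..., 1.0
X = np.arange(100).reshape(50, 2)
subsets = [select_proportion(X, p) for p in proportions]
print([len(s) for s in subsets])              # 5, 10, ..., 50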
leandro86/epubcreator
epubcreator/epubbase/ebook.py
1
14784
import os from lxml import etree from epubcreator.pyepub.pyepubwriter import epub from epubcreator.epubbase import ebook_metadata, ebook_data, files, images from epubcreator.misc import utils from epubcreator.misc.options import Options, Option class Ebook(Options): OPTIONS = [Option(name="includeOptionalFiles", value=True, description="Indica si los archivos opcionales (dedicatoria.xhtml y autor.xhtml) deben incluirse en el epub " "incluso si los respectivos campos no fueron ingresados.")] def __init__(self, ebookData, metadata=None, **options): super().__init__(**options) self._ebookData = ebookData or ebook_data.EbookData() self._metadata = metadata or ebook_metadata.Metadata() def save(self, file): """ Genera y guarda el epub. @param file: un string con el directorio donde guardar el epub (no el nombre del archivo, ya que este debe generarse de acuerdo a los metadatos), o un objeto file-like. @return: el path del archivo generado, si "file" es un string. Si "file" es un objeto de tipo file-like, se retorna el nombre de archivo del epub. """ outputEpub = epub.EpubWriter() self._addEpubBaseFiles(outputEpub) self._addSectionsAndToc(outputEpub) self._addImages(outputEpub) self._addMetadata(outputEpub) epubName = self._getOutputFileName() # Compruebo si estoy ante un string (o sea, un directorio) o un objeto file-like. if isinstance(file, str): fileName = os.path.join(file, epubName) outputEpub.generate(fileName) return fileName else: outputEpub.generate(file) return epubName def _addEpubBaseFiles(self, outputEpub): synopsis = self._metadata.synopsis or ebook_metadata.Metadata.DEFAULT_SYNOPSIS title = self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE editor = self._metadata.editor or ebook_metadata.Metadata.DEFAULT_EDITOR coverModification = self._metadata.coverModification or ebook_metadata.Metadata.DEFAULT_COVER_MODIFICATION coverImage = self._metadata.coverImage or images.CoverImage(files.EpubBaseFiles.getFile(files.EpubBaseFiles.COVER_IMAGE_FILENAME)) publicationYear = self._metadata.publicationDate.year if self._metadata.publicationDate else "" authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)] author = self._getPersonsListAsText(authors)[0] translator = self._getPersonsListAsText(self._metadata.translators)[0] ilustrator = self._getPersonsListAsText(self._metadata.ilustrators)[0] # Agrego los xhtml requeridos, excepto autor.xhtml, que debe ir despúes de las secciones. 
outputEpub.addHtmlData(files.EpubBaseFiles.COVER_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.COVER_FILENAME)) outputEpub.addHtmlData(files.EpubBaseFiles.SYNOPSIS_FILENAME, files.EpubBaseFiles.getSynopsis(synopsis)) outputEpub.addHtmlData(files.EpubBaseFiles.TITLE_FILENAME, files.EpubBaseFiles.getTitle(author, title, self._metadata.subtitle, editor, self._metadata.collectionName, self._metadata.subCollectionName, self._metadata.collectionVolume)) outputEpub.addHtmlData(files.EpubBaseFiles.INFO_FILENAME, files.EpubBaseFiles.getInfo(self._metadata.originalTitle, author, publicationYear, translator, ilustrator, self._metadata.coverDesigner, coverModification, editor)) if self._metadata.dedication or self._options.includeOptionalFiles: dedication = self._metadata.dedication or ebook_metadata.Metadata.DEFAULT_DEDICATION outputEpub.addHtmlData(files.EpubBaseFiles.DEDICATION_FILENAME, files.EpubBaseFiles.getDedication(dedication)) outputEpub.addImageData(files.EpubBaseFiles.COVER_IMAGE_FILENAME, coverImage.toBytes()) # Agrego el resto de los archivos del epubbase. outputEpub.addImageData(files.EpubBaseFiles.EPL_LOGO_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.EPL_LOGO_FILENAME)) outputEpub.addImageData(files.EpubBaseFiles.EX_LIBRIS_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.EX_LIBRIS_FILENAME)) outputEpub.addStyleData(files.EpubBaseFiles.STYLE_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.STYLE_FILENAME)) outputEpub.addMetaFile(files.EpubBaseFiles.APPLE_XML, files.EpubBaseFiles.getFile(files.EpubBaseFiles.APPLE_XML)) def _addSectionsAndToc(self, outputEpub): def processSections(sections): navPoints = [] previousLevel = "1" for section in sections: outputEpub.addHtmlData(section.name, section.toHtml()) hs = section.xpath("//h1 | //h2 | //h3 | //h4 | //h5 | //h6") for h in hs: currentLevel = h.tag[-1] titleText = self._getTitleText(h) titleId = h.get("id") titleSrc = "{0}{1}".format(section.name, "#" + titleId if titleId else "") if currentLevel == "1": navPoints.append(outputEpub.addNavPoint(titleSrc, titleText)) else: if currentLevel < previousLevel: for i in range(int(previousLevel) - int(currentLevel) + 1): navPoints.pop() elif currentLevel == previousLevel: navPoints.pop() childNavPoint = navPoints[-1].addNavPoint(titleSrc, titleText) navPoints.append(childNavPoint) previousLevel = currentLevel # La cubierta debe ser la primera entrada en la toc. outputEpub.addNavPoint(files.EpubBaseFiles.COVER_FILENAME, "Cubierta") # El título del libro debe ser la segunda entrada en la toc. 
outputEpub.addNavPoint(files.EpubBaseFiles.TITLE_FILENAME, self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE) processSections(self._ebookData.iterTextSections()) authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)] authorsWithBiographyOrImage = [a for a in authors if a.biography or a.image or self._options.includeOptionalFiles] for i, author in enumerate(authorsWithBiographyOrImage): biography = author.biography or ebook_metadata.Metadata.DEFAULT_AUTHOR_BIOGRAPHY image = author.image or images.AuthorImage(files.EpubBaseFiles.getFile(files.EpubBaseFiles.AUTHOR_IMAGE_FILENAME), allowProcessing=False) title = self._getTocTitleForAuthorFile(authors) if i == 0 else None imageName = files.EpubBaseFiles.generateAuthorImageFileName(i) authorContent = files.EpubBaseFiles.getAuthor(biography, title, imageName) outputEpub.addHtmlData(files.EpubBaseFiles.generateAuthorFileName(i), authorContent) outputEpub.addImageData(imageName, image.toBytes()) if len(authorsWithBiographyOrImage) > 0: outputEpub.addNavPoint(files.EpubBaseFiles.AUTHOR_FILENAME, self._getTocTitleForAuthorFile(authors)) processSections(self._ebookData.iterNotesSections()) def _addImages(self, outputEpub): for image in self._ebookData.iterImages(): outputEpub.addImageData(image.name, image.content) def _addMetadata(self, outputEpub): authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)] author = self._getPersonsListAsText(authors) # Agrego semántica a cubierta.xhtml. outputEpub.addReference(files.EpubBaseFiles.COVER_FILENAME, "Cover", "cover") # Es necesario agregarle semántica a cover.jpg, sino algunos ereaders no la reconocen como imagen de portada. outputEpub.addCustomMetadata("cover", files.EpubBaseFiles.COVER_IMAGE_FILENAME) outputEpub.addTitle(self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE) outputEpub.addAuthor(author[0], author[1]) outputEpub.addLanguage(self._metadata.language or ebook_metadata.Metadata.DEFAULT_LANGUAGE) if self._metadata.synopsis: # En la sinopsis (el campo description) en los metadatos, no puedo tener saltos de línea. Podría directamente # eliminarlos, pero entonces el texto del párrafo B quedaría pegado al del párrafo A. Por eso es que reemplazo # los saltos de línea por un espacio. outputEpub.addDescription(utils.removeTags(self._metadata.synopsis.replace("\n", " "))) else: outputEpub.addDescription("Sinopsis") outputEpub.addPublisher("ePubLibre") # El tipo de género no interesa si debo poner uno por defecto, dado que no aparece en los metadatos del epub. genres = self._metadata.genres or [ebook_metadata.Genre("bla", "Género", "Subgéneros")] # Ordeno los géneros alfabéticamente. 
genres.sort(key=lambda x: (x.genreType, x.genre, x.subGenre)) genresText = [] previousGenre = "" for genre in genres: if genre.genre != previousGenre: genresText.append(genre.genre) previousGenre = genre.genre genresText.append(genre.subGenre) outputEpub.addSubject(", ".join(genresText)) if self._metadata.translators: translator = self._getPersonsListAsText(self._metadata.translators) outputEpub.addTranslator(translator[0], translator[1]) if self._metadata.ilustrators: ilustrator = self._getPersonsListAsText(self._metadata.ilustrators) outputEpub.addIlustrator(ilustrator[0], ilustrator[1]) if self._metadata.publicationDate is not None: outputEpub.addPublicationDate(self._metadata.publicationDate) if self._metadata.subCollectionName: calibreSeries = "" if self._metadata.collectionName: calibreSeries += "{0}: ".format(self._metadata.collectionName) calibreSeries += self._metadata.subCollectionName try: # Elimino los ceros a la izquierda si se trata de un número. series_index = str(int(self._metadata.collectionVolume)) except ValueError: series_index = self._metadata.collectionVolume outputEpub.addCustomMetadata("calibre:series", calibreSeries) outputEpub.addCustomMetadata("calibre:series_index", series_index) def _getOutputFileName(self): authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)] fileName = [] authorsFileAs = [author.fileAs for author in authors] if len(authorsFileAs) < 3: fileName.append(" & ".join(authorsFileAs)) else: fileName.append("AA. VV.") fileName.append(" - ") if self._metadata.subCollectionName: collection = "" if self._metadata.collectionName: collection += "[{0}] ".format(self._metadata.collectionName) collection += "[{0} {1}] ".format(self._metadata.subCollectionName, self._metadata.collectionVolume) if self._metadata.collectionName: fileName.insert(0, collection) else: fileName.append(collection) fileName.append(self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE) bookId = self._metadata.bookId or ebook_metadata.Metadata.DEFAULT_BOOK_ID editor = self._metadata.editor or ebook_metadata.Metadata.DEFAULT_EDITOR fileName.append(" [{0}] (r1.0 {1})".format(bookId, editor)) return utils.toFileName("{0}.epub".format("".join(fileName))) def _getPersonsListAsText(self, persons): """ Convierte una lista de Person a texto. Cada Person se concatena con un & (ampersand). @param persons: una lista de Person. @return: una tupla cuyo primer elemento es un string concatenado con todos los nombres, y el segundo un string concatenado con todos los file-as. """ return " & ".join((p.name for p in persons)), " & ".join((p.fileAs for p in persons)) def _getTocTitleForAuthorFile(self, authors): if not authors or (len(authors) == 1 and authors[0].gender == ebook_metadata.Person.MALE_GENDER): return "Autor" else: return "Autores" if len(authors) > 1 else "Autora" def _getTitleText(self, h): """ Retorna el texto de un título, reemplazando los tags "br" por un espacio. """ if h.xpath("descendant::br"): # No puedo modificar el element "h" directamente, sino que necesito # trabajar sobre una copia. Una deep copy es otra opción, pero creo # que va a terminar copiando todoo el tree... h = etree.fromstring(etree.tostring(h)) for br in h.xpath("descendant::br"): br.text = " " etree.strip_tags(h, "br") return "".join(h.xpath("descendant::text()"))
unlicense
1,965,789,741,287,960,000
50.632867
149
0.609237
false
4.06889
false
false
false
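_getTitleText() in the ebook.py record flattens <br/> tags to spaces on a copy of the heading element so the original tree stays intact. A self-contained sketch of the same lxml idiom (the sample heading is invented):

from lxml import etree

def title_text(h):
    # Work on a serialized copy, as the record's comment suggests,
    # so the caller's tree is never mutated.
    h = etree.fromstring(etree.tostring(h))
    for br in h.xpath("descendant::br"):
        br.text = " "          # each <br/> becomes a single space
    etree.strip_tags(h, "br")  # merge text/tails back into the parent
    return "".join(h.xpath("descendant::text()"))

h = etree.fromstring("<h1>Parte uno<br/>El comienzo</h1>")
print(title_text(h))  # Parte uno El comienzo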
artminster/artminster
core/utils/fields.py
1
10035
from django.utils.translation import ugettext as _ from django.db import models, connection from django.utils.text import capfirst from itertools import chain from django.utils.html import conditional_escape from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode, smart_unicode from django import forms from itertools import chain from django.conf import settings from django.contrib.admin import widgets from django.utils.html import escape from django.forms.fields import EMPTY_VALUES, Field from django.forms import ValidationError from django.db.models.signals import post_delete, post_save from south.modelsinspector import add_introspection_rules from django.db.models import OneToOneField from django.db.models.fields.related import SingleRelatedObjectDescriptor qn = connection.ops.quote_name import re uk_landline_re = re.compile(r'^[0]{1}[1-9]{1}[0-9]{9}$') uk_landline_no08or09_re = re.compile(r'^[0]{1}[1-7]{1}[0-9]{9}$') uk_mobile_re = re.compile(r'^(07)[0-9]{9}') international_number_re = re.compile(r'^[+]?([0-9]*[\.\s\-\(\)]|[0-9]+){3,24}$') from django.db.models import OneToOneField from django.db.models.fields.related import SingleRelatedObjectDescriptor class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor): def __get__(self, instance, instance_type=None): try: return super(AutoSingleRelatedObjectDescriptor, self).__get__(instance, instance_type) except self.related.model.DoesNotExist: obj = self.related.model(**{self.related.field.name: instance}) obj.save() return obj class AutoOneToOneField(OneToOneField): ''' OneToOneField creates related object on first call if it doesnt exist yet. Use it instead of original OneToOne field. example: class MyProfile(models.Model): user = AutoOneToOneField(User, primary_key=True) home_page = models.URLField(max_length=255, blank=True) icq = models.IntegerField(max_length=255, null=True) ''' def contribute_to_related_class(self, cls, related): setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related)) def south_field_triple(self): "Returns a suitable description of this field for South." from south.modelsinspector import introspector field_class = OneToOneField.__module__ + "." 
+ OneToOneField.__name__ args, kwargs = introspector(self) return (field_class, args, kwargs) # ISO 3166-1 country names and codes adapted from http://opencountrycodes.appspot.com/python/ COUNTRIES = [ ('GB', _('United Kingdom')), ('US', _('United States')), ('AF', _('Afghanistan')), ('AX', _('Aland Islands')), ('AL', _('Albania')), ('DZ', _('Algeria')), ('AS', _('American Samoa')), ('AD', _('Andorra')), ('AO', _('Angola')), ('AI', _('Anguilla')), ('AQ', _('Antarctica')), ('AG', _('Antigua and Barbuda')), ('AR', _('Argentina')), ('AM', _('Armenia')), ('AW', _('Aruba')), ('AU', _('Australia')), ('AT', _('Austria')), ('AZ', _('Azerbaijan')), ('BS', _('Bahamas')), ('BH', _('Bahrain')), ('BD', _('Bangladesh')), ('BB', _('Barbados')), ('BY', _('Belarus')), ('BE', _('Belgium')), ('BZ', _('Belize')), ('BJ', _('Benin')), ('BM', _('Bermuda')), ('BT', _('Bhutan')), ('BO', _('Bolivia')), ('BA', _('Bosnia and Herzegovina')), ('BW', _('Botswana')), ('BV', _('Bouvet Island')), ('BR', _('Brazil')), ('BN', _('Brunei Darussalam')), ('BG', _('Bulgaria')), ('BF', _('Burkina Faso')), ('BI', _('Burundi')), ('KH', _('Cambodia')), ('CM', _('Cameroon')), ('CA', _('Canada')), ('CV', _('Cape Verde')), ('KY', _('Cayman Islands')), ('CF', _('Central African Republic')), ('TD', _('Chad')), ('CL', _('Chile')), ('CN', _('China')), ('CX', _('Christmas Island')), ('CC', _('Cocos Islands')), ('CO', _('Colombia')), ('KM', _('Comoros')), ('CG', _('Congo')), ('CD', _('Congo')), ('CK', _('Cook Islands')), ('CR', _('Costa Rica')), ('CI', _("Cote d'Ivoire")), ('HR', _('Croatia')), ('CU', _('Cuba')), ('CY', _('Cyprus')), ('CZ', _('Czech Republic')), ('DK', _('Denmark')), ('DJ', _('Djibouti')), ('DM', _('Dominica')), ('DO', _('Dominican Republic')), ('EC', _('Ecuador')), ('EG', _('Egypt')), ('SV', _('El Salvador')), ('GQ', _('Equatorial Guinea')), ('ER', _('Eritrea')), ('EE', _('Estonia')), ('ET', _('Ethiopia')), ('FK', _('Falkland Islands')), ('FO', _('Faroe Islands')), ('FJ', _('Fiji')), ('FI', _('Finland')), ('FR', _('France')), ('GF', _('French Guiana')), ('PF', _('French Polynesia')), ('GA', _('Gabon')), ('GM', _('Gambia')), ('GE', _('Georgia')), ('DE', _('Germany')), ('GH', _('Ghana')), ('GI', _('Gibraltar')), ('GR', _('Greece')), ('GL', _('Greenland')), ('GD', _('Grenada')), ('GP', _('Guadeloupe')), ('GU', _('Guam')), ('GT', _('Guatemala')), ('GG', _('Guernsey')), ('GN', _('Guinea')), ('GW', _('Guinea-Bissau')), ('GY', _('Guyana')), ('HT', _('Haiti')), ('HN', _('Honduras')), ('HK', _('Hong Kong')), ('HU', _('Hungary')), ('IS', _('Iceland')), ('IN', _('India')), ('ID', _('Indonesia')), ('IR', _('Iran')), ('IQ', _('Iraq')), ('IE', _('Ireland')), ('IM', _('Isle of Man')), ('IL', _('Israel')), ('IT', _('Italy')), ('JM', _('Jamaica')), ('JP', _('Japan')), ('JE', _('Jersey')), ('JO', _('Jordan')), ('KZ', _('Kazakhstan')), ('KE', _('Kenya')), ('KI', _('Kiribati')), ('KP', _('Korea')), ('KR', _('Korea, Republic of')), ('KW', _('Kuwait')), ('KG', _('Kyrgyzstan')), ('LA', _('Lao')), ('LV', _('Latvia')), ('LB', _('Lebanon')), ('LS', _('Lesotho')), ('LR', _('Liberia')), ('LY', _('Libyan Arab Jamahiriya')), ('LI', _('Liechtenstein')), ('LT', _('Lithuania')), ('LU', _('Luxembourg')), ('MO', _('Macao')), ('MK', _('Macedonia')), ('MG', _('Madagascar')), ('MW', _('Malawi')), ('MY', _('Malaysia')), ('MV', _('Maldives')), ('ML', _('Mali')), ('MT', _('Malta')), ('MH', _('Marshall Islands')), ('MQ', _('Martinique')), ('MR', _('Mauritania')), ('MU', _('Mauritius')), ('YT', _('Mayotte')), ('MX', _('Mexico')), ('MD', _('Moldova')), ('MC', 
_('Monaco')), ('MN', _('Mongolia')), ('ME', _('Montenegro')), ('MS', _('Montserrat')), ('MA', _('Morocco')), ('MZ', _('Mozambique')), ('MM', _('Myanmar')), ('NA', _('Namibia')), ('NR', _('Nauru')), ('NP', _('Nepal')), ('NL', _('Netherlands')), ('AN', _('Netherlands Antilles')), ('NC', _('New Caledonia')), ('NZ', _('New Zealand')), ('NI', _('Nicaragua')), ('NE', _('Niger')), ('NG', _('Nigeria')), ('NU', _('Niue')), ('NF', _('Norfolk Island')), ('MP', _('Northern Mariana Islands')), ('NO', _('Norway')), ('OM', _('Oman')), ('PK', _('Pakistan')), ('PW', _('Palau')), ('PA', _('Panama')), ('PG', _('Papua New Guinea')), ('PY', _('Paraguay')), ('PE', _('Peru')), ('PH', _('Philippines')), ('PN', _('Pitcairn')), ('PL', _('Poland')), ('PT', _('Portugal')), ('PR', _('Puerto Rico')), ('QA', _('Qatar')), ('RE', _('Reunion')), ('RO', _('Romania')), ('RU', _('Russian Federation')), ('RW', _('Rwanda')), ('BL', _('Saint Barthelemy')), ('SH', _('Saint Helena')), ('KN', _('Saint Kitts and Nevis')), ('LC', _('Saint Lucia')), ('MF', _('Saint Martin')), ('WS', _('Samoa')), ('SM', _('San Marino')), ('ST', _('Sao Tome and Principe')), ('SA', _('Saudi Arabia')), ('SN', _('Senegal')), ('RS', _('Serbia')), ('SC', _('Seychelles')), ('SL', _('Sierra Leone')), ('SG', _('Singapore')), ('SK', _('Slovakia')), ('SI', _('Slovenia')), ('SB', _('Solomon Islands')), ('SO', _('Somalia')), ('ZA', _('South Africa')), ('ES', _('Spain')), ('LK', _('Sri Lanka')), ('SD', _('Sudan')), ('SR', _('Suriname')), ('SJ', _('Svalbard and Jan Mayen')), ('SZ', _('Swaziland')), ('SE', _('Sweden')), ('CH', _('Switzerland')), ('SY', _('Syrian Arab Republic')), ('TW', _('Taiwan')), ('TJ', _('Tajikistan')), ('TZ', _('Tanzania')), ('TH', _('Thailand')), ('TL', _('Timor-Leste')), ('TG', _('Togo')), ('TK', _('Tokelau')), ('TO', _('Tonga')), ('TT', _('Trinidad and Tobago')), ('TN', _('Tunisia')), ('TR', _('Turkey')), ('TM', _('Turkmenistan')), ('TC', _('Turks and Caicos Islands')), ('TV', _('Tuvalu')), ('UG', _('Uganda')), ('UA', _('Ukraine')), ('AE', _('United Arab Emirates')), ('UY', _('Uruguay')), ('UZ', _('Uzbekistan')), ('VU', _('Vanuatu')), ('VE', _('Venezuela')), ('VN', _('Viet Nam')), ('VG', _('Virgin Islands, British')), ('VI', _('Virgin Islands, U.S.')), ('WF', _('Wallis and Futuna')), ('EH', _('Western Sahara')), ('YE', _('Yemen')), ('ZM', _('Zambia')), ('ZW', _('Zimbabwe')), ] class CountryField(models.CharField): def __init__(self, *args, **kwargs): kwargs.setdefault('max_length', 2) kwargs.setdefault('choices', COUNTRIES) super(CountryField, self).__init__(*args, **kwargs) def get_internal_type(self): return "CharField" # SOUTH INTROSPECTION RULES add_introspection_rules([], ["^filebrowser\.fields\.FileBrowseField"]) add_introspection_rules([], ["^artminster\.core\.utils\.fields\.CountryField"])
mit
4,693,943,054,560,884,000
30.264798
98
0.47703
false
3.151696
false
false
false
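AutoOneToOneField in the fields.py record creates the related row on first attribute access via a custom descriptor. A framework-free sketch of that "create on first read" pattern, assuming nothing from Django (LazyRelated, Profile, and User are hypothetical):

class LazyRelated:
    def __init__(self, factory):
        self.factory = factory

    def __set_name__(self, owner, name):
        self.attr = "_" + name

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        if not hasattr(instance, self.attr):
            # First access: build and cache the related object, the way
            # AutoSingleRelatedObjectDescriptor saves the missing row.
            setattr(instance, self.attr, self.factory(instance))
        return getattr(instance, self.attr)

class Profile:
    def __init__(self, user):
        self.user = user

class User:
    profile = LazyRelated(Profile)

u = User()
assert u.profile is u.profile  # created once, then reused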
JioCloud/oslo-incubator
openstack/common/db/sqlalchemy/migration.py
1
10048
# coding: utf-8 # # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Base on code in migrate/changeset/databases/sqlite.py which is under # the following license: # # The MIT License # # Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE import distutils.version as dist_version import os import re import migrate from migrate.changeset import ansisql from migrate.changeset.databases import sqlite from migrate.versioning import util as migrate_util import sqlalchemy from sqlalchemy.schema import UniqueConstraint from openstack.common.db import exception from openstack.common.db.sqlalchemy import session as db_session from openstack.common.gettextutils import _ # noqa @migrate_util.decorator def patched_with_engine(f, *a, **kw): url = a[0] engine = migrate_util.construct_engine(url, **kw) try: kw['engine'] = engine return f(*a, **kw) finally: if isinstance(engine, migrate_util.Engine) and engine is not url: migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) engine.dispose() # TODO(jkoelker) When migrate 0.7.3 is released and nova depends # on that version or higher, this can be removed MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') if (not hasattr(migrate, '__version__') or dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): migrate_util.with_engine = patched_with_engine # NOTE(jkoelker) Delay importing migrate until we are patched from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository _REPOSITORY = None get_engine = db_session.get_engine def _get_unique_constraints(self, table): """Retrieve information about existing unique constraints of the table This feature is needed for _recreate_table() to work properly. Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x. 
""" data = table.metadata.bind.execute( """SELECT sql FROM sqlite_master WHERE type='table' AND name=:table_name""", table_name=table.name ).fetchone()[0] UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" return [ UniqueConstraint( *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")], name=name ) for name, cols in re.findall(UNIQUE_PATTERN, data) ] def _recreate_table(self, table, column=None, delta=None, omit_uniques=None): """Recreate the table properly Unlike the corresponding original method of sqlalchemy-migrate this one doesn't drop existing unique constraints when creating a new one. """ table_name = self.preparer.format_table(table) # we remove all indexes so as not to have # problems during copy and re-create for index in table.indexes: index.drop() # reflect existing unique constraints for uc in self._get_unique_constraints(table): table.append_constraint(uc) # omit given unique constraints when creating a new table if required table.constraints = set([ cons for cons in table.constraints if omit_uniques is None or cons.name not in omit_uniques ]) self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) self.execute() insertion_string = self._modify_table(table, column, delta) table.create(bind=self.connection) self.append(insertion_string % {'table_name': table_name}) self.execute() self.append('DROP TABLE migration_tmp') self.execute() def _visit_migrate_unique_constraint(self, *p, **k): """Drop the given unique constraint The corresponding original method of sqlalchemy-migrate just raises NotImplemented error """ self.recreate_table(p[0].table, omit_uniques=[p[0].name]) def patch_migrate(): """A workaround for SQLite's inability to alter things SQLite abilities to alter tables are very limited (please read http://www.sqlite.org/lang_altertable.html for more details). E. g. one can't drop a column or a constraint in SQLite. The workaround for this is to recreate the original table omitting the corresponding constraint (or column). sqlalchemy-migrate library has recreate_table() method that implements this workaround, but it does it wrong: - information about unique constraints of a table is not retrieved. So if you have a table with one unique constraint and a migration adding another one you will end up with a table that has only the latter unique constraint, and the former will be lost - dropping of unique constraints is not supported at all The proper way to fix this is to provide a pull-request to sqlalchemy-migrate, but the project seems to be dead. So we can go on with monkey-patching of the lib at least for now. """ # this patch is needed to ensure that recreate_table() doesn't drop # existing unique constraints of the table when creating a new one helper_cls = sqlite.SQLiteHelper helper_cls.recreate_table = _recreate_table helper_cls._get_unique_constraints = _get_unique_constraints # this patch is needed to be able to drop existing unique constraints constraint_cls = sqlite.SQLiteConstraintDropper constraint_cls.visit_migrate_unique_constraint = \ _visit_migrate_unique_constraint constraint_cls.__bases__ = (ansisql.ANSIColumnDropper, sqlite.SQLiteConstraintGenerator) def db_sync(abs_path, version=None, init_version=0): """Upgrade or downgrade a database. Function runs the upgrade() or downgrade() functions in change scripts. :param abs_path: Absolute path to migrate repository. :param version: Database will upgrade/downgrade until this version. If None - database will update to the latest available version. 
:param init_version: Initial database version """ if version is not None: try: version = int(version) except ValueError: raise exception.DbMigrationError( message=_("version should be an integer")) current_version = db_version(abs_path, init_version) repository = _find_migrate_repo(abs_path) if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: return versioning_api.downgrade(get_engine(), repository, version) def db_version(abs_path, init_version): """Show the current version of the repository. :param abs_path: Absolute path to migrate repository :param version: Initial database version """ repository = _find_migrate_repo(abs_path) try: return versioning_api.db_version(get_engine(), repository) except versioning_exceptions.DatabaseNotControlledError: meta = sqlalchemy.MetaData() engine = get_engine() meta.reflect(bind=engine) tables = meta.tables if len(tables) == 0: db_version_control(abs_path, init_version) return versioning_api.db_version(get_engine(), repository) else: # Some pre-Essex DB's may not be version controlled. # Require them to upgrade using Essex first. raise exception.DbMigrationError( message=_("Upgrade DB using Essex release first.")) def db_version_control(abs_path, version=None): """Mark a database as under this repository's version control. Once a database is under version control, schema changes should only be done via change scripts in this repository. :param abs_path: Absolute path to migrate repository :param version: Initial database version """ repository = _find_migrate_repo(abs_path) versioning_api.version_control(get_engine(), repository, version) return version def _find_migrate_repo(abs_path): """Get the project's change script repository :param abs_path: Absolute path to migrate repository """ global _REPOSITORY if not os.path.exists(abs_path): raise exception.DbMigrationError("Path %s not found" % abs_path) if _REPOSITORY is None: _REPOSITORY = Repository(abs_path) return _REPOSITORY
apache-2.0
-3,339,525,715,949,184,000
35.140288
79
0.695431
false
4.232098
false
false
false
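_get_unique_constraints() in the migration.py record scrapes UNIQUE constraints out of the raw CREATE TABLE text stored in sqlite_master, since SQLAlchemy 0.7.x/0.8.x did not expose them. The same regex can be exercised against an in-memory database using only the standard library (table t and constraint uq_ab are made up for the demo):

import re
import sqlite3

UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (a TEXT, b TEXT, CONSTRAINT uq_ab UNIQUE (a, b))")
# sqlite_master stores the CREATE statement verbatim, which is what
# the record's helper reads back and parses.
sql = conn.execute(
    "SELECT sql FROM sqlite_master WHERE type='table' AND name='t'"
).fetchone()[0]
for name, cols in re.findall(UNIQUE_PATTERN, sql):
    print(name, [c.strip(' "') for c in cols.split(",")])
# prints: uq_ab ['a', 'b']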
enikesha/pacioli
pacioli/views.py
1
31775
# Copyright (c) 2014, Satoshi Nakamoto Institute # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import io import uuid import ast import csv import calendar from collections import OrderedDict from datetime import datetime,date from flask import flash, render_template, request, redirect, url_for, send_from_directory, send_file from pacioli import app, db, forms, models import sqlalchemy from sqlalchemy.sql import func from sqlalchemy.orm import aliased from pacioli.accounting.memoranda import process_filestorage import pacioli.accounting.ledgers as ledgers import pacioli.accounting.rates as rates import pacioli.accounting.valuations as valuations @app.route('/') def index(): return render_template("index.html") @app.route('/Configure') def configure(): return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts') def chart_of_accounts(): classificationform = forms.NewClassification() accountform = forms.NewAccount() subaccountform = forms.NewSubAccount() subaccounts = models.Subaccounts.query.all() return render_template("configure/chart_of_accounts.html", subaccounts=subaccounts, classificationform=classificationform, accountform=accountform, subaccountform=subaccountform) @app.route('/Configure/ChartOfAccounts/AddClassification', methods=['POST','GET']) def add_classification(): if request.method == 'POST': form = request.form.copy().to_dict() name = form['classification'] parent = form['classificationparent'] parent = models.Elements.query.filter_by(id=parent).one() parent = parent.name classification = models.Classifications(name=name, parent=parent) db.session.add(classification) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts/DeleteClassification/<classification>') def delete_classification(classification): classification = models.Classifications \ .query \ .filter_by(name=classification) \ .first() db.session.delete(classification) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts/AddAccount', methods=['POST','GET']) def add_account(): if 
request.method == 'POST': form = request.form.copy().to_dict() name = form['account'] parent = form['accountparent'] parent = models.Classifications \ .query \ .filter_by(id=parent) \ .one() parent = parent.name account = models.Accounts(name=name, parent=parent) db.session.add(account) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts/DeleteAccount/<account>') def delete_account(account): account = models.Accounts.query.filter_by(name=account).first() db.session.delete(account) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts/AddSubAccount', methods=['POST','GET']) def add_subaccount(): if request.method == 'POST': form = request.form.copy().to_dict() name = form['subaccount'] parent = form['subaccountparent'] parent = models.Accounts.query.filter_by(id=parent).one() parent = parent.name subaccount = models.Subaccounts(name=name, parent=parent) db.session.add(subaccount) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Configure/ChartOfAccounts/DeleteSubAccount/<subaccount>') def delete_subaccount(subaccount): subaccount = models.Accounts.query.filter_by(name=subaccount).first() db.session.delete(subaccount) db.session.commit() return redirect(url_for('chart_of_accounts')) @app.route('/Bookkeeping') def bookkeeping(): return redirect(url_for('upload_csv')) @app.route('/Bookkeeping/Memoranda/Upload', methods=['POST','GET']) def upload_csv(): filenames = '' if request.method == 'POST': uploaded_files = request.files.getlist("file[]") for file in uploaded_files: process_filestorage(file) return redirect(url_for('upload_csv')) memos = models.Memoranda \ .query \ .order_by(models.Memoranda.date.desc()) \ .all() return render_template('bookkeeping/upload.html', title = 'Upload', memos=memos) @app.route('/Bookkeeping/Memoranda/ExchangeRates') def exchange_rates(): return render_template("bookkeeping/exchange_rates.html") @app.route('/Bookkeeping/Memoranda/DownloadRates') def download_rates(): rates.download_rates() return redirect(url_for('exchange_rates')) @app.route('/Bookkeeping/Memoranda/ExchangeRates/Summarize') def summarize_rates(): rates.summarize_rates("pacioli") return redirect(url_for('exchange_rates')) @app.route('/Bookkeeping/Memoranda/ExchangeRates/Import') def import_rates(): rates.import_rates("pacioli") return redirect(url_for('exchange_rates')) @app.route('/Bookkeeping/Memoranda/ExchangeRates/CalculateGains/<method>') def calc_gains(method): valuations.calculate_bitcoin_gains(method) return redirect(url_for('exchange_rates')) @app.route('/Bookkeeping/Memoranda/Memos', methods=['POST','GET']) def memoranda(): memos = models.Memoranda \ .query \ .order_by(models.Memoranda.date.desc()) \ .all() for memo in memos: transactions = models.MemorandaTransactions \ .query \ .filter_by(memoranda_id=memo.id) \ .all() memo.count = len(transactions) return render_template('bookkeeping/memos.html', title = 'Memoranda', memos=memos) @app.route('/Bookkeeping/Memoranda/Memos/Delete/<fileName>') def delete_memoranda(fileName): memo = models.Memoranda \ .query \ .filter_by(fileName=fileName) \ .first() transactions = models.MemorandaTransactions \ .query \ .filter_by(memoranda_id=memo.id) \ .all() for transaction in transactions: journal_entry = models.JournalEntries \ .query \ .filter_by(memoranda_transactions_id=transaction.id) \ .first() ledger_entries = models.LedgerEntries \ .query \ .filter_by(journal_entry_id = journal_entry.id) \ .all() for entry in 
ledger_entries: db.session.delete(entry) db.session.commit() db.session.delete(journal_entry) db.session.commit() db.session.delete(transaction) db.session.commit() db.session.delete(memo) db.session.commit() return redirect(url_for('upload_csv')) @app.route('/Bookkeeping/Memoranda/Memos/<fileName>') def memo_file(fileName): memo = models.Memoranda.query.filter_by(fileName=fileName).first() fileText = memo.fileText document = io.StringIO(fileText) reader = csv.reader(document) rows = [pair for pair in reader] return render_template('bookkeeping/memo_file.html', title = 'Memo', rows=rows, fileName=fileName) @app.route('/Bookkeeping/Memoranda/Memos/Transactions') def transactions(): transactions = models.MemorandaTransactions.query.all() for transaction in transactions: transaction.details = ast.literal_eval(transaction.details) journal_entry = models.JournalEntries.query.filter_by(memoranda_transactions_id=transaction.id).first() transaction.journal_entry_id = journal_entry.id return render_template('bookkeeping/memo_transactions.html', title = 'Memo', transactions=transactions) @app.route('/Bookkeeping/Memoranda/Memos/<fileName>/Transactions') def memo_transactions(fileName): memo = models.Memoranda.query.filter_by(fileName=fileName).first() transactions = models.MemorandaTransactions.query.filter_by(memoranda_id=memo.id).all() for transaction in transactions: transaction.details = ast.literal_eval(transaction.details) journal_entry = models.JournalEntries.query.filter_by(memoranda_transactions_id=transaction.id).first() transaction.journal_entry_id = journal_entry.id return render_template('bookkeeping/memo_transactions.html', title = 'Memo', transactions=transactions, fileName=fileName) @app.route('/Bookkeeping/GeneralJournal/<currency>') def general_journal(currency): journal_entries = db.session \ .query(models.JournalEntries) \ .filter(models.JournalEntries.ledgerentries \ .any(currency=currency)) \ .join(models.LedgerEntries) \ .order_by(models.LedgerEntries.date.desc()) \ .all() for journal_entry in journal_entries: journal_entry.ledgerentries = [c for c in journal_entry.ledgerentries if c.currency == currency] return render_template('bookkeeping/general_journal.html', title = 'General Journal', journal_entries=journal_entries, currency=currency) @app.route('/Bookkeeping/GeneralJournal/Entry/<id>') def journal_entry(id): journal_entry = models.JournalEntries.query.filter_by(id = id).first() ledger_entries = models.LedgerEntries.query.filter_by(journal_entry_id = id).order_by(models.LedgerEntries.date.desc()).order_by(models.LedgerEntries.tside.desc()).all() transaction = models.MemorandaTransactions.query.filter_by(id=journal_entry.memoranda_transactions_id).first() memo = models.Memoranda.query.filter_by(id=transaction.memoranda_id).first() transaction.details = ast.literal_eval(transaction.details) print(ledger_entries) return render_template('bookkeeping/journal_entry.html', title = 'Journal Entry', journal_entry=journal_entry, ledger_entries=ledger_entries, transaction=transaction, memo=memo) @app.route('/Bookkeeping/GeneralJournal/<id>/Edit', methods=['POST','GET']) def edit_journal_entry(id): journal_entry = models.JournalEntries.query.filter_by(id = id).first() ledger_entries = models.LedgerEntries.query.filter_by(journal_entry_id = id).order_by(models.LedgerEntries.date.desc()).order_by(models.LedgerEntries.tside.desc()).all() transaction = models.MemorandaTransactions.query.filter_by(id=journal_entry.memoranda_transactions_id).first() memo = 
models.Memoranda.query.filter_by(id=transaction.memoranda_id).first() transaction.details = ast.literal_eval(transaction.details) return render_template('bookkeeping/journal_entry_edit.html', title = 'Journal Entry', journal_entry=journal_entry, ledger_entries=ledger_entries, transaction=transaction, memo=memo) @app.route('/Bookkeeping/GeneralLedger/<currency>') def general_ledger(currency): accountsQuery = db.session\ .query(models.LedgerEntries.ledger)\ .group_by(models.LedgerEntries.ledger).all() accounts = [] for accountResult in accountsQuery: accountName = accountResult[0] query = ledgers.query_entries(accountName, 'Monthly', currency) accounts.append(query) return render_template('bookkeeping/general_ledger.html', title = 'General Ledger', accounts=accounts, currency=currency) @app.route('/Bookkeeping/Ledger/<accountName>/<currency>/<groupby>') def ledger(accountName, currency, groupby): query = ledgers.query_entries(accountName, groupby, currency) return render_template('bookkeeping/ledger.html', title = 'Ledger', currency=currency, account=query[0], ledger_entries=query[1], groupby = groupby, accountName=accountName) @app.route('/Bookkeeping/Ledger/<accountName>/<currency>/<groupby>/<interval>') def ledger_page(accountName, currency, groupby, interval): if groupby == "Daily": interval = datetime.strptime(interval, "%m-%d-%Y") year = interval.year month = interval.month day = interval.day ledger_entries = models.LedgerEntries \ .query \ .filter_by(ledger=accountName) \ .filter_by(currency=currency) \ .filter( \ func.date_part('year', models.LedgerEntries.date)==year, \ func.date_part('month', models.LedgerEntries.date)==month, \ func.date_part('day', models.LedgerEntries.date)==day) \ .order_by(models.LedgerEntries.date) \ .order_by(models.LedgerEntries.tside.asc()) \ .all() account = ledgers.foot_account(accountName, ledger_entries, 'All') if groupby == "Monthly": interval = datetime.strptime(interval, "%m-%Y") year = interval.year month = interval.month ledger_entries = models.LedgerEntries\ .query\ .filter_by(ledger=accountName) \ .filter_by(currency=currency) \ .filter( \ func.date_part('year', models.LedgerEntries.date)==year, \ func.date_part('month', models.LedgerEntries.date)==month)\ .order_by(models.LedgerEntries.date) \ .order_by(models.LedgerEntries.tside.desc()) \ .all() account = ledgers.foot_account(accountName, ledger_entries, 'All') return render_template('bookkeeping/ledger.html', title = 'Ledger', account=account, ledger_entries=ledger_entries, groupby2 = groupby, groupby = 'All', accountName=accountName, interval=interval, currency=currency) @app.route('/Bookkeeping/TrialBalance/<currency>') def trial_balance(currency): accountsQuery = db.session \ .query(models.LedgerEntries.ledger) \ .group_by(models.LedgerEntries.ledger) \ .filter(models.LedgerEntries.currency==currency) \ .all() periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date) + '-'+ func.date_part('month', models.LedgerEntries.date)) \ .filter(models.LedgerEntries.currency==currency) \ .group_by(\ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .all() period = datetime.now() year = period.year month = period.month accounts = [] totalDebits = 0 totalCredits = 0 for accountResult in accountsQuery: accountName = accountResult[0] ledger_entries = models.LedgerEntries \ .query \ .filter_by(ledger=accountName)\ .filter_by(currency=currency) \ .filter( \ func.date_part('year', models.LedgerEntries.date)==year, 
func.date_part('month', models.LedgerEntries.date)==month) \ .order_by(models.LedgerEntries.date) \ .order_by(models.LedgerEntries.tside.desc()) \ .all() query = ledgers.foot_account(accountName, ledger_entries, 'All') totalDebits += query['debitBalance'] totalCredits += query['creditBalance'] accounts.append(query) return render_template('bookkeeping/trial_balance.html', currency=currency, periods=periods, period=period, accounts=accounts, totalDebits=totalDebits, totalCredits=totalCredits) @app.route('/Bookkeeping/TrialBalance/<currency>/<groupby>/<period>') def trial_balance_historical(currency, groupby, period): accountsQuery = db.session \ .query(models.LedgerEntries.ledger) \ .group_by(models.LedgerEntries.ledger) \ .filter(models.LedgerEntries.currency==currency) \ .all() periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date) + '-'+ func.date_part('month', models.LedgerEntries.date)) \ .group_by(\ func.date_part('year', models.LedgerEntries.date),\ func.date_part('month', models.LedgerEntries.date)) \ .filter(models.LedgerEntries.currency==currency) \ .all() period = datetime.strptime(period, "%Y-%m") year = period.year month = period.month day = calendar.monthrange(year, month)[1] period = datetime(year, month, day, 23, 59, 59) accounts = [] totalDebits = 0 totalCredits = 0 for accountResult in accountsQuery: accountName = accountResult[0] ledger_entries = models.LedgerEntries \ .query \ .filter_by(ledger=accountName) \ .filter_by(currency=currency) \ .filter( \ func.date_part('year', models.LedgerEntries.date)==year, \ func.date_part('month', models.LedgerEntries.date)==month) \ .order_by(models.LedgerEntries.date) \ .order_by(models.LedgerEntries.tside.desc()) \ .all() query = ledgers.foot_account(accountName, ledger_entries, 'All') totalDebits += query['debitBalance'] totalCredits += query['creditBalance'] accounts.append(query) return render_template('bookkeeping/trial_balance.html', currency=currency, periods=periods, period=period, accounts=accounts, totalDebits=totalDebits, totalCredits=totalCredits) @app.route('/FinancialStatements') def financial_statements(): return redirect(url_for('income_statement', currency='satoshis')) @app.route('/FinancialStatements/IncomeStatement/<currency>') def income_statement(currency): periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date),\ func.date_part('month', models.LedgerEntries.date)) \ .group_by( \ func.date_part('year', models.LedgerEntries.date),\ func.date_part('month', models.LedgerEntries.date)) \ .all() periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods]) period = datetime.now() period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0) period_end = datetime(period.year, period.month, period.day, 23, 59, 59, 999999) elements = db.session \ .query(models.Elements) \ .join(models.Classifications) \ .filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\ .join(models.Accounts) \ .join(models.Subaccounts) \ .all() net_income = 0 for element in elements: element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']] for classification in element.classifications: for account in classification.accounts: for subaccount in account.subaccounts: subaccount.total = 0 subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ] for ledger_entry in subaccount.ledgerentries: if ledger_entry.currency == currency: if 
ledger_entry.tside == 'credit': subaccount.total += ledger_entry.amount net_income += ledger_entry.amount elif ledger_entry.tside == 'debit': net_income -= ledger_entry.amount subaccount.total -= ledger_entry.amount return render_template('financial_statements/income_statement.html', title = 'Income Statement', periods = periods, currency = currency, elements = elements, net_income = net_income) @app.route('/FinancialStatements/IncomeStatement/<currency>/<period>') def income_statement_historical(currency, period): periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .group_by( \ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .all() periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods]) period = datetime.strptime(period, "%Y-%m") lastday = calendar.monthrange(period.year, period.month)[1] period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0) period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999) elements = db.session \ .query(models.Elements) \ .join(models.Classifications) \ .filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\ .join(models.Accounts) \ .join(models.Subaccounts) \ .all() net_income = 0 for element in elements: element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']] for classification in element.classifications: for account in classification.accounts: for subaccount in account.subaccounts: subaccount.total = 0 subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ] for ledger_entry in subaccount.ledgerentries: if ledger_entry.currency == currency: if ledger_entry.tside == 'credit': net_income += ledger_entry.amount subaccount.total += ledger_entry.amount elif ledger_entry.tside == 'debit': net_income -= ledger_entry.amount subaccount.total -= ledger_entry.amount return render_template('financial_statements/income_statement.html', title = 'Income Statement', periods = periods, currency = currency, elements = elements, net_income = net_income) @app.route('/FinancialStatements/BalanceSheet/<currency>') def balance_sheet(currency): periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .group_by( \ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .all() periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods]) period = datetime.now() period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0) period_end = datetime(period.year, period.month, period.day, 23, 59, 59, 999999) elements = db.session \ .query(models.Elements) \ .join(models.Classifications) \ .join(models.Accounts) \ .join(models.Subaccounts) \ .all() retained_earnings = 0 for element in elements: element.balance = 0 for classification in element.classifications: classification.balance = 0 for account in classification.accounts: account.balance = 0 for subaccount in account.subaccounts: subaccount.balance = 0 subaccount.ledgerentries = [c for c in subaccount.ledgerentries if c.date <= period_end ] for ledger_entry in subaccount.ledgerentries: if ledger_entry.currency == currency: if ledger_entry.tside == 'credit': element.balance -= ledger_entry.amount classification.balance -= ledger_entry.amount 
account.balance -= ledger_entry.amount subaccount.balance -= ledger_entry.amount elif ledger_entry.tside == 'debit': element.balance += ledger_entry.amount classification.balance += ledger_entry.amount account.balance += ledger_entry.amount subaccount.balance += ledger_entry.amount if element.name == 'Equity': retained_earnings = -element.balance print(retained_earnings) elements = [c for c in elements if c.name in ['Assets', 'Liabilities']] return render_template('financial_statements/balance_sheet.html', periods=periods, currency=currency, elements=elements, retained_earnings=retained_earnings, period=period_end) @app.route('/FinancialStatements/BalanceSheet/<currency>/<period>') def balance_sheet_historical(currency, period): periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .group_by( \ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .all() periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods]) period = datetime.strptime(period, "%Y-%m") lastday = calendar.monthrange(period.year, period.month)[1] period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0) period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999) elements = db.session \ .query(models.Elements) \ .join(models.Classifications) \ .join(models.Accounts) \ .join(models.Subaccounts) \ .all() retained_earnings = 0 for element in elements: element.balance = 0 for classification in element.classifications: classification.balance = 0 for account in classification.accounts: account.balance = 0 for subaccount in account.subaccounts: subaccount.balance = 0 subaccount.ledgerentries = [c for c in subaccount.ledgerentries if c.date <= period_end ] for ledger_entry in subaccount.ledgerentries: if ledger_entry.currency == currency: if ledger_entry.tside == 'credit': element.balance -= ledger_entry.amount classification.balance -= ledger_entry.amount account.balance -= ledger_entry.amount subaccount.balance -= ledger_entry.amount elif ledger_entry.tside == 'debit': element.balance += ledger_entry.amount classification.balance += ledger_entry.amount account.balance += ledger_entry.amount subaccount.balance += ledger_entry.amount if element.name == 'Equity': retained_earnings = -element.balance print(retained_earnings) elements = [c for c in elements if c.name in ['Assets', 'Liabilities']] return render_template('financial_statements/balance_sheet.html', periods=periods, currency=currency, elements=elements, retained_earnings=retained_earnings, period=period_end) @app.route('/FinancialStatements/StatementOfCashFlows/<currency>/<period>') def statement_of_cash_flows(currency, period): periods = db.session \ .query(\ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .group_by( \ func.date_part('year', models.LedgerEntries.date), \ func.date_part('month', models.LedgerEntries.date)) \ .all() periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods]) if period == 'Current': period = datetime.now() lastday = period.day else: period = datetime.strptime(period, "%Y-%m") lastday = calendar.monthrange(period.year, period.month)[1] period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0) period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999) elements = db.session \ .query(models.Elements) \ .join(models.Classifications) \ 
.filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\ .join(models.Accounts) \ .join(models.Subaccounts) \ .all() net_income = 0 for element in elements: element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']] for classification in element.classifications: classification.balance = 0 for account in classification.accounts: account.balance = 0 for subaccount in account.subaccounts: subaccount.balance = 0 subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ] for ledger_entry in subaccount.ledgerentries: if ledger_entry.currency == currency: if ledger_entry.tside == 'credit': classification.balance -= ledger_entry.amount account.balance -= ledger_entry.amount subaccount.balance -= ledger_entry.amount elif ledger_entry.tside == 'debit': classification.balance += ledger_entry.amount account.balance += ledger_entry.amount subaccount.balance += ledger_entry.amount return render_template('financial_statements/statement_of_cash_flows.html', period = period, periods = periods, currency = currency, elements = elements, net_income = net_income)
bsd-3-clause
207,046,590,486,542,980
43.070735
757
0.628419
false
3.976348
false
false
false
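The trial-balance views in the record above accumulate query['debitBalance'] and query['creditBalance'] from ledgers.foot_account, whose implementation is not included in this record. A minimal standalone sketch of what such a footing helper presumably does; LedgerEntry is an illustrative stand-in for the ORM rows, not the app's model:

from collections import namedtuple

LedgerEntry = namedtuple('LedgerEntry', ['tside', 'amount'])

def foot_account(entries):
    # sum each transaction side separately; the views above consume these two keys
    debits = sum(e.amount for e in entries if e.tside == 'debit')
    credits = sum(e.amount for e in entries if e.tside == 'credit')
    return {'debitBalance': debits, 'creditBalance': credits}

entries = [LedgerEntry('debit', 100), LedgerEntry('credit', 40)]
print(foot_account(entries))   # {'debitBalance': 100, 'creditBalance': 40}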
rdmorganiser/rdmo
rdmo/projects/models/value.py
1
6697
import mimetypes
from pathlib import Path

import iso8601
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django_cleanup import cleanup

from rdmo.core.constants import (VALUE_TYPE_BOOLEAN, VALUE_TYPE_CHOICES,
                                 VALUE_TYPE_DATETIME, VALUE_TYPE_TEXT)
from rdmo.core.models import Model
from rdmo.domain.models import Attribute
from rdmo.options.models import Option

from ..managers import ValueManager
from ..utils import get_value_path


def get_file_upload_to(instance, filename):
    return str(get_value_path(instance.project, instance.snapshot) / str(instance.id) / filename)


class Value(Model):

    objects = ValueManager()

    FALSE_TEXT = [None, '', '0', 'f', 'F', 'false', 'False']

    project = models.ForeignKey(
        'Project', on_delete=models.CASCADE, related_name='values',
        verbose_name=_('Project'),
        help_text=_('The project this value belongs to.')
    )
    snapshot = models.ForeignKey(
        'Snapshot', blank=True, null=True,
        on_delete=models.CASCADE, related_name='values',
        verbose_name=_('Snapshot'),
        help_text=_('The snapshot this value belongs to.')
    )
    attribute = models.ForeignKey(
        Attribute, blank=True, null=True,
        on_delete=models.SET_NULL, related_name='values',
        verbose_name=_('Attribute'),
        help_text=_('The attribute this value belongs to.')
    )
    set_index = models.IntegerField(
        default=0,
        verbose_name=_('Set index'),
        help_text=_('The position of this value in an entity collection (i.e. in the question set)')
    )
    collection_index = models.IntegerField(
        default=0,
        verbose_name=_('Collection index'),
        help_text=_('The position of this value in an attribute collection.')
    )
    text = models.TextField(
        blank=True,
        verbose_name=_('Text'),
        help_text=_('The string stored for this value.')
    )
    option = models.ForeignKey(
        Option, blank=True, null=True,
        on_delete=models.SET_NULL, related_name='values',
        verbose_name=_('Option'),
        help_text=_('The option stored for this value.')
    )
    file = models.FileField(
        upload_to=get_file_upload_to, null=True, blank=True,
        verbose_name=_('File'),
        help_text=_('The file stored for this value.')
    )
    value_type = models.CharField(
        max_length=8, choices=VALUE_TYPE_CHOICES, default=VALUE_TYPE_TEXT,
        verbose_name=_('Value type'),
        help_text=_('Type of this value.')
    )
    unit = models.CharField(
        max_length=64, blank=True,
        verbose_name=_('Unit'),
        help_text=_('Unit for this value.')
    )
    external_id = models.CharField(
        max_length=256, blank=True,
        verbose_name=_('External id'),
        help_text=_('External id for this value.')
    )

    class Meta:
        ordering = ('attribute', 'set_index', 'collection_index')
        verbose_name = _('Value')
        verbose_name_plural = _('Values')

    @property
    def as_dict(self):
        value_dict = {
            'id': self.id,
            'created': self.created,
            'updated': self.updated,
            'set_index': self.set_index,
            'collection_index': self.collection_index,
            'value_type': self.value_type,
            'unit': self.unit,
            'external_id': self.external_id,
            'value': self.value,
            'value_and_unit': self.value_and_unit,
            'is_true': self.is_true,
            'is_false': self.is_false,
            'as_number': self.as_number
        }
        if self.file:
            value_dict.update({
                'file_name': self.file_name,
                'file_url': self.file_url,
                'file_type': self.file_type,
                'file_path': self.file_path
            })
        return value_dict

    @property
    def value(self):
        if self.option:
            value = self.option.text or ''
            if self.option.additional_input and self.text:
                value += ': ' + self.text
            return value
        elif self.file:
            return self.file_name
        elif self.text:
            if self.value_type == VALUE_TYPE_DATETIME:
                try:
                    return iso8601.parse_date(self.text).date()
                except iso8601.ParseError:
                    return self.text
            elif self.value_type == VALUE_TYPE_BOOLEAN:
                if self.text == '1':
                    return _('Yes')
                else:
                    return _('No')
            else:
                return self.text
        else:
            return None

    @property
    def value_and_unit(self):
        value = self.value
        if value is None:
            return ''
        elif self.unit:
            return '%s %s' % (value, self.unit)
        else:
            return value

    @property
    def is_true(self):
        return self.text not in self.FALSE_TEXT

    @property
    def is_false(self):
        return self.text in self.FALSE_TEXT

    @property
    def as_number(self):
        try:
            val = self.text
        except AttributeError:
            return 0
        else:
            if isinstance(val, str):
                val = val.replace(',', '.')
            if isinstance(val, float) is False:
                try:
                    return int(val)
                except (ValueError, TypeError):
                    pass
                try:
                    return float(val)
                except (ValueError, TypeError):
                    return 0
            else:
                return val

    @property
    def file_name(self):
        if self.file:
            return Path(self.file.name).name

    @property
    def file_url(self):
        if self.file:
            return reverse('v1-projects:value-file', args=[self.id])

    @property
    def file_type(self):
        if self.file:
            return mimetypes.guess_type(self.file.name)[0]

    @property
    def file_path(self):
        if self.file:
            resource_path = get_value_path(self.project, self.snapshot)
            return Path(self.file.name).relative_to(resource_path).as_posix()

    def copy_file(self, file_name, file_content):
        # copies a file field from a different value over to this value
        # this is tricky, because we need to trick django_cleanup to not delete the original file
        # important for snapshots and import from projects
        self.file.save(file_name, file_content, save=False)
        cleanup.refresh(self)
        self.save()
apache-2.0
2,072,839,092,465,988,000
30.148837
100
0.558907
false
4.20138
false
false
false
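The as_number property in the record above normalises a decimal comma and tries int before falling back to float. The same parsing order as a standalone function, free of the Django model (a sketch mirroring the shown logic, not the rdmo API):

def as_number(text):
    # normalise a decimal comma, try int first, then float, fall back to 0
    if isinstance(text, str):
        text = text.replace(',', '.')
    try:
        return int(text)
    except (ValueError, TypeError):
        pass
    try:
        return float(text)
    except (ValueError, TypeError):
        return 0

assert as_number('42') == 42
assert as_number('3,14') == 3.14
assert as_number(None) == 0
assert as_number('n/a') == 0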
sxjscience/tvm
tests/python/relay/test_ir_parser.py
1
22635
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te from tvm import relay import tvm.relay.testing import pytest from numpy import isclose from typing import Union from functools import wraps SEMVER = '#[version = "0.0.5"]\n' BINARY_OPS = { "*": relay.multiply, "/": relay.divide, "+": relay.add, "-": relay.subtract, "<": relay.less, ">": relay.greater, "<=": relay.less_equal, ">=": relay.greater_equal, "==": relay.equal, "!=": relay.not_equal, } TYPES = { "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float16", "float32", "float64", "bool", "int8x4", "uint1x4", "float16x4", } LIST_DEFN = """ type List[A] { Cons(A, List[A]), Nil, } """ def assert_graph_equal(lhs, rhs): tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True) def graph_equal(lhs, rhs): return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True) def roundtrip_expr(expr): text = tvm.relay.Expr.astext(expr, show_meta_data=False) x = tvm.parser.parse_expr(text) assert_graph_equal(x, expr) # Testing Utilities for expressions. def roundtrip(expr): x = tvm.parser.fromtext(expr.astext()) assert_graph_equal(x, expr) def parse_text(code): expr = tvm.parser.parse_expr(code) roundtrip_expr(expr) return expr def parses_as(code, expr): # type: (str, relay.Expr) -> bool parsed = parse_text(code) result = graph_equal(parsed, expr) return result # Testing Utilities for full modules. def parse_module(code): mod = tvm.parser.parse(SEMVER + code) roundtrip(mod) return mod def assert_parses_as(code, expr): parsed = parse_text(code) assert_graph_equal(parsed, expr) def assert_parse_module_as(code, mod): mod = tvm.relay.transform.InferType()(mod) parsed = parse_module(code) assert_graph_equal(parsed, mod) def get_scalar(x): # type: (relay.Constant) -> (Union[float, int, bool]) return x.data.asnumpy().item() int32 = relay.scalar_type("int32") _ = relay.Var("_") X = relay.Var("x") Y = relay.Var("y") X_ANNO = relay.Var("x", int32) Y_ANNO = relay.Var("y", int32) UNIT = relay.Tuple([]) def test_comments(): assert_parses_as( """ // This is a line comment! () """, UNIT, ) assert_parses_as( """ /* This is a block comment! This is still a block comment! */ () """, UNIT, ) assert_parses_as( """ /* This is a block comment! 
/*Block comment is recursive!*/ */ () """, UNIT, ) def test_int_literal(): assert isinstance(parse_text("1"), relay.Constant) assert isinstance(parse_text("1").data, tvm.nd.NDArray) assert get_scalar(parse_text("1")) == 1 assert get_scalar(parse_text("10")) == 10 assert get_scalar(parse_text("0")) == 0 assert get_scalar(parse_text("-100")) == -100 assert get_scalar(parse_text("-05")) == -5 def test_float_literal(): assert get_scalar(parse_text("1.0f")) == 1.0 assert isclose(get_scalar(parse_text("1.56667f")), 1.56667) assert get_scalar(parse_text("0.0f")) == 0.0 assert get_scalar(parse_text("-10.0f")) == -10.0 # scientific notation assert isclose(get_scalar(parse_text("1e-1f")), 1e-1) assert get_scalar(parse_text("1e+1f")) == 1e1 assert isclose(get_scalar(parse_text("1E-1f")), 1e-1) assert get_scalar(parse_text("1E+1f")) == 1e1 assert isclose(get_scalar(parse_text("1.0e-1f")), 1.0e-1) assert get_scalar(parse_text("1.0e+1f")) == 1.0e1 assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1) assert get_scalar(parse_text("1.0E+1f")) == 1.0e1 def test_bool_literal(): assert get_scalar(parse_text("True")) == True assert get_scalar(parse_text("False")) == False def test_negative(): # need to handle parsing non-literal operations # assert isinstance(parse_text("let %x = 1; -%x").body, relay.Call) assert get_scalar(parse_text("--10")) == 10 assert get_scalar(parse_text("---10")) == -10 def test_bin_op(): for bin_op in BINARY_OPS.keys(): assert_parses_as( "1 {} 1".format(bin_op), BINARY_OPS.get(bin_op)(relay.const(1), relay.const(1)) ) def test_parens(): assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1")) assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)")) def test_op_assoc(): assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1")) assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))")) def test_vars(): # var var = parse_text("let %foo = (); %foo") assert isinstance(var.body, relay.Var) assert var.body.name_hint == "foo" # global var global_var = parse_text("@foo") assert isinstance(global_var, relay.GlobalVar) assert global_var.name_hint == "foo" # operator id op = parse_text("add") assert isinstance(op, tvm.ir.Op) assert op.name == "add" # operator id with prefix op = parse_text("nn.global_avg_pool2d") assert isinstance(op, tvm.ir.Op) assert op.name == "nn.global_avg_pool2d" def test_meta_ref(): with pytest.raises(tvm.error.DiagnosticError): meta_op = parse_text("meta[type_key][1337]") assert meta_op.attrs.node_type_key == "type_key" assert meta_op.attrs.node_index == 1337 def test_let(): assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT)) assert_parses_as( """ let %x = 1; let %y = 2; () """, relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)), ) def test_seq(): assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT)) assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT)) def test_graph(): code = "%0 = (); %1 = 1; (%0, %0, %1)" assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)])) def test_graph_single(): assert_parses_as("%1 = (); %1", relay.Tuple([])) def test_let_global_var(): with pytest.raises(tvm.error.DiagnosticError): parse_text("let @x = 1; ()") def test_let_op(): with pytest.raises(tvm.error.DiagnosticError): parse_text("let x = 1; ()") def test_tuple(): assert_parses_as("()", relay.Tuple([])) assert_parses_as("(0,)", relay.Tuple([relay.const(0)])) assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), 
relay.const(1)])) assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)])) def test_tuple_proj(): x = relay.var("x", shape=()) assert_parses_as( "free_var %x: float32; %x((%x,).0, %x)", relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]), ) def test_func(): # 0 args assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, [])) # 1 arg assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, [])) # 2 args assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, [])) # annotations assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, [])) # Refactor the attribute syntax and printing. # # # attributes # assert_parses_as( # "fn (n=5) { () }", # relay.Function([], UNIT, None, None, tvm.ir.make_node("DictAttrs", n=relay.const(5))) # ) # TODO(@jmp): Crashes if %x isn't annnotated. def test_defn(): id_defn = parse_module( """ def @id(%x: int32) -> int32 { %x } """ ) assert isinstance(id_defn, tvm.IRModule) def test_recursive_call(): id_defn = parse_module( """ def @id(%x: int32) -> int32 { @id(%x) } """ ) assert isinstance(id_defn, tvm.IRModule) def test_ifelse(): assert_parses_as( """ if (True) { 0 } else { 1 } """, relay.If(relay.const(True), relay.const(0), relay.const(1)), ) def test_ifelse_scope(): with pytest.raises(tvm.error.DiagnosticError): parse_text( """ if (True) { let %x = (); () } else { %x } """ ) def test_ref(): program = """ #[version = "0.0.5"] def @main(%x: float32) { %0 = ref(%x); ref_write(%0, 1f); ref_read(%0) } """ tvm.parser.parse(program) def test_call(): # select right function to call: simple ident case id_func = relay.Var("id") assert_parses_as( """ let %id = fn (%x) { %x }; 10 * %id(10) """, relay.Let( id_func, relay.Function([X], X, None, []), relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])), ), ) # 0 args constant = relay.Var("constant") assert_parses_as( """ let %constant = fn () { 0 }; %constant() """, relay.Let( constant, relay.Function([], relay.const(0), None, []), relay.Call(constant, [], None, None), ), ) # 1 arg id_var = relay.Var("id") assert_parses_as( """ let %id = fn (%x) { %x }; %id(1) """, relay.Let( id_var, relay.Function([X], X, None, []), relay.Call(id_var, [relay.const(1)], None, None), ), ) # 2 args multiply = relay.Var("multiply") assert_parses_as( """ let %multiply = fn (%x, %y) { %x * %y }; %multiply(0, 0) """, relay.Let( multiply, relay.Function([X, Y], relay.multiply(X, Y), None, []), relay.Call(multiply, [relay.const(0), relay.const(0)], None, None), ), ) # anonymous function assert_parses_as( """ (fn (%x) { %x })(0) """, relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None), ) # curried function curried_mult = relay.Var("curried_mult") assert_parses_as( """ let %curried_mult = fn (%x) { fn (%y) { %x * %y } }; %curried_mult(0); %curried_mult(0)(0) """, relay.Let( curried_mult, relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []), relay.Let( _, relay.Call(curried_mult, [relay.const(0)], None, None), relay.Call( relay.Call(curried_mult, [relay.const(0)], None, None), [relay.const(0)], None, None, ), ), ), ) # op assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None)) # Types def test_incomplete_type(): assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT)) def test_builtin_types(): for builtin_type in TYPES: parse_text("let %_ : {} = (); ()".format(builtin_type)) def test_tensor_type(): 
assert_parses_as( "let %_ : Tensor[(), float32] = (); ()", relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT), ) assert_parses_as( "let %_ : Tensor[(1), float32] = (); ()", relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT), ) assert_parses_as( "let %_ : Tensor[(1, 1), float32] = (); ()", relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT), ) assert_parses_as( "let %_ : Tensor[(?, 1), float32] = (); ()", relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT), ) def test_function_type(): assert_parses_as( """ let %_: fn () -> int32 = fn () -> int32 { 0 }; () """, relay.Let( relay.Var("_", relay.FuncType([], int32, [], [])), relay.Function([], relay.const(0), int32, []), UNIT, ), ) assert_parses_as( """ let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; () """, relay.Let( relay.Var("_", relay.FuncType([int32], int32, [], [])), relay.Function([relay.Var("x", int32)], relay.const(0), int32, []), UNIT, ), ) assert_parses_as( """ let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; () """, relay.Let( relay.Var("_", relay.FuncType([int32, int32], int32, [], [])), relay.Function( [relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, [] ), UNIT, ), ) def test_tuple_type(): assert_parses_as( """ let %_: () = (); () """, relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT), ) assert_parses_as( """ let %_: (int32,) = (0,); () """, relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT), ) assert_parses_as( """ let %_: (int32, int32) = (0, 1); () """, relay.Let( relay.Var("_", relay.TupleType([int32, int32])), relay.Tuple([relay.const(0), relay.const(1)]), UNIT, ), ) def test_adt_defn(): mod = tvm.IRModule() glob_typ_var = relay.GlobalTypeVar("Ayy") prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)]) mod[glob_typ_var] = prog assert_parse_module_as( """ type Ayy { Nil } """, mod, ) def test_adt_any(): code = """ type my_dtype { my_cons(Tensor[(?, 1), uint16]), } """ mod = parse_module(code) items = mod.type_definitions.items() global_type_var, type_data = items[0] assert global_type_var.name_hint == "my_dtype" ctors = type_data.constructors assert len(ctors) == 1 my_cons = ctors[0] assert my_cons.name_hint == "my_cons" ty_shape = my_cons.inputs[0].shape assert isinstance(ty_shape[0], tvm.tir.Any) assert ty_shape[1] == 1 def test_empty_adt_defn(): mod = tvm.IRModule() glob_typ_var = relay.GlobalTypeVar("Ayy") prog = relay.TypeData(glob_typ_var, [], []) mod[glob_typ_var] = prog assert_parse_module_as( """ type Ayy { } """, mod, ) def test_multiple_cons_defn(): mod = tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") prog = relay.TypeData( list_var, [typ_var], [ relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var), relay.Constructor("Nil", [], list_var), ], ) mod[list_var] = prog assert_parse_module_as(LIST_DEFN, mod) def test_multiple_type_param_defn(): glob_typ_var = relay.GlobalTypeVar("Either") typ_var_a = relay.TypeVar("A") typ_var_b = relay.TypeVar("B") prog = relay.TypeData( glob_typ_var, [typ_var_a, typ_var_b], [ relay.Constructor("Left", [typ_var_a], glob_typ_var), relay.Constructor("Right", [typ_var_b], glob_typ_var), ], ) mod = tvm.IRModule() mod[glob_typ_var] = prog assert_parse_module_as( """ type Either[A, B] { Left(A), Right(B), } """, mod, ) def test_match(): # pair each match keyword with whether it specifies a complete match 
or not match_keywords = [("match", True), ("match?", False)] for (match_keyword, is_complete) in match_keywords: mod = tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var) nil_constructor = relay.Constructor("Nil", [], list_var) list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor]) mod[list_var] = list_def length_var = relay.GlobalVar("length") typ_var = relay.TypeVar("A") input_type = list_var(typ_var) input_var = relay.Var("xs", input_type) rest_var = relay.Var("rest") cons_case = relay.Let( relay.var("", type_annotation=None), UNIT, relay.add(relay.const(1), relay.Call(length_var, [rest_var])), ) body = relay.Match( input_var, [ relay.Clause( relay.PatternConstructor( cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)] ), cons_case, ), relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)), ], complete=is_complete, ) length_func = relay.Function([input_var], body, int32, [typ_var]) mod[length_var] = length_func assert_parse_module_as( """ %s def @length[A](%%xs: List[A]) -> int32 { %s (%%xs) { Cons(_, %%rest : List[A]) => { (); 1 + @length(%%rest) }, Nil => 0, } } """ % (LIST_DEFN, match_keyword), mod, ) def test_adt_cons_expr(): mod = tvm.IRModule() list_var = relay.GlobalTypeVar("List") typ_var = relay.TypeVar("A") cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var) nil_constructor = relay.Constructor("Nil", [], list_var) list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor]) mod[list_var] = list_def make_singleton_var = relay.GlobalVar("make_singleton") input_var = relay.Var("x", int32) make_singleton_func = relay.Function( [input_var], cons_constructor(input_var, nil_constructor()), list_var(int32) ) mod[make_singleton_var] = make_singleton_func assert_parse_module_as( """ %s def @make_singleton(%%x: int32) -> List[int32] { Cons(%%x, Nil) } """ % LIST_DEFN, mod, ) def test_duplicate_adt_defn(): with pytest.raises(tvm.error.DiagnosticError): parse_module( """ %s type List[A] { Cons(A, List[A]), Nil, } """ % LIST_DEFN ) def test_duplicate_adt_cons(): with pytest.raises(tvm.error.DiagnosticError): parse_text( """ type Ayy { Lmao } type Haha { Lmao } """ ) def test_duplicate_adt_cons_defn(): with pytest.raises(tvm.error.DiagnosticError): parse_text( """ type Ayy { Lmao } type Lmao { Ayy } """ ) def test_duplicate_global_var(): with pytest.raises(tvm.error.DiagnosticError): parse_text( """ def @id[A](%x: A) -> A { x } def @id[A](%x: A) -> A { x } """ ) def test_extern_adt_defn(): mod = tvm.IRModule() extern_var = relay.GlobalTypeVar("T") typ_var = relay.TypeVar("A") extern_def = relay.TypeData(extern_var, [typ_var], []) mod[extern_var] = extern_def assert_parse_module_as( """ extern type T[A] """, mod, ) def test_import_grad(): mod = tvm.IRModule() mod.import_from_std("gradient.rly") def test_resnet(): mod, _ = relay.testing.resnet.get_workload() text = mod.astext() parsed_mod = tvm.parser.parse(text) tvm.ir.assert_structural_equal(mod, parsed_mod) def inline_params(mod, params): main_fn = mod["main"] str_to_var = {} for param in main_fn.params: str_to_var[param.name_hint] = param bind_map = {} for param in params: bind_map[str_to_var[param]] = relay.const(params[param]) body = relay.bind(main_fn.body, bind_map) main_fn = relay.Function(relay.analysis.free_vars(body), body) mod._add("main", main_fn, True) return mod def 
test_resnet_inlined_params(): mod, params = relay.testing.resnet.get_workload() mod = inline_params(mod, params) mod = relay.transform.InferType()(mod) text = mod.astext() parsed_mod = tvm.parser.parse(text) tvm.ir.assert_structural_equal(mod, parsed_mod) def test_tuple_return_value(): program = """ type Box[T] { constructor(T) } def @example() { %0 = (); %1 = constructor(%0); %2 = constructor(0f); (%1, %2,) } """ parse_module(program) def test_op_string_attr(): call = parse_text( """ free_var %x: Tensor[(1, 32, 32, 3), float32]; free_var %y: Tensor[(1, 1, 3, 3), float32]; nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO") """ ) assert isinstance(call.op, tvm.ir.Op) assert call.op.name == "nn.conv2d" assert call.attrs.data_layout == "NHWC" assert call.attrs.kernel_layout == "HWIO" def test_load_prelude(): mod = tvm.IRModule() mod.import_from_std("prelude.rly") tvm.parser.parse(mod.astext()) if __name__ == "__main__": import sys pytest.main(sys.argv)
apache-2.0
-6,946,301,590,166,049,000
24.094235
99
0.522377
false
3.243301
true
false
false
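Most tests in the record above rely on a print/parse round trip: an expression is printed with astext, reparsed, and checked for structural equality. A minimal sketch of the same pattern using Python's own ast module as a stand-in for the tvm APIs (ast.unparse requires Python 3.9+):

import ast

def roundtrip(source):
    # parse -> print -> reparse, then compare the two trees structurally
    tree = ast.parse(source)
    printed = ast.unparse(tree)        # Python >= 3.9
    reparsed = ast.parse(printed)
    assert ast.dump(reparsed) == ast.dump(tree)
    return printed

print(roundtrip('x = 1 * 1 + 1'))      # x = 1 * 1 + 1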
xhchrn/gegan
train.py
1
3732
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import

import tensorflow as tf
import argparse

from model.gegan import GEGAN

parser = argparse.ArgumentParser(description='Train')
parser.add_argument('--experiment_dir', dest='experiment_dir', required=True,
                    help='experiment directory: data, samples, checkpoints, etc.')
parser.add_argument('--experiment_id', dest='experiment_id', type=int, default=0,
                    help='sequence id for the experiments you prepare to run')
parser.add_argument('--image_size', dest='image_size', type=int, default=64,
                    help="size of your input and output image")
parser.add_argument('--L1_penalty', dest='L1_penalty', type=int, default=100,
                    help='weight for L1 loss')
parser.add_argument('--Lconst_penalty', dest='Lconst_penalty', type=int, default=15,
                    help='weight for const loss')
parser.add_argument('--Ltv_penalty', dest='Ltv_penalty', type=float, default=0.0,
                    help='weight for tv loss')
parser.add_argument('--Lcategory_penalty', dest='Lcategory_penalty', type=float, default=1.0,
                    help='weight for category loss')
parser.add_argument('--embedding_num', dest='embedding_num', type=int, default=2,
                    help="number of distinct embeddings")
parser.add_argument('--embedding_dim', dest='embedding_dim', type=int, default=64,
                    help="dimension of the embedding")
parser.add_argument('--epoch', dest='epoch', type=int, default=100,
                    help='number of epochs')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16,
                    help='number of examples in a batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.001,
                    help='initial learning rate for adam')
parser.add_argument('--schedule', dest='schedule', type=int, default=10,
                    help='number of epochs after which to halve the learning rate')
parser.add_argument('--resume', dest='resume', type=int, default=1,
                    help='resume from previous training')
parser.add_argument('--freeze_encoder', dest='freeze_encoder', type=int, default=0,
                    help="freeze encoder weights during training")
parser.add_argument('--fine_tune', dest='fine_tune', type=str, default=None,
                    help='specific labels id to be fine tuned')
parser.add_argument('--inst_norm', dest='inst_norm', type=int, default=0,
                    help='use conditional instance normalization in your model')
parser.add_argument('--sample_steps', dest='sample_steps', type=int, default=10,
                    help='number of batches between two samples drawn from the validation set')
parser.add_argument('--checkpoint_steps', dest='checkpoint_steps', type=int, default=500,
                    help='number of batches between two checkpoints')
args = parser.parse_args()


def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = GEGAN(args.experiment_dir, batch_size=args.batch_size,
                      experiment_id=args.experiment_id,
                      input_width=args.image_size, output_width=args.image_size,
                      embedding_num=args.embedding_num,
                      embedding_dim=args.embedding_dim,
                      L1_penalty=args.L1_penalty,
                      Lconst_penalty=args.Lconst_penalty,
                      Ltv_penalty=args.Ltv_penalty,
                      Lcategory_penalty=args.Lcategory_penalty)
        model.register_session(sess)
        model.build_model(is_training=True, inst_norm=args.inst_norm)
        model.train(lr=args.lr, epoch=args.epoch, resume=args.resume,
                    schedule=args.schedule, freeze_encoder=args.freeze_encoder,
                    sample_steps=args.sample_steps,
                    checkpoint_steps=args.checkpoint_steps)


if __name__ == '__main__':
    tf.app.run()
apache-2.0
-6,427,230,271,344,068,000
61.2
119
0.681404
false
3.74323
false
false
false
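The --schedule flag in the record above halves the learning rate every given number of epochs; since the GEGAN.train implementation is not included, the following stepwise schedule is an assumption about its behaviour, sketched standalone:

import math

def scheduled_lr(initial_lr, epoch, schedule):
    # halve the initial rate once per completed block of `schedule` epochs
    return initial_lr * (0.5 ** (epoch // schedule))

assert math.isclose(scheduled_lr(0.001, 0, 10), 0.001)
assert math.isclose(scheduled_lr(0.001, 10, 10), 0.0005)
assert math.isclose(scheduled_lr(0.001, 25, 10), 0.00025)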
Ictp/indico
bin/utils/changeStyle.py
1
1781
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.

from indico.core.db import DBMgr
from MaKaC.webinterface import displayMgr
from MaKaC.conference import CategoryManager

logfile = open('./oldstyles', 'w')


def changeCatStyle(cat):
    for subcat in cat.getSubCategoryList():
        currentStyle = subcat.getDefaultStyle("meeting")
        subcat.setDefaultStyle("meeting", "lhcb_meeting")
        logfile.write("cat %s: %s" % (subcat.getId(), currentStyle))
        changeCatStyle(subcat)
    for conf in cat.getConferenceList():
        currentStyle = displayMgr.ConfDisplayMgrRegistery().getDisplayMgr(conf).getDefaultStyle()
        displayMgr.ConfDisplayMgrRegistery().getDisplayMgr(conf).setDefaultStyle("lhcb_meeting")
        logfile.write("\t\t\tconf %s: %s" % (conf.getId(), currentStyle))


dbm = DBMgr.getInstance()
dbm.startRequest()
cat = CategoryManager().getById('233')
currentStyle = cat.getDefaultStyle("meeting")
cat.setDefaultStyle("meeting", "lhcb_meeting")
logfile.write("cat %s: %s" % (cat.getId(), currentStyle))
changeCatStyle(cat)
dbm.endRequest()
logfile.close()
gpl-3.0
-5,476,135,551,570,129,000
37.717391
96
0.732734
false
3.605263
false
false
false
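changeCatStyle in the record above is a pre-order walk of the category tree, rewriting the default style at each node before recursing. The same traversal shape in isolation; Category here is an illustrative stand-in for Indico's category objects:

class Category(object):
    def __init__(self, cat_id, subcategories=None):
        self.cat_id = cat_id
        self.subcategories = subcategories or []

def walk(cat, visit):
    # visit the node first, then recurse into each subcategory
    visit(cat)
    for sub in cat.subcategories:
        walk(sub, visit)

root = Category('233', [Category('a', [Category('a1')]), Category('b')])
walk(root, lambda c: print(c.cat_id))   # prints 233, a, a1, b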
tkaitchuck/nupic
external/common/lib/python2.6/site-packages/logilab/astng/test/unittest_inference.py
1
41136
# copyright 2003-2010 Sylvain Thenault, all rights reserved. # contact mailto:[email protected] # # This file is part of logilab-astng. # # logilab-astng is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 2.1 of the License, or (at your # option) any later version. # # logilab-astng is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License # for more details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-astng. If not, see <http://www.gnu.org/licenses/>. """tests for the astng inference capabilities """ from os.path import join, dirname, abspath import sys from StringIO import StringIO from logilab.common.testlib import TestCase, unittest_main from logilab.astng import InferenceError, builder, nodes from logilab.astng.inference import infer_end as inference_infer_end from logilab.astng.bases import YES, Instance, BoundMethod, UnboundMethod,\ path_wrapper, BUILTINS_NAME def get_name_node(start_from, name, index=0): return [n for n in start_from.nodes_of_class(nodes.Name) if n.name == name][index] def get_node_of_class(start_from, klass): return start_from.nodes_of_class(klass).next() builder = builder.ASTNGBuilder() class InferenceUtilsTC(TestCase): def test_path_wrapper(self): def infer_default(self, *args): raise InferenceError infer_default = path_wrapper(infer_default) infer_end = path_wrapper(inference_infer_end) self.failUnlessRaises(InferenceError, infer_default(1).next) self.failUnlessEqual(infer_end(1).next(), 1) if sys.version_info < (3, 0): EXC_MODULE = 'exceptions' else: EXC_MODULE = BUILTINS_NAME class InferenceTC(TestCase): CODE = ''' class C(object): "new style" attr = 4 def meth1(self, arg1, optarg=0): var = object() print ("yo", arg1, optarg) self.iattr = "hop" return var def meth2(self): self.meth1(*self.meth3) def meth3(self, d=attr): b = self.attr c = self.iattr return b, c ex = Exception("msg") v = C().meth1(1) m_unbound = C.meth1 m_bound = C().meth1 a, b, c = ex, 1, "bonjour" [d, e, f] = [ex, 1.0, ("bonjour", v)] g, h = f i, (j, k) = "glup", f a, b= b, a # Gasp ! 
''' astng = builder.string_build(CODE, __name__, __file__) def test_module_inference(self): infered = self.astng.infer() obj = infered.next() self.failUnlessEqual(obj.name, __name__) self.failUnlessEqual(obj.root().name, __name__) self.failUnlessRaises(StopIteration, infered.next) def test_class_inference(self): infered = self.astng['C'].infer() obj = infered.next() self.failUnlessEqual(obj.name, 'C') self.failUnlessEqual(obj.root().name, __name__) self.failUnlessRaises(StopIteration, infered.next) def test_function_inference(self): infered = self.astng['C']['meth1'].infer() obj = infered.next() self.failUnlessEqual(obj.name, 'meth1') self.failUnlessEqual(obj.root().name, __name__) self.failUnlessRaises(StopIteration, infered.next) def test_builtin_name_inference(self): infered = self.astng['C']['meth1']['var'].infer() var = infered.next() self.failUnlessEqual(var.name, 'object') self.failUnlessEqual(var.root().name, BUILTINS_NAME) self.failUnlessRaises(StopIteration, infered.next) def test_tupleassign_name_inference(self): infered = self.astng['a'].infer() exc = infered.next() self.assertIsInstance(exc, Instance) self.failUnlessEqual(exc.name, 'Exception') self.failUnlessEqual(exc.root().name, EXC_MODULE) self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['b'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, 1) self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['c'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, "bonjour") self.failUnlessRaises(StopIteration, infered.next) def test_listassign_name_inference(self): infered = self.astng['d'].infer() exc = infered.next() self.assertIsInstance(exc, Instance) self.failUnlessEqual(exc.name, 'Exception') self.failUnlessEqual(exc.root().name, EXC_MODULE) self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['e'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, 1.0) self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['f'].infer() const = infered.next() self.assertIsInstance(const, nodes.Tuple) self.failUnlessRaises(StopIteration, infered.next) def test_advanced_tupleassign_name_inference1(self): infered = self.astng['g'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, "bonjour") self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['h'].infer() var = infered.next() self.failUnlessEqual(var.name, 'object') self.failUnlessEqual(var.root().name, BUILTINS_NAME) self.failUnlessRaises(StopIteration, infered.next) def test_advanced_tupleassign_name_inference2(self): infered = self.astng['i'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, u"glup") self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['j'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, "bonjour") self.failUnlessRaises(StopIteration, infered.next) infered = self.astng['k'].infer() var = infered.next() self.failUnlessEqual(var.name, 'object') self.failUnlessEqual(var.root().name, BUILTINS_NAME) self.failUnlessRaises(StopIteration, infered.next) def test_swap_assign_inference(self): infered = self.astng.locals['a'][1].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, 1) 
self.failUnlessRaises(StopIteration, infered.next) infered = self.astng.locals['b'][1].infer() exc = infered.next() self.assertIsInstance(exc, Instance) self.failUnlessEqual(exc.name, 'Exception') self.failUnlessEqual(exc.root().name, EXC_MODULE) self.failUnlessRaises(StopIteration, infered.next) def test_getattr_inference1(self): infered = self.astng['ex'].infer() exc = infered.next() self.assertIsInstance(exc, Instance) self.failUnlessEqual(exc.name, 'Exception') self.failUnlessEqual(exc.root().name, EXC_MODULE) self.failUnlessRaises(StopIteration, infered.next) def test_getattr_inference2(self): infered = get_node_of_class(self.astng['C']['meth2'], nodes.Getattr).infer() meth1 = infered.next() self.failUnlessEqual(meth1.name, 'meth1') self.failUnlessEqual(meth1.root().name, __name__) self.failUnlessRaises(StopIteration, infered.next) def test_getattr_inference3(self): infered = self.astng['C']['meth3']['b'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, 4) self.failUnlessRaises(StopIteration, infered.next) def test_getattr_inference4(self): infered = self.astng['C']['meth3']['c'].infer() const = infered.next() self.assertIsInstance(const, nodes.Const) self.failUnlessEqual(const.value, "hop") self.failUnlessRaises(StopIteration, infered.next) def test_callfunc_inference(self): infered = self.astng['v'].infer() meth1 = infered.next() self.assertIsInstance(meth1, Instance) self.failUnlessEqual(meth1.name, 'object') self.failUnlessEqual(meth1.root().name, BUILTINS_NAME) self.failUnlessRaises(StopIteration, infered.next) def test_unbound_method_inference(self): infered = self.astng['m_unbound'].infer() meth1 = infered.next() self.assertIsInstance(meth1, UnboundMethod) self.failUnlessEqual(meth1.name, 'meth1') self.failUnlessEqual(meth1.parent.frame().name, 'C') self.failUnlessRaises(StopIteration, infered.next) def test_bound_method_inference(self): infered = self.astng['m_bound'].infer() meth1 = infered.next() self.assertIsInstance(meth1, BoundMethod) self.failUnlessEqual(meth1.name, 'meth1') self.failUnlessEqual(meth1.parent.frame().name, 'C') self.failUnlessRaises(StopIteration, infered.next) def test_args_default_inference1(self): optarg = get_name_node(self.astng['C']['meth1'], 'optarg') infered = optarg.infer() obj1 = infered.next() self.assertIsInstance(obj1, nodes.Const) self.failUnlessEqual(obj1.value, 0) obj1 = infered.next() self.assertIs(obj1, YES, obj1) self.failUnlessRaises(StopIteration, infered.next) def test_args_default_inference2(self): infered = self.astng['C']['meth3'].ilookup('d') obj1 = infered.next() self.assertIsInstance(obj1, nodes.Const) self.failUnlessEqual(obj1.value, 4) obj1 = infered.next() self.assertIs(obj1, YES, obj1) self.failUnlessRaises(StopIteration, infered.next) def test_inference_restrictions(self): infered = get_name_node(self.astng['C']['meth1'], 'arg1').infer() obj1 = infered.next() self.assertIs(obj1, YES, obj1) self.failUnlessRaises(StopIteration, infered.next) def test_ancestors_inference(self): code = ''' class A: pass class A(A): pass ''' astng = builder.string_build(code, __name__, __file__) a1 = astng.locals['A'][0] a2 = astng.locals['A'][1] a2_ancestors = list(a2.ancestors()) self.failUnlessEqual(len(a2_ancestors), 1) self.failUnless(a2_ancestors[0] is a1) def test_ancestors_inference2(self): code = ''' class A: pass class B(A): pass class A(B): pass ''' astng = builder.string_build(code, __name__, __file__) a1 = astng.locals['A'][0] a2 = astng.locals['A'][1] a2_ancestors = 
list(a2.ancestors()) self.failUnlessEqual(len(a2_ancestors), 2) self.failUnless(a2_ancestors[0] is astng.locals['B'][0]) self.failUnless(a2_ancestors[1] is a1, a2_ancestors[1]) def test_f_arg_f(self): code = ''' def f(f=1): return f a = f() ''' astng = builder.string_build(code, __name__, __file__) a = astng['a'] a_infered = a.infered() self.failUnlessEqual(a_infered[0].value, 1) self.assertEqual(len(a_infered), 1) def test_exc_ancestors(self): code = ''' def f(): raise NotImplementedError ''' astng = builder.string_build(code, __name__, __file__) error = astng.nodes_of_class(nodes.Name).next() nie = error.infered()[0] self.assertIsInstance(nie, nodes.Class) nie_ancestors = [c.name for c in nie.ancestors()] if sys.version_info < (3, 0): self.failUnlessEqual(nie_ancestors, ['RuntimeError', 'StandardError', 'Exception', 'BaseException', 'object']) else: self.failUnlessEqual(nie_ancestors, ['RuntimeError', 'Exception', 'BaseException', 'object']) def test_except_inference(self): code = ''' try: print (hop) except NameError, ex: ex1 = ex except Exception, ex: ex2 = ex raise ''' if sys.version_info >= (3, 0): code = code.replace(', ex:', ' as ex:') astng = builder.string_build(code, __name__, __file__) ex1 = astng['ex1'] ex1_infer = ex1.infer() ex1 = ex1_infer.next() self.assertIsInstance(ex1, Instance) self.failUnlessEqual(ex1.name, 'NameError') self.failUnlessRaises(StopIteration, ex1_infer.next) ex2 = astng['ex2'] ex2_infer = ex2.infer() ex2 = ex2_infer.next() self.assertIsInstance(ex2, Instance) self.failUnlessEqual(ex2.name, 'Exception') self.failUnlessRaises(StopIteration, ex2_infer.next) def test_del1(self): code = ''' del undefined_attr ''' delete = builder.string_build(code, __name__, __file__).body[0] self.failUnlessRaises(InferenceError, delete.infer) def test_del2(self): code = ''' a = 1 b = a del a c = a a = 2 d = a ''' astng = builder.string_build(code, __name__, __file__) n = astng['b'] n_infer = n.infer() infered = n_infer.next() self.assertIsInstance(infered, nodes.Const) self.failUnlessEqual(infered.value, 1) self.failUnlessRaises(StopIteration, n_infer.next) n = astng['c'] n_infer = n.infer() self.failUnlessRaises(InferenceError, n_infer.next) n = astng['d'] n_infer = n.infer() infered = n_infer.next() self.assertIsInstance(infered, nodes.Const) self.failUnlessEqual(infered.value, 2) self.failUnlessRaises(StopIteration, n_infer.next) def test_builtin_types(self): code = ''' l = [1] t = (2,) d = {} s = '' s2 = '_' ''' astng = builder.string_build(code, __name__, __file__) n = astng['l'] infered = n.infer().next() self.assertIsInstance(infered, nodes.List) self.assertIsInstance(infered, Instance) self.failUnlessEqual(infered.getitem(0).value, 1) self.assertIsInstance(infered._proxied, nodes.Class) self.failUnlessEqual(infered._proxied.name, 'list') self.failUnless('append' in infered._proxied.locals) n = astng['t'] infered = n.infer().next() self.assertIsInstance(infered, nodes.Tuple) self.assertIsInstance(infered, Instance) self.failUnlessEqual(infered.getitem(0).value, 2) self.assertIsInstance(infered._proxied, nodes.Class) self.failUnlessEqual(infered._proxied.name, 'tuple') n = astng['d'] infered = n.infer().next() self.assertIsInstance(infered, nodes.Dict) self.assertIsInstance(infered, Instance) self.assertIsInstance(infered._proxied, nodes.Class) self.failUnlessEqual(infered._proxied.name, 'dict') self.failUnless('get' in infered._proxied.locals) n = astng['s'] infered = n.infer().next() self.assertIsInstance(infered, nodes.Const) self.assertIsInstance(infered, Instance) 
self.failUnlessEqual(infered.name, 'str') self.failUnless('lower' in infered._proxied.locals) n = astng['s2'] infered = n.infer().next() self.failUnlessEqual(infered.getitem(0).value, '_') def test_unicode_type(self): if sys.version_info >= (3, 0): self.skipTest('unicode removed on py >= 3.0') code = '''u = u""''' astng = builder.string_build(code, __name__, __file__) n = astng['u'] infered = n.infer().next() self.assertIsInstance(infered, nodes.Const) self.assertIsInstance(infered, Instance) self.failUnlessEqual(infered.name, 'unicode') self.failUnless('lower' in infered._proxied.locals) def test_descriptor_are_callable(self): code = ''' class A: statm = staticmethod(open) clsm = classmethod('whatever') ''' astng = builder.string_build(code, __name__, __file__) statm = astng['A'].igetattr('statm').next() self.failUnless(statm.callable()) clsm = astng['A'].igetattr('clsm').next() self.failUnless(clsm.callable()) def test_bt_ancestor_crash(self): code = ''' class Warning(Warning): pass ''' astng = builder.string_build(code, __name__, __file__) w = astng['Warning'] ancestors = w.ancestors() ancestor = ancestors.next() self.failUnlessEqual(ancestor.name, 'Warning') self.failUnlessEqual(ancestor.root().name, EXC_MODULE) ancestor = ancestors.next() self.failUnlessEqual(ancestor.name, 'Exception') self.failUnlessEqual(ancestor.root().name, EXC_MODULE) ancestor = ancestors.next() self.failUnlessEqual(ancestor.name, 'BaseException') self.failUnlessEqual(ancestor.root().name, EXC_MODULE) ancestor = ancestors.next() self.failUnlessEqual(ancestor.name, 'object') self.failUnlessEqual(ancestor.root().name, BUILTINS_NAME) self.failUnlessRaises(StopIteration, ancestors.next) def test_qqch(self): code = ''' from logilab.common.modutils import load_module_from_name xxx = load_module_from_name('__pkginfo__') ''' astng = builder.string_build(code, __name__, __file__) xxx = astng['xxx'] self.assertSetEqual(set(n.__class__ for n in xxx.infered()), set([nodes.Const, YES.__class__])) def test_method_argument(self): code = ''' class ErudiEntitySchema: """a entity has a type, a set of subject and or object relations""" def __init__(self, e_type, **kwargs): kwargs['e_type'] = e_type.capitalize().encode() def meth(self, e_type, *args, **kwargs): kwargs['e_type'] = e_type.capitalize().encode() print(args) ''' astng = builder.string_build(code, __name__, __file__) arg = get_name_node(astng['ErudiEntitySchema']['__init__'], 'e_type') self.failUnlessEqual([n.__class__ for n in arg.infer()], [YES.__class__]) arg = get_name_node(astng['ErudiEntitySchema']['__init__'], 'kwargs') self.failUnlessEqual([n.__class__ for n in arg.infer()], [nodes.Dict]) arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'e_type') self.failUnlessEqual([n.__class__ for n in arg.infer()], [YES.__class__]) arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'args') self.failUnlessEqual([n.__class__ for n in arg.infer()], [nodes.Tuple]) arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'kwargs') self.failUnlessEqual([n.__class__ for n in arg.infer()], [nodes.Dict]) def test_tuple_then_list(self): code = ''' def test_view(rql, vid, tags=()): tags = list(tags) tags.append(vid) ''' astng = builder.string_build(code, __name__, __file__) name = get_name_node(astng['test_view'], 'tags', -1) it = name.infer() tags = it.next() self.failUnlessEqual(tags.__class__, Instance) self.failUnlessEqual(tags._proxied.name, 'list') self.failUnlessRaises(StopIteration, it.next) def test_mulassign_inference(self): code = ''' def first_word(line): 
"""Return the first word of a line""" return line.split()[0] def last_word(line): """Return last word of a line""" return line.split()[-1] def process_line(word_pos): """Silly function: returns (ok, callable) based on argument. For test purpose only. """ if word_pos > 0: return (True, first_word) elif word_pos < 0: return (True, last_word) else: return (False, None) if __name__ == '__main__': line_number = 0 for a_line in file('test_callable.py'): tupletest = process_line(line_number) (ok, fct) = process_line(line_number) if ok: fct(a_line) ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual(len(list(astng['process_line'].infer_call_result( None))), 3) self.failUnlessEqual(len(list(astng['tupletest'].infer())), 3) values = ['Function(first_word)', 'Function(last_word)', 'Const(NoneType)'] self.failUnlessEqual([str(infered) for infered in astng['fct'].infer()], values) def test_float_complex_ambiguity(self): code = ''' def no_conjugate_member(magic_flag): """should not raise E1101 on something.conjugate""" if magic_flag: something = 1.0 else: something = 1.0j if isinstance(something, float): return something return something.conjugate() ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual([i.value for i in astng['no_conjugate_member'].ilookup('something')], [1.0, 1.0j]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'something', -1).infer()], [1.0, 1.0j]) def test_lookup_cond_branches(self): code = ''' def no_conjugate_member(magic_flag): """should not raise E1101 on something.conjugate""" something = 1.0 if magic_flag: something = 1.0j return something.conjugate() ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual([i.value for i in get_name_node(astng, 'something', -1).infer()], [1.0, 1.0j]) def test_simple_subscript(self): code = ''' a = [1, 2, 3][0] b = (1, 2, 3)[1] c = (1, 2, 3)[-1] d = a + b + c print (d) ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual([i.value for i in get_name_node(astng, 'a', -1).infer()], [1]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'b', -1).infer()], [2]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'c', -1).infer()], [3]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'd', -1).infer()], [6]) #def test_simple_tuple(self): #"""test case for a simple tuple value""" ## XXX tuple inference is not implemented ... 
#code = """ #a = (1,) #b = (22,) #some = a + b #""" #astng = builder.string_build(code, __name__, __file__) #self.failUnlessEqual(astng['some'].infer.next().as_string(), "(1, 22)") def test_simple_for(self): code = ''' for a in [1, 2, 3]: print (a) for b,c in [(1,2), (3,4)]: print (b) print (c) print ([(d,e) for e,d in ([1,2], [3,4])]) ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual([i.value for i in get_name_node(astng, 'a', -1).infer()], [1, 2, 3]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'b', -1).infer()], [1, 3]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'c', -1).infer()], [2, 4]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'd', -1).infer()], [2, 4]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'e', -1).infer()], [1, 3]) def test_simple_for_genexpr(self): code = ''' print ((d,e) for e,d in ([1,2], [3,4])) ''' astng = builder.string_build(code, __name__, __file__) self.failUnlessEqual([i.value for i in get_name_node(astng, 'd', -1).infer()], [2, 4]) self.failUnlessEqual([i.value for i in get_name_node(astng, 'e', -1).infer()], [1, 3]) def test_builtin_help(self): code = ''' help() ''' # XXX failing since __builtin__.help assignment has # been moved into a function... astng = builder.string_build(code, __name__, __file__) node = get_name_node(astng, 'help', -1) infered = list(node.infer()) self.failUnlessEqual(len(infered), 1, infered) self.assertIsInstance(infered[0], Instance) self.failUnlessEqual(str(infered[0]), 'Instance of site._Helper') def test_builtin_open(self): code = ''' open("toto.txt") ''' astng = builder.string_build(code, __name__, __file__) node = get_name_node(astng, 'open', -1) infered = list(node.infer()) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Function) self.failUnlessEqual(infered[0].name, 'open') def test_callfunc_context_func(self): code = ''' def mirror(arg=None): return arg un = mirror(1) ''' astng = builder.string_build(code, __name__, __file__) infered = list(astng.igetattr('un')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Const) self.failUnlessEqual(infered[0].value, 1) def test_callfunc_context_lambda(self): code = ''' mirror = lambda x=None: x un = mirror(1) ''' astng = builder.string_build(code, __name__, __file__) infered = list(astng.igetattr('mirror')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Lambda) infered = list(astng.igetattr('un')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Const) self.failUnlessEqual(infered[0].value, 1) def test_factory_method(self): code = ''' class Super(object): @classmethod def instance(cls): return cls() class Sub(Super): def method(self): print ('method called') sub = Sub.instance() ''' astng = builder.string_build(code, __name__, __file__) infered = list(astng.igetattr('sub')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], Instance) self.failUnlessEqual(infered[0]._proxied.name, 'Sub') def test_import_as(self): code = ''' import os.path as osp print (osp.dirname(__file__)) from os.path import exists as e assert e(__file__) from new import code as make_code print (make_code) ''' astng = builder.string_build(code, __name__, __file__) infered = list(astng.igetattr('osp')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Module) self.failUnlessEqual(infered[0].name, 'os.path') infered = list(astng.igetattr('e')) 
self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Function) self.failUnlessEqual(infered[0].name, 'exists') if sys.version_info >= (3, 0): self.skipTest('<new> module has been removed') infered = list(astng.igetattr('make_code')) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], Instance) self.failUnlessEqual(str(infered[0]), 'Instance of %s.type' % BUILTINS_NAME) def _test_const_infered(self, node, value): infered = list(node.infer()) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Const) self.failUnlessEqual(infered[0].value, value) def test_unary_not(self): for code in ('a = not (1,); b = not ()', 'a = not {1:2}; b = not {}'): astng = builder.string_build(code, __name__, __file__) self._test_const_infered(astng['a'], False) self._test_const_infered(astng['b'], True) def test_binary_op_int_add(self): astng = builder.string_build('a = 1 + 2', __name__, __file__) self._test_const_infered(astng['a'], 3) def test_binary_op_int_sub(self): astng = builder.string_build('a = 1 - 2', __name__, __file__) self._test_const_infered(astng['a'], -1) def test_binary_op_float_div(self): astng = builder.string_build('a = 1 / 2.', __name__, __file__) self._test_const_infered(astng['a'], 1 / 2.) def test_binary_op_str_mul(self): astng = builder.string_build('a = "*" * 40', __name__, __file__) self._test_const_infered(astng['a'], "*" * 40) def test_binary_op_bitand(self): astng = builder.string_build('a = 23&20', __name__, __file__) self._test_const_infered(astng['a'], 23&20) def test_binary_op_bitor(self): astng = builder.string_build('a = 23|8', __name__, __file__) self._test_const_infered(astng['a'], 23|8) def test_binary_op_bitxor(self): astng = builder.string_build('a = 23^9', __name__, __file__) self._test_const_infered(astng['a'], 23^9) def test_binary_op_shiftright(self): astng = builder.string_build('a = 23 >>1', __name__, __file__) self._test_const_infered(astng['a'], 23>>1) def test_binary_op_shiftleft(self): astng = builder.string_build('a = 23 <<1', __name__, __file__) self._test_const_infered(astng['a'], 23<<1) def test_binary_op_list_mul(self): for code in ('a = [[]] * 2', 'a = 2 * [[]]'): astng = builder.string_build(code, __name__, __file__) infered = list(astng['a'].infer()) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.List) self.failUnlessEqual(len(infered[0].elts), 2) self.assertIsInstance(infered[0].elts[0], nodes.List) self.assertIsInstance(infered[0].elts[1], nodes.List) def test_binary_op_list_mul_none(self): 'test correct handling on list multiplied by None' astng = builder.string_build( 'a = [1] * None\nb = [1] * "r"') infered = astng['a'].infered() self.assertEqual(len(infered), 1) self.assertEqual(infered[0], YES) infered = astng['b'].infered() self.assertEqual(len(infered), 1) self.assertEqual(infered[0], YES) def test_binary_op_tuple_add(self): astng = builder.string_build('a = (1,) + (2,)', __name__, __file__) infered = list(astng['a'].infer()) self.failUnlessEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Tuple) self.failUnlessEqual(len(infered[0].elts), 2) self.failUnlessEqual(infered[0].elts[0].value, 1) self.failUnlessEqual(infered[0].elts[1].value, 2) def test_binary_op_custom_class(self): code = ''' class myarray: def __init__(self, array): self.array = array def __mul__(self, x): return myarray([2,4,6]) def astype(self): return "ASTYPE" def randint(maximum): if maximum is not None: return myarray([1,2,3]) * 2 else: return int(5) x = randint(1) 
''' astng = builder.string_build(code, __name__, __file__) infered = list(astng.igetattr('x')) self.failUnlessEqual(len(infered), 2) value = [str(v) for v in infered] # The __name__ trick here makes it work when invoked directly # (__name__ == '__main__') and through pytest (__name__ == # 'unittest_inference') self.assertEqual(value, ['Instance of %s.myarray' % __name__, 'Instance of %s.int' % BUILTINS_NAME]) def test_nonregr_lambda_arg(self): code = ''' def f(g = lambda: None): g().x ''' astng = builder.string_build(code, __name__, __file__) callfuncnode = astng['f'].body[0].value.expr infered = list(callfuncnode.infer()) self.failUnlessEqual(len(infered), 2, infered) infered.remove(YES) self.assertIsInstance(infered[0], nodes.Const) self.failUnlessEqual(infered[0].value, None) def test_nonregr_getitem_empty_tuple(self): code = ''' def f(x): a = ()[x] ''' astng = builder.string_build(code, __name__, __file__) infered = list(astng['f'].ilookup('a')) self.failUnlessEqual(len(infered), 1) self.failUnlessEqual(infered[0], YES) def test_python25_generator_exit(self): sys.stderr = StringIO() data = "b = {}[str(0)+''].a" astng = builder.string_build(data, __name__, __file__) list(astng['b'].infer()) output = sys.stderr.getvalue() # I have no idea how to test for this in another way... self.failIf("RuntimeError" in output, "Exception exceptions.RuntimeError: 'generator ignored GeneratorExit' in <generator object> ignored") sys.stderr = sys.__stderr__ def test_python25_relative_import(self): data = "from ...common import date; print (date)" # !! FIXME also this relative import would not work 'in real' (no __init__.py in test/) # the test works since we pretend we have a package by passing the full modname astng = builder.string_build(data, 'logilab.astng.test.unittest_inference', __file__) infered = get_name_node(astng, 'date').infer().next() self.assertIsInstance(infered, nodes.Module) self.assertEqual(infered.name, 'logilab.common.date') def test_python25_no_relative_import(self): fname = join(abspath(dirname(__file__)), 'regrtest_data', 'package', 'absimport.py') astng = builder.file_build(fname, 'absimport') self.failUnless(astng.absolute_import_activated(), True) infered = get_name_node(astng, 'import_package_subpackage_module').infer().next() # failed to import since absolute_import is activated self.failUnless(infered is YES) def test_nonregr_absolute_import(self): fname = join(abspath(dirname(__file__)), 'regrtest_data', 'absimp', 'string.py') astng = builder.file_build(fname, 'absimp.string') self.failUnless(astng.absolute_import_activated(), True) infered = get_name_node(astng, 'string').infer().next() self.assertIsInstance(infered, nodes.Module) self.assertEqual(infered.name, 'string') self.failUnless('lower' in infered.locals) def test_mechanize_open(self): try: import mechanize except ImportError: self.skipTest('require mechanize installed') data = '''from mechanize import Browser print (Browser) b = Browser() ''' astng = builder.string_build(data, __name__, __file__) browser = get_name_node(astng, 'Browser').infer().next() self.assertIsInstance(browser, nodes.Class) bopen = list(browser.igetattr('open')) self.skipTest('the commit said: "huum, see that later"') self.assertEqual(len(bopen), 1) self.assertIsInstance(bopen[0], nodes.Function) self.failUnless(bopen[0].callable()) b = get_name_node(astng, 'b').infer().next() self.assertIsInstance(b, Instance) bopen = list(b.igetattr('open')) self.assertEqual(len(bopen), 1) self.assertIsInstance(bopen[0], BoundMethod) 
self.failUnless(bopen[0].callable()) def test_property(self): code = ''' from smtplib import SMTP class SendMailController(object): @property def smtp(self): return SMTP(mailhost, port) @property def me(self): return self my_smtp = SendMailController().smtp my_me = SendMailController().me ''' decorators = set(['%s.property' % BUILTINS_NAME]) astng = builder.string_build(code, __name__, __file__) self.assertEqual(astng['SendMailController']['smtp'].decoratornames(), decorators) propinfered = list(astng.body[2].value.infer()) self.assertEqual(len(propinfered), 1) propinfered = propinfered[0] self.assertIsInstance(propinfered, Instance) self.assertEqual(propinfered.name, 'SMTP') self.assertEqual(propinfered.root().name, 'smtplib') self.assertEqual(astng['SendMailController']['me'].decoratornames(), decorators) propinfered = list(astng.body[3].value.infer()) self.assertEqual(len(propinfered), 1) propinfered = propinfered[0] self.assertIsInstance(propinfered, Instance) self.assertEqual(propinfered.name, 'SendMailController') self.assertEqual(propinfered.root().name, __name__) def test_im_func_unwrap(self): code = ''' class EnvBasedTC: def pactions(self): pass pactions = EnvBasedTC.pactions.im_func print (pactions) class EnvBasedTC2: pactions = EnvBasedTC.pactions.im_func print (pactions) ''' astng = builder.string_build(code, __name__, __file__) pactions = get_name_node(astng, 'pactions') infered = list(pactions.infer()) self.assertEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Function) pactions = get_name_node(astng['EnvBasedTC2'], 'pactions') infered = list(pactions.infer()) self.assertEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Function) def test_augassign(self): code = ''' a = 1 a += 2 print (a) ''' astng = builder.string_build(code, __name__, __file__) infered = list(get_name_node(astng, 'a').infer()) self.assertEqual(len(infered), 1) self.assertIsInstance(infered[0], nodes.Const) self.assertEqual(infered[0].value, 3) def test_nonregr_func_arg(self): code = ''' def foo(self, bar): def baz(): pass def qux(): return baz spam = bar(None, qux) print (spam) ''' astng = builder.string_build(code, __name__, __file__) infered = list(get_name_node(astng['foo'], 'spam').infer()) self.assertEqual(len(infered), 1) self.assertIs(infered[0], YES) def test_nonregr_func_global(self): code = ''' active_application = None def get_active_application(): global active_application return active_application class Application(object): def __init__(self): global active_application active_application = self class DataManager(object): def __init__(self, app=None): self.app = get_active_application() def test(self): p = self.app print (p) ''' astng = builder.string_build(code, __name__, __file__) infered = list(Instance(astng['DataManager']).igetattr('app')) self.assertEqual(len(infered), 2, infered) # None / Instance(Application) infered = list(get_name_node(astng['DataManager']['test'], 'p').infer()) self.assertEqual(len(infered), 2, infered) for node in infered: if isinstance(node, Instance) and node.name == 'Application': break else: self.fail('expected to find an instance of Application in %s' % infered) def test_list_inference(self): """#20464""" code = ''' import optparse A = [] B = [] def test(): xyz = [ "foobar=%s" % options.ca, ] + A + B if options.bind is not None: xyz.append("bind=%s" % options.bind) return xyz def main(): global options parser = optparse.OptionParser() (options, args) = parser.parse_args() Z = test() ''' astng = builder.string_build(code, __name__, 
__file__) infered = list(astng['Z'].infer()) self.assertEqual(len(infered), 1, infered) self.assertIsInstance(infered[0], Instance) self.assertIsInstance(infered[0]._proxied, nodes.Class) self.assertEqual(infered[0]._proxied.name, 'list') def test__new__(self): code = ''' class NewTest(object): "doc" def __new__(cls, arg): self = object.__new__(cls) self.arg = arg return self n = NewTest() ''' astng = builder.string_build(code, __name__, __file__) self.assertRaises(InferenceError, list, astng['NewTest'].igetattr('arg')) n = astng['n'].infer().next() infered = list(n.igetattr('arg')) self.assertEqual(len(infered), 1, infered) def test_two_parents_from_same_module(self): code = ''' from data import nonregr class Xxx(nonregr.Aaa, nonregr.Ccc): "doc" ''' astng = builder.string_build(code, __name__, __file__) parents = list(astng['Xxx'].ancestors()) self.assertEqual(len(parents), 3, parents) # Aaa, Ccc, object if __name__ == '__main__': unittest_main()
gpl-3.0
3,295,323,266,890,843,000
34.926638
147
0.594953
false
3.587963
true
false
false
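For reference, the inference pattern the test module above exercises survives in astroid, the maintained successor of logilab.astng. A minimal sketch, assuming astroid is installed (the extract_node and infer calls are astroid's public API, not astng's):

import astroid

# extract_node returns the node for the last statement, here the Name 'a'.
node = astroid.extract_node("a = 1 + 2\na")
inferred = next(node.infer())
print(inferred.value)  # -> 3; the constant BinOp is folded during inference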
vietdh85/vh-utility
script/rcb/graspgold.py
1
1037
import sys
import os.path
from pyquery import PyQuery as pq
import time

import common


def getValues(item):
    url = item[3]
    print("getValues(): ", url)
    date_format = "%b %d %Y %H:%M:%S"  # renamed from 'format', which shadowed the builtin
    d = pq(url=url)
    cells = d(".list td")  # renamed from 'list', which shadowed the builtin
    index = 0
    while index < len(cells):
        try:
            obj = {}
            obj['date'] = common.removeNumberString(cells[index].text_content())
            obj['time'] = common.dateStringToTimestamp(obj['date'], format=date_format)
            obj['time'] = common.formatTimestamp(obj['time'])
            obj['user'] = cells[index + 1].text_content()
            obj['deposit'] = cells[index + 2].text_content().split("/")[0].replace("$", "")
            obj['site_id'] = item[0]
            obj['monitor'] = item[2]
            print("{0} - {1} - {2} - {3} - {4} - {5}".format(obj['site_id'], obj['monitor'], obj['date'], obj['time'], obj['user'], obj['deposit']))
            if common.insertUserRcb(obj) == -1:
                return
        except Exception:
            pass
        index += 5


def run(item):
    # was a Python 2 print statement; made consistent with the print() calls above
    print("\n========== RUN graspgold.run() ============")
    # try:
    getValues(item)
    # except Exception:
    #     pass
gpl-3.0
1,925,032,494,287,725,800
22.568182
139
0.580521
false
2.757979
false
false
false
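For context, a runnable sketch of the PyQuery pattern graspgold.py relies on, with an inline HTML snippet standing in for the monitored page (the five-cell stride mirrors the table layout the script assumes):

from pyquery import PyQuery as pq

html = """
<table class="list">
  <tr><td>Jan 01 2015 10:00:00</td><td>alice</td><td>$25/..</td>
      <td>pad</td><td>pad</td></tr>
</table>
"""
d = pq(html)
cells = d(".list td")
for i in range(0, len(cells), 5):  # each record spans five <td> cells
    date = cells[i].text_content()
    user = cells[i + 1].text_content()
    deposit = cells[i + 2].text_content().split("/")[0].replace("$", "")
    print(date, user, deposit)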
juliakreutzer/bandit-neuralmonkey
neuralmonkey/decoders/word_alignment_decoder.py
1
3918
from typing import cast

import numpy as np
import tensorflow as tf

from neuralmonkey.dataset import Dataset
from neuralmonkey.encoders.recurrent import RecurrentEncoder
from neuralmonkey.decoders.decoder import Decoder
from neuralmonkey.logging import warn
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.sequence import Sequence
from neuralmonkey.decorators import tensor


class WordAlignmentDecoder(ModelPart):
    """A decoder that computes soft alignment from an attentive encoder.

    Loss is computed as cross-entropy against a reference alignment.
    """

    def __init__(self,
                 encoder: RecurrentEncoder,
                 decoder: Decoder,
                 data_id: str,
                 name: str,
                 initializers: InitializerSpecs = None) -> None:
        ModelPart.__init__(self, name, None, None, initializers)

        self.encoder = encoder
        self.decoder = decoder
        self.data_id = data_id

        if not isinstance(self.encoder.input_sequence, Sequence):
            raise TypeError("Expected Sequence type in encoder.input_sequence")

        self.enc_input = cast(Sequence, self.encoder.input_sequence)

        # TODO this is here to call the lazy properties which create
        # the list of attention distributions
        # pylint: disable=pointless-statement
        self.decoder.runtime_logits
        self.decoder.train_logits
        # pylint: enable=pointless-statement

        _, self.train_loss = self._make_decoder(runtime_mode=False)
        self.decoded, self.runtime_loss = self._make_decoder(runtime_mode=True)

        tf.summary.scalar("alignment_train_xent", self.train_loss,
                          collections=["summary_train"])

    @tensor
    def ref_alignment(self) -> tf.Tensor:
        # TODO dynamic shape?
        return tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.decoder.max_output_len,
                   self.enc_input.max_length],
            name="ref_alignment")

    @tensor
    def alignment_target(self) -> tf.Tensor:
        # shape will be [max_output_len, batch_size, max_input_len]
        return tf.transpose(self.ref_alignment, perm=[1, 0, 2])

    def _make_decoder(self, runtime_mode=False):
        attn_obj = self.decoder.get_attention_object(self.encoder,
                                                     not runtime_mode)
        if runtime_mode:
            alignment_logits = tf.stack(
                attn_obj.histories["{}_run".format(self.decoder.name)],
                name="alignment_logits")
            # make batch_size the first dimension
            alignment = tf.transpose(tf.nn.softmax(alignment_logits),
                                     perm=[1, 0, 2])
            loss = tf.constant(0)
        else:
            alignment_logits = tf.stack(
                attn_obj.histories["{}_train".format(self.decoder.name)],
                name="alignment_logits")
            alignment = None
            xent = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.alignment_target, logits=alignment_logits)
            loss = tf.reduce_sum(xent * self.decoder.train_padding)

        return alignment, loss

    @property
    def cost(self) -> tf.Tensor:
        return self.train_loss

    def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        fd = {}

        alignment = dataset.maybe_get_series(self.data_id)
        if alignment is None:
            if train:
                warn("Training alignment not present!")

            alignment = np.zeros((len(dataset),
                                  self.decoder.max_output_len,
                                  self.enc_input.max_length),
                                 np.float32)

        fd[self.ref_alignment] = alignment

        return fd
bsd-3-clause
5,833,592,751,520,678,000
34.944954
79
0.593415
false
4.367893
false
false
false
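A NumPy sketch of the padded alignment cross-entropy that WordAlignmentDecoder's training branch builds in TensorFlow (shapes follow the comments in the class; the function name and test data are illustrative, not part of the module):

import numpy as np

def alignment_xent(logits, ref, pad):
    # logits, ref: (max_output_len, batch, max_input_len)
    # pad: (max_output_len, batch), 1.0 on real target tokens, 0.0 on padding
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))  # stable softmax
    probs = e / e.sum(axis=-1, keepdims=True)
    xent = -(ref * np.log(probs + 1e-9)).sum(axis=-1)        # per-position CE
    return (xent * pad).sum()                                # masked, summed

rng = np.random.default_rng(0)
logits = rng.normal(size=(4, 2, 6))
ref = np.full((4, 2, 6), 1.0 / 6)   # uniform reference alignment
pad = np.ones((4, 2))
print(alignment_xent(logits, ref, pad))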
JamesSample/ecosystem_services_impacts
Code/01_es_lu_cc.py
1
21539
#------------------------------------------------------------------------------ # Name: 01_es_lu_cc.py # Purpose: Processing for the CREW project on ES, LUC and CC. # # Author: James Sample # # Created: 14/01/2015 # Copyright: (c) James Sample and JHI, 2015 # License: https://github.com/JamesSample/ecosystem_services_impacts/blob/master/LICENSE #------------------------------------------------------------------------------ """ Processes the Future Flows (FF) climate data and estimate climate and land use change effects on Ecosystem Services (ES). Reads workshop outputs and performs the following steps: 1. For each ES, reads monthly rainfall and ET grids for the months specified for both baseline and future periods. For the seasons of interest, calculates the % change in rainfall and ET between baseline and future. 2. Combines rainfall and runoff percentage changes into a qualitative grid of change in runoff. 3. Estimates impacts grids for each ES for CC only, LUC only and CC & LUC combined. Inputs grids are supplied in HDF5 file format. """ import pandas as pd, h5py, numpy as np, matplotlib, matplotlib.pyplot as plt import os, sys from mpl_toolkits.axes_grid1 import ImageGrid from osgeo import gdal, gdalconst, osr def read_array_from_h5(h5, variable, model, year, month): """ Read an array from a specified location in an H5 file. Args: h5: The open HDF5 file object variable: The variable of interest ('rainfall' or 'pet') model: The code for the climate model of interest (string) year: Year (integer) month: Month (integer) Returns: array """ dset_path = r'/ff_data/%s/%s/%s_%s' % (model, variable, variable, year) data = h5.get(dset_path)[:,:,month-1].astype(float) # Set NoData to NaN data[data==-99] = np.nan # Convert units data = data/100 return data def avg_rain_et(h5, st_yr, end_yr, months): """ Calculate average rainfall and ET grids for the specified years and months. Args: h5: The open HDF5 file object st_yr: Start year for period of interest (integer) end_yr: End year for period of interest (integer) months: List of months of interest (integers) Returns: Tuple of arrays (average rainfall, average PET) """ # Empty arrays to store rainfall and ET totals rn_tot = np.zeros((715, 485)) et_tot = np.zeros((715, 485)) # Total number of years to average over years = end_yr + 1 - st_yr # Loop over rainfall and ET for year in range(st_yr, end_yr+1): for month in months: # Read rainfall and ET grids rn = read_array_from_h5(h5, 'rainfall', model, year, month) et = read_array_from_h5(h5, 'pet', model, year, month) # Add to totals rn_tot += rn et_tot += et # Average rn_av = rn_tot/years et_av = et_tot/years return (rn_av, et_av) def plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av): """ Plot the average rainfall and ET grids. Used for testing. Args: base_rn_av: Average rainfall grid for baseline period. base_et_av: Average PET grid for baseline period. fut_rn_av: Average rainfall grid for future period. fut_et_av: Average PET grid for future period. Returns: None. Displays maps of each grid using same colour scale. 
""" # Get min and max values from grids rnmin = min(np.nanmin(base_rn_av), np.nanmin(fut_rn_av)) rnmax = max(np.nanmax(base_rn_av), np.nanmax(fut_rn_av)) etmin = min(np.nanmin(base_et_av), np.nanmin(fut_et_av)) etmax = max(np.nanmax(base_et_av), np.nanmax(fut_et_av)) # Plot fig = plt.figure() grid = ImageGrid(fig, 111, nrows_ncols = (1, 4), axes_pad=0.5, cbar_mode='each') im0 = grid[0].imshow(base_rn_av, vmin=rnmin, vmax=rnmax, interpolation='nearest') grid.cbar_axes[0].colorbar(im0) im1 = grid[1].imshow(fut_rn_av, vmin=rnmin, vmax=rnmax, interpolation='nearest') grid.cbar_axes[1].colorbar(im1) im2 = grid[2].imshow(base_et_av, vmin=etmin, vmax=etmax, interpolation='nearest') grid.cbar_axes[2].colorbar(im2) im3 = grid[3].imshow(fut_et_av, vmin=etmin, vmax=etmax, interpolation='nearest') grid.cbar_axes[3].colorbar(im3) plt.show() def plot_reclassified_grid(array, out_path, sup_title='Main title', title='Sub-title'): """ Plot and save the reclassified grid. Args: array: Grid of integers in range -2 to +2 out_path: Output file path (PNG or PDF) sup_title: Main title for plot (string) title: Sub-title for plot (string) Returns: None. Saves a plot to the specified path. """ # Make a color map of fixed colors cmap = matplotlib.colors.ListedColormap(['Red', 'Orange', 'LimeGreen', 'DeepSkyBlue', 'Blue']) bounds=[-2.5, -1.5, -0.5, 0.5, 1.5, 2.5] norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) # Create axes for plot (A4 size) fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.3,11.7)) # Plot the array, using the colours specified img = axes.imshow(array, interpolation='nearest', origin='upper', cmap=cmap, norm=norm) # Add labels to plot plt.title(title) plt.suptitle(sup_title, fontsize=16, y=0.95) plt.ylabel('Northing') plt.xlabel('Easting') plt.grid(True) # Reformat the axis labels (mainly change the Y values into northings) axes.set_yticks([35, 135, 235, 335, 435, 535, 635, 735]) axes.set_yticklabels([1200, 1100, 1000, 900, 800, 700, 600, 500]) axes.set_xticks([100, 200, 300, 400]) # Add axes for the color bar cax = fig.add_axes([0.2, 0.785, 0.02, 0.10]) # Add the colour bar and set labels cbar = fig.colorbar(img, cax=cax, cmap=cmap, norm=norm, boundaries=bounds, ticks=[-2.2,-1.2,-0.2,0.8,1.8]) cbar.set_ticklabels(['Large decrease', 'Small decrease', 'Neutral', 'Small increase', 'Large increase'], update_ticks=True) # Make the cbar ticks invisible ticks = cbar.ax.get_yticklines() for tick in ticks: plt.setp(tick, alpha=0) cbar_labels = plt.getp(cbar.ax.axes, 'yticklabels') plt.setp(cbar_labels, fontsize=10) # Save fig plt.savefig(out_path, dpi=300) ## plt.show() plt.clf() plt.close() def reclass_rn_et_grid(array): """ Take an array of percentage changes and reclassify it according to: % change | Class x<=-15 | -2 -15<x<=-5 | -1 -5<x<=5 | 0 5<x<=15 | +1 15<x | +2 Args: array: Array of percentage changes to be reclassified. Returns: Reclassified array """ # Create copy of array for reclass values rc = array.copy() rc[array<=-15] = -2 rc[(-15<array) & (array<=-5)] = -1 rc[(-5<array) & (array<=5)] = 0 rc[(5<array) & (array<=15)] = 1 rc[15<array] = 2 return rc def reclass_ro(matrix_path, rn, et): """ Generate reclassification matrix for runoff based on reclassified change grids for rainfall and PET and the runoff reclassification matrix from the workshop. Args: matrix_path: Path to CSV file representing runoff matrix. 
rn: Reclassified rainfall grid from reclass_rn_et_grid et: Reclassified PET grid from reclass_rn_et_grid Returns: Array (grid of integers representing change in runoff) """ # Read matrix df = pd.read_csv(matrix_path, index_col=0) # Grid of NaNs wih correct shape ro = rn.copy()*np.nan # Loop over inidces for x, y in np.ndindex(ro.shape): # Get values for change in rainfall and ET et_ch = et[x, y] rn_ch = rn[x, y] # If both are not nan, reclassify if (np.isfinite(et_ch) and np.isfinite(rn_ch)): rc_val = df.ix[int(et_ch), str(int(rn_ch))] ro[x, y] = rc_val return ro def reclass_es_ro(es_idx, ro): """ Reclassify the runoff grid to estimate effects of runoff change on each ES. Args: es_idx: The ID of the ES of interest in data frame ro_df ro: The runoff change grid from reclass_ro Returns: Array (grid of integers representing change in ES) """ # Make a copy of the ro grid to update es = ro.copy() # Reclassify for chng in [-2, -1, 0, 1, 2]: es[ro==chng] = ro_df.ix[es_idx, 'RO_%d' % chng] return es def read_ascii(ascii_path, xmin=0, xmax=485000, ymin=520000, ymax=1235000, exptd_rows=715, exptd_cols=485, exptd_px_wd=1000, exptd_px_ht=-1000, exptd_ndv=-9999): """ Read an ASCII grid file, clip it to the specified bounding box and return a numpy array. Args: xmin: Minimum Easting in OSGB1936 metres. xmax: Maximum Easting in OSGB1936 metres. ymin: Minimum Northing in OSGB1936 metres. ymax: Maximum Northing in OSGB1936 metres. exptd_rows: No. of rows expected in file. exptd_cols: No. of columns expected in file. exptd_px_wd: Cell width. exptd_px_ht: Cell height. exptd_ndv: No data value. Returns: Array (floats). """ # Register drivers gdal.AllRegister() # Process the file with GDAL ds = gdal.Open(ascii_path, gdalconst.GA_ReadOnly) if ds is None: print 'Could not open ' + ascii_path sys.exit(1) # In order to select the first cell correctly, choose a point just within # the top left corner of the specified bounding box. x = xmin + 10 y = ymax - 10 # Dataset properties geotransform = ds.GetGeoTransform() originX = geotransform[0] originY = geotransform[3] pixelWidth = geotransform[1] pixelHeight = geotransform[5] # Calculate number of rows and cols to return rows = abs(int((ymax-ymin)/pixelHeight)) cols = int((xmax-xmin)/pixelWidth) # Select starting pixel xOffset = int((x - originX) / pixelWidth) yOffset = int((y - originY) / pixelHeight) band = ds.GetRasterBand(1) no_data_val = band.GetNoDataValue() # Simple checking assert rows == exptd_rows assert cols == exptd_cols assert pixelWidth == exptd_px_wd assert pixelHeight == exptd_px_ht assert no_data_val == exptd_ndv # Read the data to an array data = band.ReadAsArray(xOffset, yOffset, cols, rows) # Close the dataset ds = None return data.astype(float) def process_land_use_change(lu_mat_path, base, fut, esid, codes_df): """ Estimate land use change (LUC) only effects for the specified ES. Args: lu_mat_path: Excel file containing land use matrices from the workshop. base: Baseline land luse grid. fut: Future land luse grid. 
esid: ES ID from land use matrices Excel file codes_df: Land use code look-up table (as data frame) Returns: Array (grid of integers representing change in ES) """ # Read matrix for this ES lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use') # Get row for start of matrix st_row = (lu_mat['ES_ID']==esid).nonzero()[0][0] + 2 # Read matrix of interest lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use', skiprows=st_row, skip_footer=(120-6-st_row), parse_cols='C:I', index_col=0) # Perform reclassification # Grid of NaNs wih correct shape rc = base.copy()*np.nan # Loop over inidces for x, y in np.ndindex(base.shape): # Get values for baseline and future LU base_lu = base[x, y] fut_lu = fut[x, y] # If both are not nan, reclassify if (np.isfinite(base_lu) and np.isfinite(fut_lu)): # Get the base and fut LU as a string base_str = codes_df.ix[int(base_lu)]['LU_Class'] fut_str = codes_df.ix[int(fut_lu)]['LU_Class'] rc_val = lu_mat.ix[base_str, fut_str] rc[x, y] = rc_val return rc def process_land_use_and_climate_change(lucc_mat_path, lugrid, ccgrid, esid): """ Estimate combined land use and climate change effects for the specified ES. Args: lucc_mat_path: Excel file containing matrices from the workshop. lugrid: The grid of land use change effects. ccgrid: The grid of climate change effects. esid: ES ID from workshop matrices Excel file. Returns: Array (grid of integers representing change in ES) """ # Read matrix for this ES lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU') # Get row for start of matrix st_row = (lucc_mat['ES_ID']==esid).nonzero()[0][0] + 2 # Read matrix of interest lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU', skiprows=st_row, skip_footer=(108-5-st_row), parse_cols='C:I', index_col=0) # Perform reclassification # Grid of NaNs wih correct shape rc = lugrid.copy()*np.nan # Loop over inidces for x, y in np.ndindex(lugrid.shape): # Get values for baseline and future LU lu = lugrid[x, y] cc = ccgrid[x, y] # If both are not nan, reclassify if (np.isfinite(lu) and np.isfinite(cc)): # Get the base and fut LU as a string rc_val = lucc_mat.ix[int(lu), int(cc)] rc[x, y] = rc_val return rc def array_to_gtiff(out_path, data_array, ndv=-9999, xmin=0, ymax=1235000, cell_size=1000): """ Convert numpy array to 16-bit integer GeoTiff. Args: out_path: The .tif file to be created. data_array: The (integer) data array to save. ndv: No data value. xmin: Minimum x (Easting) co-ordinate, in OSGB1936 metres ymax: Maximim y (Northing) co-ordinate, in OSGB1936 metres cell_size: Cell size (metres) Returns: None. Array is saved to specified path. 
""" # Copy data_array so that it is not modified data = data_array.copy() # Convert NaNs to NDV data[np.isnan(data)] = ndv # Get array shape cols = data.shape[1] rows = data.shape[0] # Get driver driver = gdal.GetDriverByName('GTiff') # NB can't directly create ArcInfo ASCII grids in this way # Create a new raster data source out_ds = driver.Create(out_path, cols, rows, 1, gdal.GDT_Int16) # Get spatial ref details srs = osr.SpatialReference() srs.ImportFromEPSG(27700) # From EPSG for OSGB36 grid # Write metadata out_ds.SetGeoTransform((xmin, cell_size, 0.0, ymax, 0.0, -1*cell_size)) #(xmin, cellsize, 0, ymax, 0, -cellsize) out_ds.SetProjection(srs.ExportToWkt()) out_band = out_ds.GetRasterBand(1) out_band.SetNoDataValue(ndv) out_band.WriteArray(data) # Tidy up del out_ds, out_band # ############################################################################# # User input # Climate data ff_h5_path = r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5' # Runoff matrices ro_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Runoff_Impacts_Grp1.csv' ro_matrix_15 = r'D:\Eco_Services_Impacts\Matrices_Development\02_Common_Matrices\Runoff_Matrix_15pct.csv' # Land use data base_path = r'D:\Eco_Services_Impacts\Land_Use\baseline_lu_lcm07.txt' fut_path = r'D:\Eco_Services_Impacts\Land_Use\future_lu_2050.txt' # Land use matrices lu_classes_path = r'D:\Eco_Services_Impacts\Land_Use\Land_Use_Classes.csv' lu_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Land_Use_Matrices_Grp1.xlsx' # Land use and climate combined matrices lucc_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Climate_And_Land_Use_Matrices_Grp1.xlsx' # Output folders out_pdf_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\PDF' out_array_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\GeoTiffs' # Time periods to compare base_st_yr, base_end_yr = 1961, 1990 fut_st_yr, fut_end_yr = 2041, 2070 # Future Flows models of interest models = ['afixa', 'afixc', 'afixl', 'afixm', 'afixo', 'afixh', 'afixi', 'afixj', 'afixk', 'afgcx', 'afixq'] # ############################################################################# # Read LU grids base = read_ascii(base_path) base[base==-9999] = np.nan fut = read_ascii(fut_path) fut[fut==-9999] = np.nan # Read LU class codes codes_df = pd.read_csv(lu_classes_path, index_col=0) # Read the runoff matrices ro_df = pd.read_csv(ro_path, index_col=0) # Open H5 file h5 = h5py.File(ff_h5_path, 'r') # Iterate over each ES for idx in ro_df.index: print '\nProcessing land use change impacts for %s.' % ro_df.ix[idx, 'ES'] # 1. Process land use change only luc = process_land_use_change(lu_matrices_path, base, fut, idx, codes_df) # Prepare to save out_name = 'ES%02d_LUC' % idx # Save array out_array = os.path.join(out_array_fold, '%s.tif' % out_name) array_to_gtiff(out_array, luc) # Save PDF out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name) plot_reclassified_grid(luc, out_pdf, sup_title='Change in %s' % ro_df.ix[idx, 'ES'], title='(land use change only)' ) # 2. Process climate change only # Get the relevant months for this ES months = [int(i) for i in ro_df.ix[idx, 'Key_Months'].split(',')] # Loop over climate models of interest for model in models: print ('Processing climate change impacts for ' '%s (model %s).' % (ro_df.ix[idx, 'ES'], model)) # 2.1. Baseline base_rn_av, base_et_av = avg_rain_et(h5, base_st_yr, base_end_yr, months) # 2.2. 
Future fut_rn_av, fut_et_av = avg_rain_et(h5, fut_st_yr, fut_end_yr, months) # Plot # plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av) # Calculate % change rn_pct = 100*(fut_rn_av - base_rn_av)/base_rn_av et_pct = 100*(fut_et_av - base_et_av)/base_et_av # Reclassify rn_rc = reclass_rn_et_grid(rn_pct) et_rc = reclass_rn_et_grid(et_pct) # plot_reclassified_grid(rn_rc) # plot_reclassified_grid(et_rc) # Generate runoff grid ro = reclass_ro(ro_matrix_15, rn_rc, et_rc) # # Plot runoff grid # plot_reclassified_grid(ro, # sup_title='Change in runoff', # title='(Model %s; %s)' % (model, months)) # Reclass ro grid to estimate ES impact es = reclass_es_ro(idx, ro) # Prepare to save out_name = 'ES%02d_%s' % (idx, model) # Save array out_array = os.path.join(out_array_fold, '%s.tif' % out_name) array_to_gtiff(out_array, es) # Save PDF out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name) plot_reclassified_grid(es, out_pdf, sup_title='Change in %s' % ro_df.ix[idx, 'ES'], title='(climate model %s only)' % model) # 3. Process combined land use and climate effects print ('Processing climate and land use change impacts for ' '%s (model %s).' % (ro_df.ix[idx, 'ES'], model)) # Reclassify to get CC and LUC effects cc_lu = process_land_use_and_climate_change(lucc_matrices_path, luc, es, idx) # Prepare to save out_name = 'ES%02d_LUC_%s' % (idx, model) # Save array out_array = os.path.join(out_array_fold, '%s.tif' % out_name) array_to_gtiff(out_array, cc_lu) # Save PDF out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name) plot_reclassified_grid(cc_lu, out_pdf, sup_title='Change in %s' % ro_df.ix[idx, 'ES'], title='(climate and land use change together)') # Close H5 file h5.close() print '\nFinished.'
mit
7,467,887,343,285,515,000
32.92126
128
0.566229
false
3.327
false
false
false
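The threshold reclassification in reclass_rn_et_grid above can also be written as a single table lookup with np.digitize; a sketch with the same class boundaries (reclass_pct_change is an illustrative name, not part of the script):

import numpy as np

def reclass_pct_change(arr):
    bins = [-15, -5, 5, 15]                    # class edges, inclusive on the right
    classes = np.array([-2, -1, 0, 1, 2], dtype=float)
    rc = classes[np.digitize(arr, bins, right=True)]
    rc[np.isnan(arr)] = np.nan                 # keep NoData cells as NaN
    return rc

print(reclass_pct_change(np.array([-20.0, -15.0, -5.0, 0.0, 15.0, 20.0])))
# -> [-2. -2. -1.  0.  1.  2.]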
silky/ProbablyOverthinkingIt
thinkstats2.py
1
69096
"""This file contains code for use with "Think Stats" and "Think Bayes", both by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division """This file contains class definitions for: Hist: represents a histogram (map from values to integer frequencies). Pmf: represents a probability mass function (map from values to probs). _DictWrapper: private parent class for Hist and Pmf. Cdf: represents a discrete cumulative distribution function Pdf: represents a continuous probability density function """ import bisect import copy import logging import math import random import re from collections import Counter from operator import itemgetter import thinkplot import numpy as np import pandas import scipy from scipy import stats from scipy import special from scipy import ndimage from io import open ROOT2 = math.sqrt(2) def RandomSeed(x): """Initialize the random and np.random generators. x: int seed """ random.seed(x) np.random.seed(x) def Odds(p): """Computes odds for a given probability. Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor. Note: when p=1, the formula for odds divides by zero, which is normally undefined. But I think it is reasonable to define Odds(1) to be infinity, so that's what this function does. p: float 0-1 Returns: float odds """ if p == 1: return float('inf') return p / (1 - p) def Probability(o): """Computes the probability corresponding to given odds. Example: o=2 means 2:1 odds in favor, or 2/3 probability o: float odds, strictly positive Returns: float probability """ return o / (o + 1) def Probability2(yes, no): """Computes the probability corresponding to given odds. Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability. yes, no: int or float odds in favor """ return yes / (yes + no) class Interpolator(object): """Represents a mapping between sorted sequences; performs linear interp. Attributes: xs: sorted list ys: sorted list """ def __init__(self, xs, ys): self.xs = xs self.ys = ys def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys) def Reverse(self, y): """Looks up y and returns the corresponding value of x.""" return self._Bisect(y, self.ys, self.xs) def _Bisect(self, x, xs, ys): """Helper function.""" if x <= xs[0]: return ys[0] if x >= xs[-1]: return ys[-1] i = bisect.bisect(xs, x) frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1]) y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1]) return y class _DictWrapper(object): """An object that contains a dictionary.""" def __init__(self, obj=None, label=None): """Initializes the distribution. 
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs label: string label """ self.label = label if label is not None else '_nolegend_' self.d = {} # flag whether the distribution is under a log transform self.log = False if obj is None: return if isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.label = label if label is not None else obj.label if isinstance(obj, dict): self.d.update(obj.items()) elif isinstance(obj, (_DictWrapper, Cdf, Pdf)): self.d.update(obj.Items()) elif isinstance(obj, pandas.Series): self.d.update(obj.value_counts().iteritems()) else: # finally, treat it like a list self.d.update(Counter(obj)) if len(self) > 0 and isinstance(self, Pmf): self.Normalize() def __hash__(self): return id(self) def __str__(self): cls = self.__class__.__name__ return '%s(%s)' % (cls, str(self.d)) __repr__ = __str__ def __eq__(self, other): return self.d == other.d def __len__(self): return len(self.d) def __iter__(self): return iter(self.d) def iterkeys(self): """Returns an iterator over keys.""" return iter(self.d) def __contains__(self, value): return value in self.d def __getitem__(self, value): return self.d.get(value, 0) def __setitem__(self, value, prob): self.d[value] = prob def __delitem__(self, value): del self.d[value] def Copy(self, label=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. label: string label for the new Hist returns: new _DictWrapper with the same type """ new = copy.copy(self) new.d = copy.copy(self.d) new.label = label if label is not None else self.label return new def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear() for val, prob in self.Items(): new.Set(val * factor, prob) return new def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. """ if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True if m is None: m = self.MaxLike() for x, p in self.d.items(): if p: self.Set(x, math.log(p / m)) else: self.Remove(x) def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist not under a log transform") self.log = False if m is None: m = self.MaxLike() for x, p in self.d.items(): self.Set(x, math.exp(p - m)) def GetDict(self): """Gets the dictionary.""" return self.d def SetDict(self, d): """Sets the dictionary.""" self.d = d def Values(self): """Gets an unsorted sequence of values. Note: one source of confusion is that the keys of this dictionary are the values of the Hist/Pmf, and the values of the dictionary are frequencies/probabilities. """ return self.d.keys() def Items(self): """Gets an unsorted sequence of (value, freq/prob) pairs.""" return self.d.items() def Render(self, **options): """Generates a sequence of points suitable for plotting. 
Note: options are ignored Returns: tuple of (sorted value sequence, freq/prob sequence) """ if min(self.d.keys()) is np.nan: logging.warning('Hist: contains NaN, may not render correctly.') return zip(*sorted(self.Items())) def MakeCdf(self, label=None): """Makes a Cdf.""" label = label if label is not None else self.label return Cdf(self, label=label) def Print(self): """Prints the values and freqs/probs in ascending order.""" for val, prob in sorted(self.d.items()): print(val, prob) def Set(self, x, y=0): """Sets the freq/prob associated with the value x. Args: x: number value y: number freq or prob """ self.d[x] = y def Incr(self, x, term=1): """Increments the freq/prob associated with the value x. Args: x: number value term: how much to increment by """ self.d[x] = self.d.get(x, 0) + term def Mult(self, x, factor): """Scales the freq/prob associated with the value x. Args: x: number value factor: how much to multiply by """ self.d[x] = self.d.get(x, 0) * factor def Remove(self, x): """Removes a value. Throws an exception if the value is not there. Args: x: value to remove """ del self.d[x] def Total(self): """Returns the total of the frequencies/probabilities in the map.""" total = sum(self.d.values()) return total def MaxLike(self): """Returns the largest frequency/probability in the map.""" return max(self.d.values()) def Largest(self, n=10): """Returns the largest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=True)[:n] def Smallest(self, n=10): """Returns the smallest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=False)[:n] class Hist(_DictWrapper): """Represents a histogram, which is a map from values to frequencies. Values can be any hashable type; frequencies are integer counters. """ def Freq(self, x): """Gets the frequency associated with the value x. Args: x: number value Returns: int frequency """ return self.d.get(x, 0) def Freqs(self, xs): """Gets frequencies for a sequence of values.""" return [self.Freq(x) for x in xs] def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items(): if freq > other.Freq(val): return False return True def Subtract(self, other): """Subtracts the values in the given histogram from this histogram.""" for val, freq in other.Items(): self.Incr(val, -freq) class Pmf(_DictWrapper): """Represents a probability mass function. Values can be any hashable type; probabilities are floating-point. Pmfs are not necessarily normalized. """ def Prob(self, x, default=0): """Gets the probability associated with the value x. Args: x: number value default: value to return if the key is not there Returns: float probability """ return self.d.get(x, default) def Probs(self, xs): """Gets probabilities for a sequence of values.""" return [self.Prob(x) for x in xs] def Percentile(self, percentage): """Computes a percentile of a given Pmf. Note: this is not super efficient. If you are planning to compute more than a few percentiles, compute the Cdf. percentage: float 0-100 returns: value from the Pmf """ p = percentage / 100.0 total = 0 for val, prob in sorted(self.Items()): total += prob if total >= p: return val def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. 
x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbGreater(self, x) else: t = [prob for (val, prob) in self.d.items() if val > x] return sum(t) def ProbLess(self, x): """Probability that a sample from this Pmf is less than x. x: number returns: float probability """ if isinstance(x, _DictWrapper): return PmfProbLess(self, x) else: t = [prob for (val, prob) in self.d.items() if val < x] return sum(t) def __lt__(self, obj): """Less than. obj: number or _DictWrapper returns: float probability """ return self.ProbLess(obj) def __gt__(self, obj): """Greater than. obj: number or _DictWrapper returns: float probability """ return self.ProbGreater(obj) def __ge__(self, obj): """Greater than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self < obj) def __le__(self, obj): """Less than or equal. obj: number or _DictWrapper returns: float probability """ return 1 - (self > obj) def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Normalize: Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('Normalize: total probability is zero.') #logging.warning('Normalize: total probability is zero.') #return total factor = fraction / total for x in self.d: self.d[x] *= factor return total def Random(self): """Chooses a random element from this PMF. Note: this is not very efficient. If you plan to call this more than a few times, consider converting to a CDF. Returns: float value from the Pmf """ target = random.random() total = 0.0 for x, p in self.d.items(): total += p if total >= target: return x # we shouldn't get here raise ValueError('Random: Pmf might not be normalized.') def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mean = 0.0 for x, p in self.d.items(): mean += p * x return mean def Var(self, mu=None): """Computes the variance of a PMF. mu: the point around which the variance is computed; if omitted, computes the mean returns: float variance """ if mu is None: mu = self.Mean() var = 0.0 for x, p in self.d.items(): var += p * (x - mu) ** 2 return var def Std(self, mu=None): """Computes the standard deviation of a PMF. mu: the point around which the variance is computed; if omitted, computes the mean returns: float standard deviation """ var = self.Var(mu) return math.sqrt(var) def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability """ _, val = max((prob, val) for val, prob in self.Items()) return val def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = self.MakeCdf() return cdf.CredibleInterval(percentage) def __add__(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf or a scalar returns: new Pmf """ try: return self.AddPmf(other) except AttributeError: return self.AddConstant(other) def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. 
other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf def __sub__(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.SubPmf(other) except AttributeError: return self.AddConstant(-other) def SubPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 - v2, p1 * p2) return pmf def __mul__(self, other): """Computes the Pmf of the product of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.MulPmf(other) except AttributeError: return self.MulConstant(other) def MulPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 * v2, p1 * p2) return pmf def MulConstant(self, other): """Computes the Pmf of the product of a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 * other, p1) return pmf def __div__(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.DivPmf(other) except AttributeError: return self.MulConstant(1/other) __truediv__ = __div__ def DivPmf(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 / v2, p1 * p2) return pmf def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.MakeCdf() return cdf.Max(k) class Joint(Pmf): """Represents a joint distribution. The values are sequences (usually tuples) """ def Marginal(self, i, label=None): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf def Conditional(self, i, j, val, label=None): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Assumes that the PMFs represent independent random variables. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint def MakeHistFromList(t, label=None): """Makes a histogram from an unsorted sequence of values. 
Args: t: sequence of numbers label: string label for this histogram Returns: Hist object """ return Hist(t, label=label) def MakeHistFromDict(d, label=None): """Makes a histogram from a map from values to frequencies. Args: d: dictionary that maps values to frequencies label: string label for this histogram Returns: Hist object """ return Hist(d, label) def MakePmfFromList(t, label=None): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this PMF Returns: Pmf object """ return Pmf(t, label=label) def MakePmfFromDict(d, label=None): """Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this PMF Returns: Pmf object """ return Pmf(d, label=label) def MakePmfFromItems(t, label=None): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs label: string label for this PMF Returns: Pmf object """ return Pmf(dict(t), label=label) def MakePmfFromHist(hist, label=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object label: string label Returns: Pmf object """ if label is None: label = hist.label return Pmf(hist, label=label) def MakeMixture(metapmf, label='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. label: string label for the new Pmf. Returns: Pmf object. """ mix = Pmf(label=label) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusize) n: number of values """ pmf = Pmf() for x in np.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf class Cdf(object): """Represents a cumulative distribution function. Attributes: xs: sequence of values ps: sequence of probabilities label: string used as a graph label. """ def __init__(self, obj=None, ps=None, label=None): """Initializes. If ps is provided, obj must be the corresponding list of values. 
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs ps: list of cumulative probabilities label: string label """ self.label = label if label is not None else '_nolegend_' if isinstance(obj, (_DictWrapper, Cdf, Pdf)): if not label: self.label = label if label is not None else obj.label if obj is None: # caller does not provide obj, make an empty Cdf self.xs = np.asarray([]) self.ps = np.asarray([]) if ps is not None: logging.warning("Cdf: can't pass ps without also passing xs.") return else: # if the caller provides xs and ps, just store them if ps is not None: if isinstance(ps, str): logging.warning("Cdf: ps can't be a string") self.xs = np.asarray(obj) self.ps = np.asarray(ps) return # caller has provided just obj, not ps if isinstance(obj, Cdf): self.xs = copy.copy(obj.xs) self.ps = copy.copy(obj.ps) return if isinstance(obj, _DictWrapper): dw = obj else: dw = Hist(obj) if len(dw) == 0: self.xs = np.asarray([]) self.ps = np.asarray([]) return xs, freqs = zip(*sorted(dw.Items())) self.xs = np.asarray(xs) self.ps = np.cumsum(freqs, dtype=np.float) self.ps /= self.ps[-1] def __str__(self): return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps)) __repr__ = __str__ def __len__(self): return len(self.xs) def __getitem__(self, x): return self.Prob(x) def __setitem__(self): raise UnimplementedMethodException() def __delitem__(self): raise UnimplementedMethodException() def __eq__(self, other): return np.all(self.xs == other.xs) and np.all(self.ps == other.ps) def Copy(self, label=None): """Returns a copy of this Cdf. label: string label for the new Cdf """ if label is None: label = self.label return Cdf(list(self.xs), list(self.ps), label=label) def MakePmf(self, label=None): """Makes a Pmf.""" if label is None: label = self.label return Pmf(self, label=label) def Values(self): """Returns a sorted list of values. """ return self.xs def Items(self): """Returns a sorted sequence of (value, probability) pairs. Note: in Python3, returns an iterator. """ a = self.ps b = np.roll(a, 1) b[0] = 0 return zip(self.xs, a-b) def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() # don't use +=, or else an int array + float yields int array new.xs = new.xs + term return new def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() # don't use *=, or else an int array * float yields int array new.xs = new.xs * factor return new def Prob(self, x): """Returns CDF(x), the probability that corresponds to value x. Args: x: number Returns: float probability """ if x < self.xs[0]: return 0.0 index = bisect.bisect(self.xs, x) p = self.ps[index-1] return p def Probs(self, xs): """Gets probabilities for a sequence of values. xs: any sequence that can be converted to NumPy array returns: NumPy array of cumulative probabilities """ xs = np.asarray(xs) index = np.searchsorted(self.xs, xs, side='right') ps = self.ps[index-1] ps[xs < self.xs[0]] = 0.0 return ps ProbArray = Probs def Value(self, p): """Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value """ if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') index = bisect.bisect_left(self.ps, p) return self.xs[index] def ValueArray(self, ps): """Returns InverseCDF(p), the value that corresponds to probability p. 
Args: ps: NumPy array of numbers in the range [0, 1] Returns: NumPy array of values """ ps = np.asarray(ps) if np.any(ps < 0) or np.any(ps > 1): raise ValueError('Probability p must be in range [0, 1]') index = np.searchsorted(self.ps, ps, side='left') return self.xs[index] def Percentile(self, p): """Returns the value that corresponds to percentile p. Args: p: number in the range [0, 100] Returns: number value """ return self.Value(p / 100.0) def PercentileRank(self, x): """Returns the percentile rank of the value x. x: potential value in the CDF returns: percentile rank in the range 0 to 100 """ return self.Prob(x) * 100.0 def Random(self): """Chooses a random value from this distribution.""" return self.Value(random.random()) def Sample(self, n): """Generates a random sample from this distribution. n: int length of the sample returns: NumPy array """ ps = np.random.random(n) return self.ValueArray(ps) def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval ConfidenceInterval = CredibleInterval def _Round(self, multiplier=1000.0): """ An entry is added to the cdf only if the percentile differs from the previous value in a significant digit, where the number of significant digits is determined by multiplier. The default is 1000, which keeps log10(1000) = 3 significant digits. """ # TODO(write this method) raise UnimplementedMethodException() def Render(self, **options): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Note: options are ignored Returns: tuple of (xs, ps) """ def interleave(a, b): c = np.empty(a.shape[0] + b.shape[0]) c[::2] = a c[1::2] = b return c a = np.array(self.xs) xs = interleave(a, a) shift_ps = np.roll(self.ps, 1) shift_ps[0] = 0 ps = interleave(shift_ps, self.ps) return xs, ps def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.Copy() cdf.ps **= k return cdf def MakeCdfFromItems(items, label=None): """Makes a cdf from an unsorted sequence of (value, frequency) pairs. Args: items: unsorted sequence of (value, frequency) pairs label: string label for this CDF Returns: cdf: list of (value, fraction) pairs """ return Cdf(dict(items), label=label) def MakeCdfFromDict(d, label=None): """Makes a CDF from a dictionary that maps values to frequencies. Args: d: dictionary that maps values to frequencies. label: string label for the data. Returns: Cdf object """ return Cdf(d, label=label) def MakeCdfFromList(seq, label=None): """Creates a CDF from an unsorted sequence. Args: seq: unsorted sequence of sortable values label: string label for the cdf Returns: Cdf object """ return Cdf(seq, label=label) def MakeCdfFromHist(hist, label=None): """Makes a CDF from a Hist object. Args: hist: Pmf.Hist object label: string label for the data. Returns: Cdf object """ if label is None: label = hist.label return Cdf(hist, label=label) def MakeCdfFromPmf(pmf, label=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object label: string label for the data. 
Returns: Cdf object """ if label is None: label = pmf.label return Cdf(pmf, label=label) class UnimplementedMethodException(Exception): """Exception if someone calls a method that should be overridden.""" class Suite(Pmf): """Represents a suite of hypotheses and their probabilities.""" def Update(self, data): """Updates each hypothesis based on the data. data: any representation of the data returns: the normalizing constant """ for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like) def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdateSet(self, dataset): """Updates each hypothesis based on the dataset. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: None """ for data in dataset: self.LogUpdate(data) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def LogLikelihood(self, data, hypo): """Computes the log likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob) def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo) def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds)) def MakeSuiteFromList(t, label=None): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this suite Returns: Suite object """ hist = MakeHistFromList(t, label=label) d = hist.GetDict() return MakeSuiteFromDict(d) def MakeSuiteFromHist(hist, label=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object label: string label Returns: Suite object """ if label is None: label = hist.label # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, label) def MakeSuiteFromDict(d, label=None): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this suite Returns: Suite object """ suite = Suite(label=label) suite.SetDict(d) suite.Normalize() return suite class Pdf(object): """Represents a probability density function (PDF).""" def Density(self, x): """Evaluates this Pdf at x. 
Returns: float or NumPy array of probability density """ raise UnimplementedMethodException() def GetLinspace(self): """Get a linspace for plotting. Not all subclasses of Pdf implement this. Returns: numpy array """ raise UnimplementedMethodException() def MakePmf(self, **options): """Makes a discrete version of this Pdf. options can include label: string low: low end of range high: high end of range n: number of places to evaluate Returns: new Pmf """ label = options.pop('label', '') xs, ds = self.Render(**options) return Pmf(dict(zip(xs, ds)), label=label) def Render(self, **options): """Generates a sequence of points suitable for plotting. If options includes low and high, it must also include n; in that case the density is evaluated an n locations between low and high, including both. If options includes xs, the density is evaluate at those location. Otherwise, self.GetLinspace is invoked to provide the locations. Returns: tuple of (xs, densities) """ low, high = options.pop('low', None), options.pop('high', None) if low is not None and high is not None: n = options.pop('n', 101) xs = np.linspace(low, high, n) else: xs = options.pop('xs', None) if xs is None: xs = self.GetLinspace() ds = self.Density(xs) return xs, ds def Items(self): """Generates a sequence of (value, probability) pairs. """ return zip(*self.Render()) class NormalPdf(Pdf): """Represents the PDF of a Normal distribution.""" def __init__(self, mu=0, sigma=1, label=None): """Constructs a Normal Pdf with given mu and sigma. mu: mean sigma: standard deviation label: string """ self.mu = mu self.sigma = sigma self.label = label if label is not None else '_nolegend_' def __str__(self): return 'NormalPdf(%f, %f)' % (self.mu, self.sigma) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = self.mu-3*self.sigma, self.mu+3*self.sigma return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.norm.pdf(xs, self.mu, self.sigma) class ExponentialPdf(Pdf): """Represents the PDF of an exponential distribution.""" def __init__(self, lam=1, label=None): """Constructs an exponential Pdf with given parameter. lam: rate parameter label: string """ self.lam = lam self.label = label if label is not None else '_nolegend_' def __str__(self): return 'ExponentialPdf(%f)' % (self.lam) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = 0, 5.0/self.lam return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.expon.pdf(xs, scale=1.0/self.lam) class EstimatedPdf(Pdf): """Represents a PDF estimated by KDE.""" def __init__(self, sample, label=None): """Estimates the density function based on a sample. sample: sequence of data label: string """ self.label = label if label is not None else '_nolegend_' self.kde = stats.gaussian_kde(sample) low = min(sample) high = max(sample) self.linspace = np.linspace(low, high, 101) def __str__(self): return 'EstimatedPdf(label=%s)' % str(self.label) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ return self.linspace def Density(self, xs): """Evaluates this Pdf at xs. returns: float or NumPy array of probability density """ return self.kde.evaluate(xs) def Sample(self, n): """Generates a random sample from the estimated Pdf. 
        n: size of sample
        """
        # NOTE: we have to flatten because resample returns a 2-D
        # array for some reason.
        return self.kde.resample(n).flatten()


def CredibleInterval(pmf, percentage=90):
    """Computes a credible interval for a given distribution.

    If percentage=90, computes the 90% CI.

    Args:
        pmf: Pmf object representing a posterior distribution
        percentage: float between 0 and 100

    Returns:
        sequence of two floats, low and high
    """
    cdf = pmf.MakeCdf()
    prob = (1 - percentage / 100.0) / 2
    interval = cdf.Value(prob), cdf.Value(1 - prob)
    return interval


def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    total = 0.0
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 < v2:
                total += p1 * p2
    return total


def PmfProbGreater(pmf1, pmf2):
    """Probability that a value from pmf1 is greater than a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    total = 0.0
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 > v2:
                total += p1 * p2
    return total


def PmfProbEqual(pmf1, pmf2):
    """Probability that a value from pmf1 equals a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    total = 0.0
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            if v1 == v2:
                total += p1 * p2
    return total


def RandomSum(dists):
    """Chooses a random value from each dist and returns the sum.

    dists: sequence of Pmf or Cdf objects

    returns: numerical sum
    """
    total = sum(dist.Random() for dist in dists)
    return total


def SampleSum(dists, n):
    """Draws a sample of sums from a list of distributions.

    dists: sequence of Pmf or Cdf objects
    n: sample size

    returns: new Pmf of sums
    """
    pmf = Pmf(RandomSum(dists) for i in range(n))
    return pmf


def EvalNormalPdf(x, mu, sigma):
    """Computes the PDF of the normal distribution.

    x: value
    mu: mean
    sigma: standard deviation

    returns: float probability density
    """
    return stats.norm.pdf(x, mu, sigma)


def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
    """Makes a PMF discrete approx to a Normal distribution.

    mu: float mean
    sigma: float standard deviation
    num_sigmas: how many sigmas to extend in each direction
    n: number of values in the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    low = mu - num_sigmas * sigma
    high = mu + num_sigmas * sigma

    for x in np.linspace(low, high, n):
        p = EvalNormalPdf(x, mu, sigma)
        pmf.Set(x, p)
    pmf.Normalize()
    return pmf


def EvalBinomialPmf(k, n, p):
    """Evaluates the binomial PMF.

    Returns the probability of k successes in n trials with probability p.
    """
    return stats.binom.pmf(k, n, p)


def EvalHypergeomPmf(k, N, K, n):
    """Evaluates the hypergeometric PMF.

    Returns the probability of k successes in n trials from a population N
    with K successes in it.
    """
    return stats.hypergeom.pmf(k, N, K, n)


def EvalPoissonPmf(k, lam):
    """Computes the Poisson PMF.

    k: number of events
    lam: parameter lambda in events per unit time

    returns: float probability
    """
    # don't use the scipy function (yet). for lam=0 it returns NaN;
    # should be 0.0
    # return stats.poisson.pmf(k, lam)
    return lam ** k * math.exp(-lam) / special.gamma(k+1)


def MakePoissonPmf(lam, high, step=1):
    """Makes a PMF discrete approx to a Poisson distribution.

    lam: parameter lambda in events per unit time
    high: upper bound of the Pmf

    returns: normalized Pmf
    """
    pmf = Pmf()
    for k in range(0, high + 1, step):
        p = EvalPoissonPmf(k, lam)
        pmf.Set(k, p)
    pmf.Normalize()
    return pmf


def EvalExponentialPdf(x, lam):
    """Computes the exponential PDF.
x: value lam: parameter lambda in events per unit time returns: float probability density """ return lam * math.exp(-lam * x) def EvalExponentialCdf(x, lam): """Evaluates CDF of the exponential distribution with parameter lam.""" return 1 - math.exp(-lam * x) def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in np.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf def StandardNormalCdf(x): """Evaluates the CDF of the standard Normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution #Cumulative_distribution_function Args: x: float Returns: float """ return (math.erf(x / ROOT2) + 1) / 2 def EvalNormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the normal distribution. Args: x: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.cdf(x, loc=mu, scale=sigma) def EvalNormalCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.ppf(p, loc=mu, scale=sigma) def EvalLognormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the lognormal distribution. x: float or sequence mu: mean parameter sigma: standard deviation parameter Returns: float or sequence """ return stats.lognorm.cdf(x, loc=mu, scale=sigma) def RenderExpoCdf(lam, low, high, n=101): """Generates sequences of xs and ps for an exponential CDF. lam: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = 1 - np.exp(-lam * xs) #ps = stats.expon.cdf(xs, scale=1.0/lam) return xs, ps def RenderNormalCdf(mu, sigma, low, high, n=101): """Generates sequences of xs and ps for a Normal CDF. mu: parameter sigma: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = stats.norm.cdf(xs, mu, sigma) return xs, ps def RenderParetoCdf(xmin, alpha, low, high, n=50): """Generates sequences of xs and ps for a Pareto CDF. xmin: parameter alpha: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ if low < xmin: low = xmin xs = np.linspace(low, high, n) ps = 1 - (xs / xmin) ** -alpha #ps = stats.pareto.cdf(xs, scale=xmin, b=alpha) return xs, ps class Beta(object): """Represents a Beta distribution. See http://en.wikipedia.org/wiki/Beta_distribution """ def __init__(self, alpha=1, beta=1, label=None): """Initializes a Beta distribution.""" self.alpha = alpha self.beta = beta self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails def Mean(self): """Computes the mean of this distribution.""" return self.alpha / (self.alpha + self.beta) def Random(self): """Generates a random variate from this distribution.""" return random.betavariate(self.alpha, self.beta) def Sample(self, n): """Generates a random sample from this distribution. 
n: int sample size """ size = n, return np.random.beta(self.alpha, self.beta, size) def EvalPdf(self, x): """Evaluates the PDF at x.""" return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1) def MakePmf(self, steps=101, label=None): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in range(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = Pmf(dict(zip(xs, probs)), label=label) return pmf def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in range(steps)] ps = [special.betainc(self.alpha, self.beta, x) for x in xs] cdf = Cdf(xs, ps) return cdf class Dirichlet(object): """Represents a Dirichlet distribution. See http://en.wikipedia.org/wiki/Dirichlet_distribution """ def __init__(self, n, conc=1, label=None): """Initializes a Dirichlet distribution. n: number of dimensions conc: concentration parameter (smaller yields more concentration) label: string label """ if n < 2: raise ValueError('A Dirichlet distribution with ' 'n<2 makes no sense') self.n = n self.params = np.ones(n, dtype=np.float) * conc self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = np.random.gamma(self.params) return p / p.sum() def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod() def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = np.log(x[:m]) * data return y.sum() def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha) def PredictivePmf(self, xs, label=None): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return Pmf(zip(xs, ps), label=label) def BinomialCoef(n, k): """Compute the binomial coefficient "n choose k". n: number of trials k: number of successes Returns: float """ return scipy.misc.comb(n, k) def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k) def NormalProbability(ys, jitter=0.0): """Generates data for a normal probability plot. 
    ys: sequence of values
    jitter: float magnitude of jitter added to the ys

    returns: numpy arrays xs, ys
    """
    n = len(ys)
    xs = np.random.normal(0, 1, n)
    xs.sort()

    if jitter:
        ys = Jitter(ys, jitter)
    else:
        ys = np.array(ys)
    ys.sort()

    return xs, ys


def Jitter(values, jitter=0.5):
    """Jitters the values by adding a uniform variate in (-jitter, jitter).

    values: sequence
    jitter: scalar magnitude of jitter

    returns: new numpy array
    """
    n = len(values)
    return np.random.uniform(-jitter, +jitter, n) + values


def NormalProbabilityPlot(sample, fit_color='0.8', **options):
    """Makes a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to Plot
    """
    xs, ys = NormalProbability(sample)
    mean, var = MeanVar(sample)
    std = math.sqrt(var)

    fit = FitLine(xs, mean, std)
    thinkplot.Plot(*fit, color=fit_color, label='model')

    xs, ys = NormalProbability(sample)
    thinkplot.Plot(xs, ys, **options)


def Mean(xs):
    """Computes mean.

    xs: sequence of values

    returns: float mean
    """
    return np.mean(xs)


def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)

    if mu is None:
        mu = xs.mean()

    ds = xs - mu
    return np.dot(ds, ds) / (len(xs) - ddof)


def Std(xs, mu=None, ddof=0):
    """Computes standard deviation.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    var = Var(xs, mu, ddof)
    return math.sqrt(var)


def MeanVar(xs, ddof=0):
    """Computes mean and variance.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    xs = np.asarray(xs)
    mean = xs.mean()
    s2 = Var(xs, mean, ddof)
    return mean, s2


def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        sequence of values
    """
    n = int(p * len(t))
    t = sorted(t)[n:-n]
    return t


def TrimmedMean(t, p=0.01):
    """Computes the trimmed mean of a sequence of numbers.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        float
    """
    t = Trim(t, p)
    return Mean(t)


def TrimmedMeanVar(t, p=0.01):
    """Computes the trimmed mean and variance of a sequence of numbers.

    Side effect: sorts the list.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        pair of floats, mean and variance
    """
    t = Trim(t, p)
    mu, var = MeanVar(t)
    return mu, var


def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    diff = group1.mean() - group2.mean()

    n1, n2 = len(group1), len(group2)

    var1 = group1.var()
    var2 = group2.var()

    pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
    d = diff / math.sqrt(pooled_var)
    return d


def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values
        meanx: optional float mean of xs
        meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    if meanx is None:
        meanx = np.mean(xs)
    if meany is None:
        meany = np.mean(ys)

    cov = np.dot(xs-meanx, ys-meany) / len(xs)
    return cov


def Corr(xs, ys):
    """Computes Corr(X, Y).
Args: xs: sequence of values ys: sequence of values Returns: Corr(X, Y) """ xs = np.asarray(xs) ys = np.asarray(ys) meanx, varx = MeanVar(xs) meany, vary = MeanVar(ys) corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary) return corr def SerialCorr(series, lag=1): """Computes the serial correlation of a series. series: Series lag: integer number of intervals to shift returns: float correlation """ xs = series[lag:] ys = series.shift(lag)[lag:] corr = Corr(xs, ys) return corr def SpearmanCorr(xs, ys): """Computes Spearman's rank correlation. Args: xs: sequence of values ys: sequence of values Returns: float Spearman's correlation """ xranks = pandas.Series(xs).rank() yranks = pandas.Series(ys).rank() return Corr(xranks, yranks) def MapToRanks(t): """Returns a list of ranks corresponding to the elements in t. Args: t: sequence of numbers Returns: list of integer ranks, starting at 1 """ # pair up each value with its index pairs = enumerate(t) # sort by value sorted_pairs = sorted(pairs, key=itemgetter(1)) # pair up each pair with its rank ranked = enumerate(sorted_pairs) # sort by index resorted = sorted(ranked, key=lambda trip: trip[1][0]) # extract the ranks ranks = [trip[0]+1 for trip in resorted] return ranks def LeastSquares(xs, ys): """Computes a linear least squares fit for ys as a function of xs. Args: xs: sequence of values ys: sequence of values Returns: tuple of (intercept, slope) """ meanx, varx = MeanVar(xs) meany = Mean(ys) slope = Cov(xs, ys, meanx, meany) / varx inter = meany - slope * meanx return inter, slope def FitLine(xs, inter, slope): """Fits a line to the given data. xs: sequence of x returns: tuple of numpy arrays (sorted xs, fit ys) """ fit_xs = np.sort(xs) fit_ys = inter + slope * fit_xs return fit_xs, fit_ys def Residuals(xs, ys, inter, slope): """Computes residuals for a linear fit with parameters inter and slope. Args: xs: independent variable ys: dependent variable inter: float intercept slope: float slope Returns: list of residuals """ xs = np.asarray(xs) ys = np.asarray(ys) res = ys - (inter + slope * xs) return res def CoefDetermination(ys, res): """Computes the coefficient of determination (R^2) for given residuals. Args: ys: dependent variable res: residuals Returns: float coefficient of determination """ return 1 - Var(res) / Var(ys) def CorrelatedGenerator(rho): """Generates standard normal variates with serial correlation. rho: target coefficient of correlation Returns: iterable """ x = random.gauss(0, 1) yield x sigma = math.sqrt(1 - rho**2) while True: x = random.gauss(x * rho, sigma) yield x def CorrelatedNormalGenerator(mu, sigma, rho): """Generates normal variates with serial correlation. mu: mean of variate sigma: standard deviation of variate rho: target coefficient of correlation Returns: iterable """ for x in CorrelatedGenerator(rho): yield x * sigma + mu def RawMoment(xs, k): """Computes the kth raw moment of xs. """ return sum(x**k for x in xs) / len(xs) def CentralMoment(xs, k): """Computes the kth central moment of xs. """ mean = RawMoment(xs, 1) return sum((x - mean)**k for x in xs) / len(xs) def StandardizedMoment(xs, k): """Computes the kth standardized moment of xs. """ var = CentralMoment(xs, 2) std = math.sqrt(var) return CentralMoment(xs, k) / std**k def Skewness(xs): """Computes skewness. """ return StandardizedMoment(xs, 3) def Median(xs): """Computes the median (50th percentile) of a sequence. 
    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    cdf = Cdf(xs)
    return cdf.Value(0.5)


def IQR(xs):
    """Computes the interquartile range of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats
    """
    cdf = Cdf(xs)
    return cdf.Value(0.25), cdf.Value(0.75)


def PearsonMedianSkewness(xs):
    """Computes the Pearson median skewness.
    """
    median = Median(xs)
    mean = RawMoment(xs, 1)
    var = CentralMoment(xs, 2)
    std = math.sqrt(var)
    gp = 3 * (mean - median) / std
    return gp


class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of (start, end) index tuples
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int
        self.colspecs = self.colspecs.astype(np.int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df


def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    for line in open(dct_file, **options):
        match = re.search(r'_column\(([^)]*)\)', line)
        if match:
            start = int(match.group(1))
            t = line.split()
            vtype, name, fstring = t[1:4]
            name = name.lower()
            if vtype.startswith('str'):
                vtype = str
            else:
                vtype = type_map[vtype]
            long_desc = ' '.join(t[4:]).strip('"')
            var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables)-1, 'end'] = 0

    dct = FixedWidthVariables(variables, index_base=1)
    return dct


def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    if n is None:
        n = len(xs)
    return np.random.choice(xs, n, replace=True)


def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    indices = np.random.choice(df.index, nrows, replace=replace)
    sample = df.loc[indices]
    return sample


def ResampleRows(df):
    """Resamples rows from a DataFrame.

    df: DataFrame

    returns: DataFrame
    """
    return SampleRows(df, len(df), replace=True)


def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    weights = df[column]
    cdf = Cdf(dict(weights))
    indices = cdf.Sample(len(weights))
    sample = df.loc[indices]
    return sample


def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.
p: float 0--100 returns: NumPy array (one row) """ rows, cols = array.shape index = int(rows * p / 100) return array[index,] def PercentileRows(ys_seq, percents): """Given a collection of lines, selects percentiles along vertical axis. For example, if ys_seq contains simulation results like ys as a function of time, and percents contains (5, 95), the result would be a 90% CI for each vertical slice of the simulation results. ys_seq: sequence of lines (y values) percents: list of percentiles (0-100) to select returns: list of NumPy arrays, one for each percentile """ nrows = len(ys_seq) ncols = len(ys_seq[0]) array = np.zeros((nrows, ncols)) for i, ys in enumerate(ys_seq): array[i,] = ys array = np.sort(array, axis=0) rows = [PercentileRow(array, p) for p in percents] return rows def Smooth(xs, sigma=2, **options): """Smooths a NumPy array with a Gaussian filter. xs: sequence sigma: standard deviation of the filter """ return ndimage.filters.gaussian_filter1d(xs, sigma, **options) class HypothesisTest(object): """Represents a hypothesis test.""" def __init__(self, data): """Initializes. data: data in whatever form is relevant """ self.data = data self.MakeModel() self.actual = self.TestStatistic(data) self.test_stats = None self.test_cdf = None def PValue(self, iters=1000): """Computes the distribution of the test statistic and p-value. iters: number of iterations returns: float p-value """ self.test_stats = [self.TestStatistic(self.RunModel()) for _ in range(iters)] self.test_cdf = Cdf(self.test_stats) count = sum(1 for x in self.test_stats if x >= self.actual) return count / iters def MaxTestStat(self): """Returns the largest test statistic seen during simulations. """ return max(self.test_stats) def PlotCdf(self, label=None): """Draws a Cdf with vertical lines at the observed test stat. """ def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') VertLine(self.actual) thinkplot.Cdf(self.test_cdf, label=label) def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ raise UnimplementedMethodException() def MakeModel(self): """Build a model of the null hypothesis. """ pass def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ raise UnimplementedMethodException() def main(): pass if __name__ == '__main__': main()
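The Cdf class above supports both forward lookup (Prob) and inverse lookup (Value), which is all that Percentile, Random, and Sample need. A minimal usage sketch; the module name thinkstats2 is an assumption, not something stated in the file itself:

# Usage sketch for the Cdf class defined above.  The import name
# `thinkstats2` is an assumption; adjust to wherever this file lives.
import thinkstats2

cdf = thinkstats2.Cdf([1, 2, 2, 3, 5], label='toy sample')

print(cdf.Prob(2))         # CDF(2): fraction of values <= 2, here 0.6
print(cdf.Value(0.5))      # inverse CDF: the median, here 2
print(cdf.Percentile(80))  # 80th percentile, computed via Value(0.8)
draws = cdf.Sample(1000)   # NumPy array drawn through ValueArray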
mit
1,973,675,057,824,859,000
23.589324
79
0.577964
false
3.902626
false
false
false
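Because Suite.Likelihood raises UnimplementedMethodException, the Suite class in the listing above is meant to be subclassed. A minimal Bayesian-update sketch under the same module-name assumption as before; the hypotheses and data are toy values:

# Subclassing Suite from the file above for a Bayesian update.
from thinkstats2 import Suite

class Coin(Suite):
    """Hypotheses are probabilities of heads; data is 'H' or 'T'."""
    def Likelihood(self, data, hypo):
        return hypo if data == 'H' else 1 - hypo

suite = Coin([0.0, 0.25, 0.5, 0.75, 1.0])  # uniform prior over 5 hypotheses
for outcome in 'HHTH':
    suite.Update(outcome)  # Mult() by each likelihood, then Normalize()

suite.Print()  # posterior probability of each hypothesis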
redhat-openstack/rdo-infra
ci-scripts/dlrnapi_promoter/test_dlrn_integration.py
1
6534
""" This test is launched as part of the existing tox command It tests if promoter and dlrn server are interacting correctly Uses standard pytest fixture as a setup/teardown method """ import logging import os import promoter_integration_checks import pytest import yaml from common import close_logging from config_legacy import PromoterLegacyConfig try: import urllib2 as url except ImportError: import urllib.request as url from dlrn_hash import DlrnAggregateHash, DlrnCommitDistroHash, DlrnHash from dlrnapi_client.rest import ApiException from logic import Promoter from stage import main as stage_main @pytest.fixture(scope='function', params=['dlrn_legacyconf_single', 'dlrn_legacyconf_integration']) def staged_env(request): """ Fixture that runs the staging environment provisioner with parameters, yield the stage_info file produced and cleans up after It has two parameters by default, to test the interaction for single pipeline and for integration pipeline :return: yields the stage_info dict """ close_logging("promoter-staging") close_logging("promoter") log = logging.getLogger('promoter-staging') setup_cmd_line = "" teardown_cmd_line = "" # We are going to call the main in the staging passing a composed command # line, so we are testing also that the argument parsing is working # correctly instead of passing configuration directly release_config = \ "CentOS-7/master.yaml" promoter_config_file = "staging/CentOS-7/master.ini" setup_cmd_line += " --scenes dlrn" try: test_case = request.param except AttributeError: pass except KeyError: log.error("Invalid test case '{}'".format(request.param)) raise # for the tests of the integration pipeline we need to pass a different # file with db data if "_integration" in test_case: release_config = \ "CentOS-8/master.yaml" promoter_config_file = \ "staging/CentOS-8/master.ini" setup_cmd_line += " --db-data-file integration-pipeline.yaml" teardown_cmd_line += " --db-data-file integration-pipeline.yaml" setup_cmd_line += " setup --release-config {}".format(release_config) teardown_cmd_line += " teardown" log.info("Running cmd line: {}".format(setup_cmd_line)) config = stage_main(setup_cmd_line) stage_info_path = config['stage_info_path'] with open(stage_info_path, "r") as stage_info_file: stage_info = yaml.safe_load(stage_info_file) overrides = { 'log_file': stage_info['main']['log_file'], 'repo_url': stage_info['dlrn']['server']['repo_url'], 'allowed_clients': 'dlrn_client', 'config_file': promoter_config_file, } overrides_obj = type("FakeArgs", (), overrides) os.environ["DLRNAPI_PASSWORD"] = stage_info['dlrn']['server']['password'] if 'legacyconf' in test_case: config = PromoterLegacyConfig(overrides_obj.config_file, overrides=overrides_obj) else: raise Exception("New config engine is not implemented yet") promoter = Promoter(config) yield stage_info, promoter log.info("Running cmd line: {}".format(teardown_cmd_line)) stage_main(teardown_cmd_line) @pytest.mark.serial def test_dlrn_server(staged_env): """ General server testing, with a single promotion :param staged_env: The staged env fixture :return: None """ stage_info, promoter = staged_env commit = stage_info['dlrn']['promotions']['promotion_candidate'] candidate_label = commit['name'] promote_name = stage_info['dlrn']['promotion_target'] repo_url = stage_info['dlrn']['server']['repo_url'] client = promoter.dlrn_client dlrn_hash = DlrnHash(source=commit) dlrn_hash.label = candidate_label # TODO: Check db injection (needs sqlite3 import) # Check we can access dlrnapi try: client.promote(dlrn_hash, 
promote_name, candidate_label=candidate_label, create_previous=False) assert True, "Dlrn api responding" except ApiException as e: msg = "Exception when calling DefaultApi->api_promote_post: %s\n" % e assert False, msg # Check if we can access repo_url and get the versions file versions_url = os.path.join(repo_url, promote_name, 'versions.csv') try: url.urlopen(versions_url) assert True, "Versions file found" except IOError: assert False, "No versions file generated" @pytest.mark.serial def test_select_candidates(staged_env): """ Testing the selection of candidates hashes after fetching them from the server :param staged_env: The staged env fixture :param promoter: The promoter fixture :return: None """ stage_info, promoter = staged_env candidate_hashes_list = [] for target_label, candidate_label in \ promoter.config.promotion_steps_map.items(): candidate_hashes_list = promoter.select_candidates(candidate_label, target_label) assert candidate_hashes_list != [] if stage_info['main']['pipeline_type'] == "integration": assert type(candidate_hashes_list[0]) == DlrnAggregateHash elif stage_info['main']['pipeline_type'] == "single": assert type(candidate_hashes_list[0]) == DlrnCommitDistroHash def test_promote_all_links(staged_env): """ Testing the promotion of candidates inside promote_all_links, but limited to the dlrn part :param staged_env: The staged env fixture :param promoter: The promoter fixture :return: None """ stage_info, promoter = staged_env promoted_pairs = promoter.promote_all() for promoted_hash, label in promoted_pairs: if stage_info['main']['pipeline_type'] == "single": error_msg = "Single pipeline should promote a commit/distro hash" assert type(promoted_hash) == DlrnCommitDistroHash, error_msg elif stage_info['main']['pipeline_type'] == "integration": error_msg = "Integration pipeline should promote an aggregate hash" assert type(promoted_hash) == DlrnAggregateHash, error_msg promoter_integration_checks.check_dlrn_promoted_hash( stage_info=stage_info) error_msg = "Nothing promoted, and checks failed to detect issues" assert len(promoted_pairs) != 0, error_msg
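The staged_env fixture above combines three standard pytest features: function scope, fixture params consumed through request.param, and yield-based teardown. A stripped-down sketch of the same pattern, with all names hypothetical:

# Stripped-down sketch of the parametrized setup/teardown pattern used by
# staged_env above.  All names here are hypothetical.
import pytest

@pytest.fixture(scope='function', params=['single', 'integration'])
def tiny_env(request):
    env = {'pipeline_type': request.param}   # setup, once per param value
    yield env                                # hand the object to the test
    env.clear()                              # teardown runs after the test

def test_pipeline_type(tiny_env):
    # pytest runs this test twice, once per fixture param
    assert tiny_env['pipeline_type'] in ('single', 'integration')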
apache-2.0
3,242,506,652,930,499,000
33.389474
79
0.662687
false
3.816589
true
false
false
diego-d5000/MisValesMd
env/lib/python2.7/site-packages/django/contrib/auth/tests/custom_user.py
1
3775
from django.contrib.auth.models import ( AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission, PermissionsMixin, UserManager, ) from django.db import models # The custom User uses email as the unique identifier, and requires # that every user provide a date of birth. This lets us test # changes in username datatype, and non-text required fields. class CustomUserManager(BaseUserManager): def create_user(self, email, date_of_birth, password=None): """ Creates and saves a User with the given email and password. """ if not email: raise ValueError('Users must have an email address') user = self.model( email=self.normalize_email(email), date_of_birth=date_of_birth, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_admin = True u.save(using=self._db) return u class CustomUser(AbstractBaseUser): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) date_of_birth = models.DateField() custom_objects = CustomUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] class Meta: app_label = 'auth' def get_full_name(self): return self.email def get_short_name(self): return self.email def __unicode__(self): return self.email # Maybe required? def get_group_permissions(self, obj=None): return set() def get_all_permissions(self, obj=None): return set() def has_perm(self, perm, obj=None): return True def has_perms(self, perm_list, obj=None): return True def has_module_perms(self, app_label): return True # Admin required fields @property def is_staff(self): return self.is_admin class RemoveGroupsAndPermissions(object): """ A context manager to temporarily remove the groups and user_permissions M2M fields from the AbstractUser class, so they don't clash with the related_name sets. """ def __enter__(self): self._old_au_local_m2m = AbstractUser._meta.local_many_to_many self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many groups = models.ManyToManyField(Group, blank=True) groups.contribute_to_class(PermissionsMixin, "groups") user_permissions = models.ManyToManyField(Permission, blank=True) user_permissions.contribute_to_class(PermissionsMixin, "user_permissions") PermissionsMixin._meta.local_many_to_many = [groups, user_permissions] AbstractUser._meta.local_many_to_many = [groups, user_permissions] def __exit__(self, exc_type, exc_value, traceback): AbstractUser._meta.local_many_to_many = self._old_au_local_m2m PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m # The extension user is a simple extension of the built-in user class, # adding a required date_of_birth field. This allows us to check for # any hard references to the name "User" in forms/handlers etc. with RemoveGroupsAndPermissions(): class ExtensionUser(AbstractUser): date_of_birth = models.DateField() custom_objects = UserManager() REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth'] class Meta: app_label = 'auth'
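A sketch of exercising the custom manager above, for example from a test or a Django shell with migrations applied; the e-mail addresses, dates, and passwords are arbitrary example values:

# Creating users through the custom manager defined above.
import datetime

user = CustomUser.custom_objects.create_user(
    email='user@example.com',
    date_of_birth=datetime.date(1990, 1, 1),
    password='s3cret',
)
admin = CustomUser.custom_objects.create_superuser(
    email='admin@example.com',
    password='s3cret',
    date_of_birth=datetime.date(1980, 1, 1),
)
assert admin.is_admin and admin.is_staff  # is_staff mirrors is_admin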
mit
-8,085,511,089,587,579,000
31.40708
88
0.647947
false
3.965336
false
false
false
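Outside a test suite, a swappable user model like CustomUser above is activated through Django's AUTH_USER_MODEL setting and resolved with get_user_model(). A sketch; the app label 'auth' matches the Meta declarations above, though a real project would normally declare the model in its own app:

# settings.py sketch: point Django at the custom user model.
AUTH_USER_MODEL = 'auth.CustomUser'

# Application code then resolves the active model indirectly, instead of
# importing django.contrib.auth.models.User directly:
from django.contrib.auth import get_user_model

User = get_user_model()  # returns CustomUser while the setting is active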
creasyw/IMTAphy
modules/phy/copper/PyConfig/copper/TimeDependentDistBER.py
1
5027
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2009
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: [email protected]
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import openwns.distribution
import openwns.pyconfig

from math import pi
from math import exp
from math import log10

class TimeDependentDistBER(openwns.pyconfig.Sealed):
    """This class provides a time-distributed BER of the form:

    BER
     |  *                   *
     |   *                 *
     |    *               *
     |     *             *
     |      *           *
     |       *         *
     |        *       *
     |         *     *
     |          *   *
     |           ***
     |____________|____________ time
    Distance: start     middle      end

    After giving the start, the middle distance and the step, the rest from
    middle to end is generated automatically.
    The BER is then calculated from the distances between the nodes and is
    distributed in time using
    wns.Distribution.TimeDependent(time, wns.Distribution.Uniform).
    The curve can be repeated as many times as needed.
""" B=None Ps=None gs= None gr=None gamma=None f= None c= None _lambda=None d0=None k=None T=None def __init__ (self, dataRate, efficiency =1.0, Ps = 0.1, gs = 1, gr = 1, gamma = 2.4, f = 5.5*1E+9, c = 3.0*1E+8, d0 = 1.0, k = 1.38*1E-23, T = 290): self.B = dataRate/efficiency self.Ps = Ps self.gs = gs self.gr = gr self.gamma = gamma self.f = f self.c = c self._lambda = c/f self.d0 = d0 self.k = k self.T = T def getDistribution(self, simulationTime, repeatBERCurve, startDistance, middleDistance, step): dist = openwns.distribution.TimeDependent() start = startDistance middle = middleDistance distanceList = [] step = step time = 0 last = None for i in xrange(start, middle, -step): distanceList.append(i) last=i for i in xrange(last, start+step, step): distanceList.append(i) deltaT = (simulationTime/repeatBERCurve) / len(distanceList) for k in xrange(repeatBERCurve): for j in xrange(len(distanceList)): dist.eventList.append(openwns.distribution.Event(time, openwns.distribution.Uniform(1.4*self.getBER(distanceList[j]), 0.6*self.getBER(distanceList[j])))) time = time + deltaT return dist def getBER(self, distance): Noise=self.k*self.T*self.B Noise_dbm=10*log10(Noise*1000) const=self.Ps*self.gs*self.gr*pow((self._lambda/(4*pi*self.d0)),2) Pr=const*pow((self.d0/distance),self.gamma) SINR=10*log10(Pr*1000)-Noise_dbm BER=self.getQ(pow(2*SINR,0.5)) return BER def getQ(self, x): Q=((1.0/x*pow(2*pi,0.5))*exp(-(pow(x,2)/2))) return Q def findDistanceForThreshold(self, distance, threshold, side): # side = 1 means bigger than the threshold, side = 0 means smaller than the threshold if side == 1: if self.getBER(distance) >= threshold: return distance if side == 0: if self.getBER(distance) < threshold: return distance return 0 def findDistanceForThresholdFromList(self, distanceList, threshold, side): # side = 1 means bigger than the threshold, side = 0 means smaller than the threshold if side == 1: for j in xrange(len(distanceList)): if self.getBER(distanceList[j]) >= threshold: return distanceList[j] if side == 0: for i in xrange(len(distanceList)): if self.getBER(distanceList[i])<threshold: return distanceList[i]
gpl-2.0
-1,640,753,965,825,216,300
36.514925
169
0.536901
false
3.927344
false
false
false
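For reference, the link budget that getBER above implements is a textbook log-distance path-loss model. In the notation of the code, with d0 the reference distance and gamma the path-loss exponent, and with the one-term Q-function approximation used by getQ:

P_r(d) = P_s \, g_s \, g_r \left(\frac{\lambda}{4\pi d_0}\right)^{2} \left(\frac{d_0}{d}\right)^{\gamma}, \qquad \lambda = c/f

N = kTB, \qquad \mathrm{SNR_{dB}} = 10\log_{10}\!\left(10^{3} P_r\right) - 10\log_{10}\!\left(10^{3} N\right)

\mathrm{BER} = Q\!\left(\sqrt{2\,\mathrm{SNR_{dB}}}\right), \qquad Q(x) \approx \frac{e^{-x^{2}/2}}{x\sqrt{2\pi}}

Note that, exactly as in the code, the dB-valued SNR is fed directly into the Q-function argument.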
sniemi/SamPy
sandbox/src1/examples/multi_image.py
1
1769
#!/usr/bin/env python ''' Make a set of images with a single colormap, norm, and colorbar. It also illustrates colorbar tick labelling with a multiplier. ''' from matplotlib.pyplot import figure, show, sci from matplotlib import cm, colors from matplotlib.font_manager import FontProperties from numpy import amin, amax, ravel from numpy.random import rand Nr = 3 Nc = 2 fig = figure() cmap = cm.cool figtitle = 'Multiple images' t = fig.text(0.5, 0.95, figtitle, horizontalalignment='center', fontproperties=FontProperties(size=16)) cax = fig.add_axes([0.2, 0.08, 0.6, 0.04]) w = 0.4 h = 0.22 ax = [] images = [] vmin = 1e40 vmax = -1e40 for i in range(Nr): for j in range(Nc): pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h] a = fig.add_axes(pos) if i > 0: a.set_xticklabels([]) # Make some fake data with a range that varies # somewhat from one plot to the next. data =((1+i+j)/10.0)*rand(10,20)*1e-6 dd = ravel(data) # Manually find the min and max of all colors for # use in setting the color scale. vmin = min(vmin, amin(dd)) vmax = max(vmax, amax(dd)) images.append(a.imshow(data, cmap=cmap)) ax.append(a) # Set the first image as the master, with all the others # observing it for changes in cmap or norm. norm = colors.Normalize(vmin=vmin, vmax=vmax) for i, im in enumerate(images): im.set_norm(norm) if i > 0: images[0].add_observer(im) # The colorbar is also based on this master image. fig.colorbar(images[0], cax, orientation='horizontal') # We need the following only if we want to run this # script interactively and be able to change the colormap. sci(images[0]) show()
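The add_observer call in the example above is how this (older) matplotlib API kept sibling images in sync after a cmap or norm change. With a reasonably recent matplotlib, the same shared-scale effect can be obtained by handing every image one Normalize instance up front; a sketch under that assumption:

# Shared norm + single colorbar with the modern matplotlib API.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors

fig, axes = plt.subplots(3, 2)
data = [np.random.rand(10, 20) * 1e-6 for _ in range(6)]

# One Normalize instance spanning the global min/max of all panels.
norm = colors.Normalize(vmin=min(d.min() for d in data),
                        vmax=max(d.max() for d in data))
images = [ax.imshow(d, cmap='cool', norm=norm)
          for ax, d in zip(axes.flat, data)]

# A single colorbar serves all six images because they share the norm.
fig.colorbar(images[0], ax=axes, orientation='horizontal')
plt.show()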
bsd-2-clause
9,214,454,940,438,378,000
23.232877
64
0.637648
false
3.114437
false
false
false
jhnphm/boar
front.py
1
29536
# -*- coding: utf-8 -*- # Copyright 2010 Mats Ekberg # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Front class serves two important purposes. First, it is the API of boar. All interaction with a repository must happen through this interface. Secondly, all arguments and return values are primitive values that can be serialized easily, which makes it easy to implement an RPC mechanism for this interface. """ from blobrepo import repository from boar_exceptions import * import sys from time import ctime, time from common import md5sum, is_md5sum, warn, get_json_module, StopWatch, calculate_progress from boar_common import SimpleProgressPrinter from blobrepo.sessions import bloblist_fingerprint import copy json = get_json_module() import base64 def get_file_contents(front, session_name, file_name): """This is a convenience function to get the full contents of a named file from the latest revision of a named session. It must only be used on files that are known to be of a reasonable size. The session must exist or an SessionNotFoundError will the thrown. If there is a session, but no matching file, None is returned.""" rev = front.find_last_revision(session_name) if not rev: raise SessionNotFoundError("No such session: %s" % session_name) for blobinfo in front.get_session_bloblist(rev): if blobinfo['filename'] == file_name: blob_reader = front.get_blob(blobinfo['md5sum']) return blob_reader.read() return None def add_file_simple(front, filename, contents): """Adds a file with contents to a new snapshot. The front instance "create_session()" must have been called before this function is used, or an exception will be thrown.""" content_checksum = md5sum(contents) if not front.has_blob(content_checksum) and not front.new_snapshot_has_blob(content_checksum): front.init_new_blob(content_checksum, len(contents)) front.add_blob_data(content_checksum, base64.b64encode(contents)) front.blob_finished(content_checksum) now = int(time()) front.add({'filename': filename, 'md5sum': content_checksum, 'ctime': now, 'mtime': now, 'size': len(contents)}) def set_file_contents(front, session_name, filename, contents): """Creates a new snapshot and replaces/creates the given file in the session.""" if get_file_contents(front, session_name, filename) == contents: return # No changes necessary rev = front.find_last_revision(session_name) front.create_session(session_name, base_session = rev) add_file_simple(front, filename, contents) front.commit(session_name) valid_session_props = set(["ignore", "include"]) def clone(from_front, to_front): from_front.acquire_repo_lock() to_front.acquire_repo_lock() try: __clone(from_front, to_front) finally: # Always try to release the locks, but any errors here are # probably not very interesting, let's ignore them. 
try: to_front.release_repo_lock() except: pass try: from_front.release_repo_lock() except: pass def __clone(from_front, to_front): # Check that other repo is a continuation of this one assert is_continuation(base_front = to_front, cont_front = from_front), \ "Cannot pull: %s is not a continuation of %s" % (from_front, to_front) # Copy all new sessions other_max_rev = from_front.get_highest_used_revision() self_max_rev = to_front.get_highest_used_revision() self = to_front other_repo = from_front assert other_max_rev >= self_max_rev sessions_to_clone = range(self_max_rev + 1, other_max_rev + 1) count = 0 all_deleted_snapshots = from_front.get_deleted_snapshots() snapshots_to_delete = find_snapshots_to_delete(from_front, to_front) if snapshots_to_delete: # It should not be possible to have incoming deleted snapshots # without at least one new snapshot as well. if not to_front.allows_permanent_erase(): raise UserError("Source repo has deleted snapshots, but destination repo does not allow deletions") assert sessions_to_clone for session_id in sessions_to_clone: count += 1 print "Cloning snapshot %s (%s/%s)" % (session_id, count, len(sessions_to_clone)) if session_id in all_deleted_snapshots: self.create_session(u"__deleted") if snapshots_to_delete: to_front.erase_snapshots(snapshots_to_delete) snapshots_to_delete = None deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(session_id) self.commit_deleted_snapshot(deleted_name, deleted_fingerprint) else: base_session = other_repo.get_base_id(session_id) session_info = other_repo.get_session_info(session_id) session_name = session_info['name'] self.create_session(session_name, base_session) if snapshots_to_delete: to_front.erase_snapshots(snapshots_to_delete) snapshots_to_delete = None __clone_single_snapshot(from_front, to_front, session_id) self.commit_raw(session_name = session_name, log_message = session_info.get("log_message", None), timestamp = session_info.get('timestamp', None), date = session_info['date']) if self.allows_permanent_erase(): removed_blobs_count = self.erase_orphan_blobs() print "Found and removed", removed_blobs_count," orphan blobs" def find_snapshots_to_delete(from_front, to_front): """ Find all snapshots in from_front that has been deleted, but has not yet been deleted in the clone to_front. """ snapshots_to_delete = [] self_max_rev = to_front.get_highest_used_revision() already_deleted_snapshots = set(to_front.get_deleted_snapshots()) for rev in from_front.get_deleted_snapshots(): if rev > self_max_rev: continue if rev in already_deleted_snapshots: continue deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(rev) session_info = to_front.get_session_info(rev) assert session_info['name'] == deleted_name assert to_front.get_session_fingerprint(rev) == deleted_fingerprint snapshots_to_delete.append(rev) return snapshots_to_delete def __clone_single_snapshot(from_front, to_front, session_id): """ This function requires that a new snapshot is underway in to_front. It does not commit that snapshot. 
""" assert from_front != to_front other_bloblist = from_front.get_session_bloblist(session_id) other_raw_bloblist = from_front.get_session_raw_bloblist(session_id) for n, blobinfo in enumerate(other_raw_bloblist): action = blobinfo.get("action", None) if not action: md5sum = blobinfo['md5sum'] if not (to_front.has_blob(md5sum) or to_front.new_snapshot_has_blob(md5sum)): pp = SimpleProgressPrinter(sys.stdout, label="Sending blob %s of %s (%s MB)" % (n+1, len(other_raw_bloblist), round(blobinfo['size'] / (1.0 * 2**20), 3))) sw = StopWatch(enabled=False, name="front.clone") to_front.init_new_blob(md5sum, blobinfo['size']) sw.mark("front.init_new_blob()") datasource = from_front.get_blob(md5sum) pp.update(0.0) datasource.set_progress_callback(pp.update) to_front.add_blob_data_streamed(blob_md5 = md5sum, datasource = datasource) pp.finished() sw.mark("front.add_blob_data_streamed()") to_front.blob_finished(md5sum) sw.mark("front.finished()") to_front.add(blobinfo) elif action == "remove": to_front.remove(blobinfo['filename']) else: assert False, "Unexpected blobinfo action: " + str(action) def is_identical(front1, front2): """ Returns True iff the other repo contains the same sessions with the same fingerprints as this repo.""" if not is_continuation(base_front = front2, cont_front = front2): return False return set(front1.get_session_ids()) == set(front2.get_session_ids()) def is_continuation(base_front, cont_front): """ Returns True if the other repo is a continuation of this one. That is, the other repo contains all the sessions of this repo, and then zero of more additional sessions.""" if set(base_front.get_session_ids()) > set(cont_front.get_session_ids()): # Not same sessions - cannot be successor return False other_deleted = cont_front.get_deleted_snapshots() for session_id in base_front.get_session_ids(): if session_id in other_deleted: continue base_front_session_info = base_front.get_session_info(session_id) cont_front_session_info = cont_front.get_session_info(session_id) if base_front_session_info['name'] != cont_front_session_info['name']: return False if base_front.get_session_fingerprint(session_id) != cont_front.get_session_fingerprint(session_id): return False return True def verify_repo(front, verify_blobs = True, verbose = False): """Returns True if the repo was clean. Otherwise throws an exception.""" for rev in range(1, front.repo_get_highest_used_revision() + 1): front.repo_verify_snapshot(rev) session_ids = front.get_session_ids() if verbose: print "Verifying %s snapshots" % (len(session_ids)) existing_blobs = set(front.get_all_raw_blobs()) | set(front.get_all_recipes()) for i in range(0, len(session_ids)): id = session_ids[i] bloblist = front.get_session_bloblist(id) # We must not use a # cached bloblist # here - we're # verifying the # repo! calc_fingerprint = bloblist_fingerprint(bloblist) if calc_fingerprint != front.get_session_fingerprint(id): raise CorruptionError("Fingerprint didn't match for snapshot %s" % id) for bi in bloblist: if bi['md5sum'] not in existing_blobs: raise CorruptionError("Snapshot %s is missing blob %s" % (session_ids[i], bi['md5sum'])) if verbose: print "Snapshot %s (%s): All %s blobs ok" % (id, calc_fingerprint, len(bloblist)) if not verify_blobs: if verbose: print "Skipping blob verification" return True if verbose: print "Collecting a list of all blobs..." count = front.init_verify_blobs() if verbose: print "Verifying %s blobs..." 
% (count) done = 0 while done < count: done += len(front.verify_some_blobs()) if verbose: print done, "of "+str(count)+" blobs verified, "+ \ str(round(1.0*done/count * 100,1)) + "% done." return True class Front: def __init__(self, repo): self.repo = repo self.new_session = None self.blobs_to_verify = [] self.loadstats = {} def allows_permanent_erase(self): return self.repo.allows_permanent_erase() def get_session_ids(self, session_name = None): sids = self.repo.get_all_sessions() if not session_name: return sids result = [] for sid in sids: session_info = self.get_session_info(sid) name = session_info.get("name") if name == session_name: result.append(sid) return result def get_session_names(self, include_meta = False): sessions_count = {} for sid in self.get_session_ids(): session_info = self.get_session_info(sid) name = session_info.get("name", "<no name>") if not include_meta and name.startswith("__"): continue sessions_count[name] = sessions_count.get(name, 0) + 1 return sessions_count.keys() def get_deleted_snapshots(self): return self.repo.get_deleted_snapshots() def get_dedup_block_size(self): return repository.DEDUP_BLOCK_SIZE def get_dedup_block_location(self, sha): return self.repo.get_block_location(sha) def get_deleted_snapshot_info(self, rev): """ Returns a tuple containing the snapshot deleted_name and deleted_fingerprint. """ assert self.repo.has_snapshot(rev) session_reader = self.repo.get_session(rev) properties = session_reader.get_properties() assert properties['client_data']['name'] == "__deleted", \ "Cannot get deleted snapshot info for not-deleted snapshots" return properties.get('deleted_name', None), properties.get('deleted_fingerprint', None) def __set_session_property(self, session_name, property_name, new_value): assert property_name in valid_session_props meta_session_name = "__meta_" + session_name if self.find_last_revision(meta_session_name) == None: self.__mksession(meta_session_name) value_string = json.dumps(new_value, indent = 4) assert value_string == json.dumps(new_value, indent = 4), "Memory corruption?" 
set_file_contents(self, meta_session_name, property_name + ".json", value_string) def __get_session_property(self, session_name, property_name): """Returns the value of the given session property, or None if there is no such property.""" assert property_name in valid_session_props meta_session_name = "__meta_" + session_name try: value_string = get_file_contents(self, meta_session_name, property_name + ".json") except SessionNotFoundError: return None if value_string == None: return None return json.loads(value_string) def set_session_ignore_list(self, session_name, new_list): assert isinstance(new_list, (tuple, list)), new_list self.__set_session_property(session_name, "ignore", new_list) def get_session_ignore_list(self, session_name): value = self.__get_session_property(session_name, "ignore") if value == None: return [] return value def set_session_include_list(self, session_name, new_list): assert isinstance(new_list, (tuple, list)), new_list self.__set_session_property(session_name, "include", new_list) def get_session_include_list(self, session_name): value = self.__get_session_property(session_name, "include") if value == None: return [] return value def get_session_info(self, id): """ Returns None if there is no such snapshot """ if not self.repo.has_snapshot(id): return None session_reader = self.repo.get_session(id) properties = session_reader.get_properties() return properties['client_data'] def get_base_id(self, id): session_reader = self.repo.get_session(id) baseid = session_reader.get_base_id() return baseid def get_predecessor(self, id): info = self.get_session_info(id) assert info, "No such revision" session_name = info['name'] ids = self.get_session_ids(session_name) ids.sort() pos = ids.index(id) assert pos >= 0 if pos == 0: return None return ids[pos - 1] def get_session_fingerprint(self, id): session_reader = self.repo.get_session(id) properties = session_reader.get_properties() assert "fingerprint" in properties return properties["fingerprint"] def get_session_bloblist(self, id): session_reader = self.repo.get_session(id) bloblist = list(session_reader.get_all_blob_infos()) seen = set() for b in bloblist: assert b['filename'] not in seen, "Duplicate file found in bloblist - internal error" seen.add(b['filename']) self.loadstats[id] = session_reader.load_stats return bloblist def get_session_load_stats(self, id): """Returns the load stats dict for the given session. The return value may be None if the session instance has not yet loaded its bloblist.""" return copy.copy(self.loadstats.get(id, None)) def get_session_raw_bloblist(self, id): session_reader = self.repo.get_session(id) return copy.copy(session_reader.get_raw_bloblist()) def get_stats(self): return self.repo.get_stats() def create_session(self, session_name, base_session = None, force_base_snapshot = False): """Creates a new snapshot for the given session. 
Commit() must be called when the construction of the new snapshot is completed().""" assert isinstance(session_name, basestring), session_name assert not self.new_session, "There already exists an active new snapshot" self.new_session = self.repo.create_snapshot(session_name = session_name, base_session = base_session, force_base_snapshot = force_base_snapshot) def create_base_snapshot(self, session_name, truncate = False): assert not self.new_session assert truncate in (True, False) with self.repo: sid = self.find_last_revision(session_name) assert sid, "No such session: %s" % session_name old_fingerprint = self.get_session_fingerprint(sid) self.create_session(session_name, base_session = sid, force_base_snapshot = True) if truncate: if not self.repo.allows_permanent_erase(): raise UserError("This repository does not allow destructive changes.") snapshots_to_erase = self.get_session_ids(session_name) self.new_session.erase_snapshots(snapshots_to_erase) new_sid = self.commit(session_name) new_fingerprint = self.get_session_fingerprint(new_sid) assert old_fingerprint == new_fingerprint assert self.repo.get_session(new_sid).get_base_id() == None return new_sid def truncate(self, session_name): return self.create_base_snapshot(session_name, truncate = True) def erase_snapshots(self, snapshot_ids): assert self.new_session, "erasing snapshots requires a new snapshot" self.new_session.erase_snapshots(snapshot_ids) def erase_orphan_blobs(self): with self.repo: return self.repo.erase_orphan_blobs() def cancel_snapshot(self): if not self.new_session: warn("Tried to cancel non-active new snapshot") return try: self.new_session.cancel() finally: self.new_session = None def has_snapshot(self, session_name, snapshot_id): """ Returns True if there exists a session with the given session_name and snapshot id """ if snapshot_id not in self.get_session_ids(): return False session_info = self.get_session_info(snapshot_id) name = session_info.get("name", None) return name == session_name def get_highest_used_revision(self): return self.repo.get_highest_used_revision() def is_deleted(self, snapshot_id): """ Returns True if the given snapshot used to exist, but has been explicitly deleted.""" return self.repo.is_deleted(snapshot_id) def init_new_blob(self, blob_md5, size): self.new_session.init_new_blob(blob_md5, size) def get_all_rolling(self): return self.repo.blocksdb.get_all_rolling() def has_block(self, sha256): return self.repo.blocksdb.has_block(sha256) def add_blob_data(self, blob_md5, b64data): """ Must be called after a create_session() """ self.new_session.add_blob_data(blob_md5, base64.b64decode(b64data)) def add_blob_data_streamed(self, blob_md5, datasource): import hashlib, common assert is_md5sum(blob_md5) summer = hashlib.md5() total = datasource.bytes_left() while datasource.bytes_left() > 0: # repository.DEDUP_BLOCK_SIZE is a reasonable size - no other reason block = datasource.read(repository.DEDUP_BLOCK_SIZE) summer.update(block) self.new_session.add_blob_data(blob_md5, block) if summer.hexdigest() != blob_md5: raise common.ContentViolation("Received blob data differs from promised.") def blob_finished(self, blob_md5): self.new_session.blob_finished(blob_md5) def add(self, metadata): """ Must be called after a create_session(). Adds a link to a existing blob. 
Will throw an exception if there is no such blob """ assert metadata.has_key("md5sum") assert metadata.has_key("filename") self.new_session.add(metadata) def remove(self, filename): """Mark the given file as deleted in the snapshot currently under construction.""" assert self.new_session self.new_session.remove(filename) def __mksession(self, session_name): """Create a new session. For internal use. Allows names that starts with "__", but throws UserError for invalid names or if the session already exists. """ if self.find_last_revision(session_name) != None: raise Exception("There already exists a session named '%s'" % (session_name)) if session_name.strip() != session_name: raise UserError("Session names must not begin or end with whitespace.") if session_name == "": raise UserError("Session names must not be empty") if "/" in session_name: raise UserError("Session names must not contain slashes.") if "\\" in session_name: raise UserError("Session names must not contain backslashes.") if self.find_last_revision(session_name) != None: raise UserError("There already exists a session named '%s'" % (session_name)) self.create_session(session_name = session_name) return self.commit_raw(session_name, None, int(time()), ctime()) def mksession(self, session_name): """Create a new session. Throws a UserError for invalid session names and if the session already exists.""" if session_name.startswith("__"): raise UserError("Session names must not begin with double underscores.") return self.__mksession(session_name) def commit_deleted_snapshot(self, deleted_name, deleted_fingerprint): self.new_session.deleted_snapshot(deleted_name, deleted_fingerprint) rev = self.new_session.commit({'name': '__deleted'}) self.new_session = None return rev def commit_raw(self, session_name, log_message, timestamp, date, progress_callback = lambda x: None): """Commit a snapshot. For internal use. The session does not need to exist beforehand.""" assert self.new_session, "There is no active snapshot to commit" assert timestamp == None or type(timestamp) == int session_info = {} session_info["name"] = session_name if timestamp: session_info["timestamp"] = timestamp session_info["date"] = date if log_message: session_info["log_message"] = log_message rev = self.new_session.commit(session_info, progress_callback) self.new_session = None return rev def commit(self, session_name, log_message = None, progress_callback = lambda x: None): """Commit a snapshot started with create_snapshot(). The session must exist beforehand. Accepts an optional log message.""" if log_message != None: assert type(log_message) == unicode, "Log message must be in unicode" assert type(session_name) == unicode if not self.find_last_revision(session_name): raise UserError("Session '%s' does not seem to exist in the repo." % (session_name)) return self.commit_raw(session_name, log_message, int(time()), ctime(), progress_callback = progress_callback) def get_blob_size(self, sum): return self.repo.get_blob_size(sum) def get_blob(self, sum, offset = 0, size = None): datasource = self.repo.get_blob_reader(sum, offset, size) return datasource def has_blob(self, sum): return self.repo.has_blob(sum) def get_all_blobs(self): """ Returns a list of all blobs (raw or recipes) in the repository. This method is deprecated. 
        Use get_all_raw_blobs() and/or get_all_recipes() instead."""
        return self.get_all_raw_blobs() + self.get_all_recipes()

    def get_all_raw_blobs(self):
        return self.repo.get_raw_blob_names()

    def get_all_recipes(self):
        return self.repo.get_recipe_names()

    def new_snapshot_has_blob(self, sum):
        assert self.new_session, "new_snapshot_has_blob() must only be called when a new snapshot is underway"
        return self.new_session.has_blob(sum)

    def find_last_revision(self, session_name):
        """ Returns the id of the latest snapshot in the specified
        session. Returns None if there is no such session. """
        return self.repo.find_last_revision(session_name)

    def init_verify_blobs(self):
        assert self.blobs_to_verify == []
        self.blobs_to_verify = self.repo.get_raw_blob_names() + self.repo.get_recipe_names()
        for scanner in self.repo.scanners:
            scanner.scan_init()
        return len(self.blobs_to_verify)

    def verify_some_blobs(self):
        succeeded = []
        count = min(100, len(self.blobs_to_verify))
        for i in range(0, count):
            blob_to_verify = self.blobs_to_verify.pop()
            if not self.repo.verify_blob(blob_to_verify):
                raise CorruptionError("Blob corrupted: " + blob_to_verify)
            succeeded.append(blob_to_verify)
        if not self.blobs_to_verify:
            for scanner in self.repo.scanners:
                scanner.scan_finish()
        return succeeded

    def repo_get_highest_used_revision(self):
        return self.repo.get_highest_used_revision()

    def repo_verify_snapshot(self, rev):
        return self.repo.verify_snapshot(rev)

    def acquire_repo_lock(self):
        self.repo.repo_mutex.lock()

    def release_repo_lock(self):
        self.repo.repo_mutex.release()

    def get_repo_identifier(self):
        return self.repo.get_repo_identifier()

    def deduplication_enabled(self):
        return self.repo.deduplication_enabled()

class DryRunFront:

    def __init__(self, front):
        self.realfront = front

    def get_session_ids(self):
        return self.realfront.get_session_ids()

    def get_session_info(self, id):
        return self.realfront.get_session_properties(id)['client_data']

    def get_session_bloblist(self, id):
        return self.realfront.get_session_bloblist(id)

    def create_session(self, session_name, base_session = None, force_base_snapshot = False):
        pass

    def init_new_blob(self, blob_md5, size):
        pass

    def add_blob_data(self, blob_md5, b64data):
        pass

    def get_all_rolling(self):
        return []

    def add_blob_data_streamed(self, blob_md5=None, progress_callback=None, datasource=None):
        while datasource.remaining:
            datasource.read(2**12)

    def blob_finished(self, blob_md5):
        pass

    def add(self, metadata):
        pass

    def remove(self, filename):
        pass

    def commit(self, session_name, log_message = None, progress_callback = None):
        return 0

    def get_blob_size(self, sum):
        return self.realfront.get_blob_size(sum)

    def get_blob_b64(self, sum, offset = 0, size = None):
        return self.realfront.get_blob_b64(sum, offset, size)

    def has_blob(self, sum):
        return self.realfront.has_blob(sum)

    def new_snapshot_has_blob(self, sum):
        return False

    def find_last_revision(self, session_name):
        return self.realfront.find_last_revision(session_name)

    def mksession(self, session_name):
        pass

for attrib in Front.__dict__:
    if not attrib.startswith("_") and callable(Front.__dict__[attrib]):
        if not attrib in DryRunFront.__dict__:
            pass #warn("Missing in DryRunFront: "+ attrib)
apache-2.0
9,191,154,933,669,040,000
41.134094
118
0.628961
false
3.933413
false
false
false
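Note on the front.py record above: add_blob_data_streamed() hashes every block as it arrives and refuses the blob if the final digest differs from the promised md5, so a corrupted transfer can never be committed. Below is a minimal, self-contained sketch of that checksum-while-streaming pattern; the helper name, block size and in-memory streams are illustrative, not part of the boar API.

import hashlib
import io

def stream_with_checksum(expected_md5, datasource, sink, block_size=2**16):
    # Feed a running md5 with every block while copying it to the sink,
    # mirroring the verify-while-streaming done in add_blob_data_streamed().
    summer = hashlib.md5()
    while True:
        block = datasource.read(block_size)
        if not block:
            break
        summer.update(block)
        sink.write(block)
    if summer.hexdigest() != expected_md5:
        raise ValueError("Received blob data differs from promised.")

data = b"example blob contents"
src, dst = io.BytesIO(data), io.BytesIO()
stream_with_checksum(hashlib.md5(data).hexdigest(), src, dst)
assert dst.getvalue() == data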
nirinA/scripts_python
mangorona.py
1
12832
'''game of mangorona. goal: keep more pawn on the board than your opponent. movement: move your pawn to an unoccupied place. pick: fill or create an empty place beetwen your pawn and your opponent's, and pick all opponent pawn in the same line of movement. ''' import sys import random import time import profile import traceback class IllegalMove(Exception): pass class NoMoreMove(Exception): pass class Init(object): def __init__(self, dimension, players, lattice): self.dimension = dimension self.xmax, self.ymax = dimension self.player1, self.player2, self.blank = players self.lattice = lattice self.all = [(x,y) for x in range(self.xmax) for y in range(self.ymax)] self.gain = {self.player1:0, self.player2:0} class Position(Init): '''get all positions around one point''' def __init__(self, p, dimension, lattice): Init.__init__(self, dimension, ('','',''), lattice) self.xi, self.yi = p ##'''pawn can move only horizontally ''' self.p1 = self.xi+1, self.yi self.p2 = self.xi-1, self.yi ##'''pawn can move only verticaly''' self.p3 = self.xi, self.yi+1 self.p4 = self.xi, self.yi-1 ##'''pawn can also move diagonaly''' self.p5 = self.xi-1, self.yi-1 self.p6 = self.xi-1, self.yi+1 self.p7 = self.xi+1, self.yi-1 self.p8 = self.xi+1, self.yi+1 if lattice is None: if sum(p)%2: self.around = self.p1,self.p2,self.p3,self.p4 else: self.around = self.p1,self.p2,self.p3,self.p4,\ self.p5,self.p6,self.p7,self.p8 elif lattice == 'star': if sum(p)%2: self.around = self.p1,self.p2,self.p3,self.p4 else: self.around = self.p1,self.p2,self.p3,self.p4,\ self.p5,self.p6,self.p7,self.p8 elif lattice == 'diamond': if sum(p)%2: self.around = self.p1,self.p2,self.p3,self.p4,\ self.p5,self.p6,self.p7,self.p8 else: self.around = self.p1,self.p2,self.p3,self.p4 elif lattice == 'cubic': self.around = self.p1,self.p2,self.p3,self.p4 elif lattice == 'web': self.around = self.p1,self.p2,self.p3,self.p4,\ self.p5,self.p6,self.p7,self.p8 elif lattice == 'X': self.around = self.p5,self.p6,self.p7,self.p8 def Movable(self): return [p for p in self.around if p in self.all] def Deletable(self, final): xf, yf = final deltax = xf - self.xi deltay = yf - self.yi removeup = [] removedown = [] xu = xd = self.xi yu = yd = self.yi while (0<=xu<=self.xmax) and (0<=yu<=self.ymax): xu += deltax yu += deltay removeup.append((xu,yu)) removeup.remove((xf, yf)) while (0<=xd<=self.xmax) and (0<=yd<=self.ymax): xd -= deltax yd -= deltay removedown.append((xd,yd)) return [xy for xy in removeup if xy in self.all],\ [xy for xy in removedown if xy in self.all] class Mangorona(Init): def __init__(self, players, lattice, dimension, matrix=None): '''set matrix to None to create an initial board with self.Create''' if matrix is None: self.matrix = self.Create(dimension, players) else: self.matrix = matrix Init.__init__(self, (len(self.matrix), len(self.matrix[0])), players, lattice) def Create(self, dimension, players): xmax, ymax = dimension player1, player2, blank = players m =[[None for i in range(ymax)] for j in range(xmax)] for x in range(xmax): for y in range(ymax): if (x < int(xmax/2)): m[x][y]=player1 elif (x == int(xmax/2)): if (y < int(ymax/2)): if y%2 != 0: m[x][y]=player2 else: m[x][y]=player1 elif (y == int(ymax/2)): m[x][y]=blank else: if y%2 != 0: m[x][y]=player1 else: m[x][y]=player2 else: m[x][y]=player2 return m def Zero(self): '''return the position(s) of blank''' w = [] for i in range(self.xmax): c = self.matrix[i].count(self.blank) s = 0 while c > 0: n = self.matrix[i].index(self.blank, s) w.append((i, n)) s = n + 1 c -= 1 return w def 
Pawn(self, position, turn): x, y = position if self.matrix[x][y] == turn: return True return False def MovablePawn(self, turn): movable = [] wherezero = self.Zero() for p in wherezero: pos = Position(p, self.dimension, self.lattice) turnmovable = [i for i in pos.Movable() if self.Pawn(i,turn)] movable.extend(turnmovable) return movable def ChangePawn(self, turn, initial, final): xi, yi = initial xf, yf = final self.matrix[xi][yi]=self.blank self.matrix[xf][yf]=turn todelete = Position(initial, self.dimension, self.lattice).Deletable(final) for t in todelete: for p in t: x,y = p if (not self.Pawn(p, turn) and self.matrix[x][y] != self.blank): self.matrix[x][y] = self.blank self.gain[turn] += 1 else: break def Move(self, turn, initial, final): if initial == final: raise IllegalMove("you don't move") if not self.Pawn(initial, turn): raise IllegalMove('not your pawn') if final not in self.Zero(): raise IllegalMove('destination must be empty') if initial not in self.MovablePawn(turn): raise IllegalMove('this pawn cannot move') if final not in Position(initial, self.dimension, self.lattice).around: raise IllegalMove('not allowable move') self.ChangePawn(turn, initial, final) def Winner(self): if self.gain[self.player1]<self.gain[self.player2]: return self.player2 elif self.gain[self.player1]>self.gain[self.player2]: return self.player1 else: return self.blank class AllowableMovement(object): def __init__(self, m, turn): self.m = m.matrix self.blank = m.blank self.mZero = m.Zero() self.mMovablePawn = m.MovablePawn(turn) self.mdimension = m.dimension self.player = turn self.mlattice = m.lattice def Move(self, maximum=False, getall=False): '''check if the player can move, and used as machine player''' move = {} for i in self.mMovablePawn: pos = Position(i, self.mdimension, self.mlattice) listf = [f for f in pos.around if f in self.mZero] for f in listf: if getall: move.update({(i,f):0}) else: moveup , movedown = pos.Deletable(f) up = [self.m[x][y] for (x,y) in moveup] down = [self.m[x][y] for (x,y) in movedown] if self.blank in up: up = up[:up.index(self.blank)] if self.player in up: up = up[:up.index(self.player)] if self.blank in down: down = down[:down.index(self.blank)] if self.player in down: down = down[:down.index(self.player)] get = len(up+down) if get>0: move.update({(i,f):get}) if move: if maximum: getmax = max(move.values()) for k in list(move.keys()): if move[k]<getmax: move.pop(k) return list(move.keys()) else: raise NoMoreMove('%s cannot move anymore'%self.player) class Board(object): '''displaying the game in command line mode''' def __init__(self, m): self.m = m.matrix self.x = m.xmax self.y = m.ymax self.evenline = [chr(92), '/'] self.oddline = ['/', chr(92)] if m.lattice == 'diamond': self.evenline.reverse() self.oddline.reverse() if m.lattice == 'cubic': self.evenline = [' ', ' '] self.oddline = [' ', ' '] if m.lattice == 'web': self.evenline = ['x', 'x'] self.oddline = ['x', 'x'] def WidthLine(self, listline): if self.y%2==0: return ' |%s|'%'|'.join(listline*int(self.y/2))[:-2] return ' |%s|'%'|'.join(listline*int(self.y/2)) def Inline(self, i): if i%2==0: return self.WidthLine(self.evenline) if i%2!=0: return self.WidthLine(self.oddline) def Display(self): d = ' '+' '.join([str(j) for j in range(self.y)])+'\n' for i in range(self.x): d += str(i)+' ' d += '-'.join([str(self.m[i][j]) for j in range(self.y)]) d += ' '+str(i)+'\n' if i!=self.x-1: d += self.Inline(i)+'\n' return d+' '+' '.join([str(j) for j in range(self.y)])+'\n' def MachineMachine(): LATTICE = 'star' ##, 'diamond' 
DIMENSION = (5,11) PLAYERS = 'a', 'b', ' ' ##mc = Mangorona(PLAYERS,'cubic', DIMENSION, None) ##maximum=True ##getall=True mc = Mangorona(PLAYERS,'diamond', (7,11), None) maximum=True getall=False t = PLAYERS[:2] tab = 0 print(Board(mc).Display()) while True: try: turn = t[tab%2] movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall) machine = random.choice(movable) print(turn, 'move:', machine[0], machine[1]) mc.Move(turn, machine[0], machine[1]) print(Board(mc).Display()) print(mc.gain['a'], mc.gain['b']) ##, t1-t0 print() tab += 1 except IllegalMove: exc = traceback.format_exception(*sys.exc_info())[-1] print(exc) except NoMoreMove: exc = traceback.format_exception(*sys.exc_info())[-1] print(exc) print('winner:', mc.Winner()) break def TestvsMachine(): LATTICE = 'star' DIMENSION = 5, 9 PLAYERS = 'a', 'b', ' ' machineplayer = PLAYERS[0] mc = Mangorona(PLAYERS,LATTICE, DIMENSION, None) maximum=True getall=False t = PLAYERS[:2] tab = 0 print(Board(mc).Display()) while True: try: turn = t[tab%2] movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall) if turn == machineplayer: machine = random.choice(movable) print(turn, 'move:', machine[0], machine[1]) mc.Move(turn, machine[0], machine[1]) print(Board(mc).Display()) print(mc.gain['a'], mc.gain['b']) ##, t1-t0 print() tab += 1 else: h = input("type:'?' for movable, 'z' for Zero, 'h' for rules\nyour move - :") if h == '?': print(mc.MovablePawn(turn)) elif h == 'z': print(mc.Zero()) elif h == 'h': print(__doc__) else: human = eval(h) if human not in movable: raise IllegalMove('not allowable move') mc.Move(turn, human[0], human[1]) print(Board(mc).Display()) tab += 1 except IllegalMove: exc = traceback.format_exception(*sys.exc_info())[-1] print(exc) except NoMoreMove: exc = traceback.format_exception(*sys.exc_info())[-1] print(exc) print('winner:', mc.Winner()) break except KeyboardInterrupt: raise SystemExit except: traceback.print_exc() __version__ = '3k-0.0.0' __author__ = 'nirinA' __date__ = 'Sat May 10 21:52:15 2008'
unlicense
4,551,880,349,135,935,500
33.869565
93
0.488622
false
3.583357
false
false
false
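In mangorona.py above, Position.Deletable() picks up opponent pawns by walking outward from the moved pawn along the line of movement until it falls off the board. Here is that ray walk in isolation, written as a generator; the names and the half-open bounds convention are illustrative.

def ray(start, delta, xmax, ymax):
    # Yield coordinates step by step along one direction, stopping at the
    # board edge - the same walk Deletable() uses to collect capture lines.
    x, y = start
    dx, dy = delta
    while True:
        x, y = x + dx, y + dy
        if not (0 <= x < xmax and 0 <= y < ymax):
            return
        yield (x, y)

print(list(ray((2, 2), (1, 1), 5, 5)))   # [(3, 3), (4, 4)]
print(list(ray((2, 2), (0, -1), 5, 5)))  # [(2, 1), (2, 0)]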
rjschwei/azure-sdk-for-python
azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/azure_sku.py
1
1111
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class AzureSku(Model): """AzureSku. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: SKU name. Default value: "S1" . :vartype name: str :ivar tier: SKU tier. Default value: "Standard" . :vartype tier: str """ _validation = { 'name': {'required': True, 'constant': True}, 'tier': {'required': True, 'constant': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } name = "S1" tier = "Standard"
mit
1,389,177,205,370,494,500
27.487179
76
0.540954
false
4.240458
false
false
false
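azure_sku.py above declares name and tier as required constants that the server populates; msrest's _validation map is what keeps clients from changing them. A plain-Python descriptor gives the same read-only behaviour in miniature - an analogy for illustration, not how msrest is actually implemented.

class ConstantField(object):
    # Descriptor that always returns a fixed value and rejects writes,
    # approximating {'required': True, 'constant': True}.
    def __init__(self, value):
        self.value = value
    def __get__(self, obj, objtype=None):
        return self.value
    def __set__(self, obj, value):
        raise AttributeError("constant field cannot be reassigned")

class Sku(object):
    name = ConstantField("S1")
    tier = ConstantField("Standard")

sku = Sku()
print(sku.name, sku.tier)  # S1 Standard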
vjlux/luxlib
LuxSynth/LuxSynth/LuxPreprocessor.py
1
1130
#!/usr/bin/env python3

## Copyright (c) MIT. All rights reserved.
## lux ([email protected]) 2016

############################################################
# Imports
############################################################
import logging

import LuxImage
import open3d as o3d
import numpy as np

############################################################
# Globals
############################################################

############################################################
# Classes
############################################################

class LuxPreprocessor(object):
    """Preprocessor class for raw data loading."""

    m_outputPath = "./"
    m_inputPath = "./"

    def __init__(self, p_inputPath, p_outputPath):
        self.m_inputPath = p_inputPath
        self.m_outputPath = p_outputPath

    def LoadDepthFromRGB24bitImage(self, p_depthImageFileName):
        #depth = np.array([]);
        #color_raw = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg")
        depth_raw = o3d.io.read_image(p_depthImageFileName)
        return depth_raw
mit
3,467,147,251,003,499,000
24.704545
77
0.39646
false
4.574899
false
false
false
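LoadDepthFromRGB24bitImage() above returns the raw image and leaves decoding of the 24-bit packed depth to the caller. One common packing stores depth as R + 256*G + 65536*B; the file does not state which encoding it expects, so the helper below is an assumption for illustration only.

import numpy as np

def depth_from_rgb24(rgb):
    # Hypothetical decode: little-endian 24-bit depth packed into the
    # R, G and B channels of a uint8 image.
    rgb = np.asarray(rgb, dtype=np.uint32)
    return rgb[..., 0] + rgb[..., 1] * 256 + rgb[..., 2] * 65536

img = np.zeros((2, 2, 3), dtype=np.uint8)
img[0, 0] = (16, 1, 0)               # encodes 16 + 1*256 = 272
print(depth_from_rgb24(img)[0, 0])   # 272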
chaincoin/chaincoin
qa/rpc-tests/replace-by-fee.py
1
22023
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test replace by fee code # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.mininode import * MAX_REPLACEMENT_LIMIT = 100 def txToHex(tx): return bytes_to_hex_str(tx.serialize()) def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): """Create a txout with a given amount and scriptPubKey Mines coins as needed. confirmed - txouts created will be confirmed in the blockchain; unconfirmed otherwise. """ fee = 1*COIN while node.getbalance() < satoshi_round((amount + fee)/COIN): node.generate(100) #print (node.getbalance(), amount, fee) new_addr = node.getnewaddress() #print new_addr txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) tx1 = node.getrawtransaction(txid, 1) txid = int(txid, 16) i = None for i, txout in enumerate(tx1['vout']): #print i, txout['scriptPubKey']['addresses'] if txout['scriptPubKey']['addresses'] == [new_addr]: #print i break assert i is not None tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(txid, i))] tx2.vout = [CTxOut(amount, scriptPubKey)] tx2.rehash() signed_tx = node.signrawtransaction(txToHex(tx2)) txid = node.sendrawtransaction(signed_tx['hex'], True) # If requested, ensure txouts are confirmed. if confirmed: mempool_size = len(node.getrawmempool()) while mempool_size > 0: node.generate(1) new_size = len(node.getrawmempool()) # Error out if we have something stuck in the mempool, as this # would likely be a bug. assert(new_size < mempool_size) mempool_size = new_size return COutPoint(int(txid, 16), 0) class ReplaceByFeeTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug", "-relaypriority=0", "-whitelist=127.0.0.1", "-limitancestorcount=50", "-limitancestorsize=101", "-limitdescendantcount=200", "-limitdescendantsize=101" ])) self.is_network_split = False def run_test(self): make_utxo(self.nodes[0], 1*COIN) print("Running test simple doublespend...") self.test_simple_doublespend() print("Running test doublespend chain...") self.test_doublespend_chain() print("Running test doublespend tree...") self.test_doublespend_tree() print("Running test replacement feeperkb...") self.test_replacement_feeperkb() print("Running test spends of conflicting outputs...") self.test_spends_of_conflicting_outputs() print("Running test new unconfirmed inputs...") self.test_new_unconfirmed_inputs() print("Running test too many replacements...") self.test_too_many_replacements() print("Running test opt-in...") self.test_opt_in() print("Running test prioritised transactions...") self.test_prioritised_transactions() print("Passed\n") def test_simple_doublespend(self): """Simple doublespend""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Should fail because we haven't changed the fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = 
self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # Extra 0.1 BTC fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx1b_hex = txToHex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) mempool = self.nodes[0].getrawmempool() assert (tx1a_txid not in mempool) assert (tx1b_txid in mempool) assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) def test_doublespend_chain(self): """Doublespend of a long chain""" initial_nValue = 50*COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) prevout = tx0_outpoint remaining_value = initial_nValue chain_txids = [] while remaining_value > 10*COIN: remaining_value -= 1*COIN tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = [CTxOut(remaining_value, CScript([1]))] tx_hex = txToHex(tx) txid = self.nodes[0].sendrawtransaction(tx_hex, True) chain_txids.append(txid) prevout = COutPoint(int(txid, 16), 0) # Whether the double-spend is allowed is evaluated by including all # child fees - 40 BTC - so this attempt is rejected. dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # transaction mistakenly accepted! # Accepted with sufficient fee dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() for doublespent_txid in chain_txids: assert(doublespent_txid not in mempool) def test_doublespend_tree(self): """Doublespend of a big tree of transactions""" initial_nValue = 50*COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): if _total_txs is None: _total_txs = [0] if _total_txs[0] >= max_txs: return txout_value = (initial_value - fee) // tree_width if txout_value < fee: return vout = [CTxOut(txout_value, CScript([i+1])) for i in range(tree_width)] tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = vout tx_hex = txToHex(tx) assert(len(tx.serialize()) < 100000) txid = self.nodes[0].sendrawtransaction(tx_hex, True) yield tx _total_txs[0] += 1 txid = int(txid, 16) for i, txout in enumerate(tx.vout): for x in branch(COutPoint(txid, i), txout_value, max_txs, tree_width=tree_width, fee=fee, _total_txs=_total_txs): yield x fee = int(0.0001*COIN) n = MAX_REPLACEMENT_LIMIT tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) # Attempt double-spend, will fail because too little fee paid dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # 1 BTC fee is enough dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) 
self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() for tx in tree_txs: tx.rehash() assert (tx.hash not in mempool) # Try again, but with more total transactions than the "max txs # double-spent at once" anti-DoS limit. for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): fee = int(0.0001*COIN) tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) assert_equal("too many potential replacements" in exp.error['message'], True) else: assert(False) for tx in tree_txs: tx.rehash() self.nodes[0].getrawtransaction(tx.hash) def test_replacement_feeperkb(self): """Replacement requires fee-per-KB to be higher""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the fee per KB is much lower, so the replacement is # rejected. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) def test_spends_of_conflicting_outputs(self): """Replacements that spend conflicting tx outputs are rejected""" utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) utxo2 = make_utxo(self.nodes[0], 3*COIN) tx1a = CTransaction() tx1a.vin = [CTxIn(utxo1, nSequence=0)] tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) tx1a_txid = int(tx1a_txid, 16) # Direct spend an output of the transaction we're replacing. tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Spend tx1a's output to test the indirect case. 
tx1b = CTransaction() tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1b_hex = txToHex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) tx1b_txid = int(tx1b_txid, 16) tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), CTxIn(COutPoint(tx1b_txid, 0))] tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) def test_new_unconfirmed_inputs(self): """Replacements that add new unconfirmed inputs are rejected""" confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) tx1 = CTransaction() tx1.vin = [CTxIn(confirmed_utxo)] tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1_hex = txToHex(tx1) tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True) tx2 = CTransaction() tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] tx2.vout = tx1.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) def test_too_many_replacements(self): """Replacements that evict too many transactions are rejected""" # Try directly replacing more than MAX_REPLACEMENT_LIMIT # transactions # Start by creating a single transaction with many outputs initial_nValue = 10*COIN utxo = make_utxo(self.nodes[0], initial_nValue) fee = int(0.0001*COIN) split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1) outputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() splitting_tx.vin = [CTxIn(utxo, nSequence=0)] splitting_tx.vout = outputs splitting_tx_hex = txToHex(splitting_tx) txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) txid = int(txid, 16) # Now spend each of those outputs individually for i in range(MAX_REPLACEMENT_LIMIT+1): tx_i = CTransaction() tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] tx_i_hex = txToHex(tx_i) self.nodes[0].sendrawtransaction(tx_i_hex, True) # Now create doublespend of the whole lot; should fail. 
# Need a big enough fee to cover all spending transactions and have # a higher fee rate double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) inputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) double_tx = CTransaction() double_tx.vin = inputs double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) try: self.nodes[0].sendrawtransaction(double_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) assert_equal("too many potential replacements" in exp.error['message'], True) else: assert(False) # If we remove an input, it should pass double_tx = CTransaction() double_tx.vin = inputs[0:-1] double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) self.nodes[0].sendrawtransaction(double_tx_hex, True) def test_opt_in(self): """ Replacing should only work if orig tx opted in """ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) # Create a non-opting in transaction tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Shouldn't be able to double-spend tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: print(tx1b_txid) assert(False) tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) # Create a different non-opting in transaction tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx2a_hex = txToHex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) # Still shouldn't be able to double-spend tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx2b_hex = txToHex(tx2b) try: tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Now create a new transaction that spends from tx1a and tx2a # opt-in on one of the inputs # Transaction should be replaceable on either input tx1a_txid = int(tx1a_txid, 16) tx2a_txid = int(tx2a_txid, 16) tx3a = CTransaction() tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] tx3a_hex = txToHex(tx3a) self.nodes[0].sendrawtransaction(tx3a_hex, True) tx3b = CTransaction() tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] tx3b_hex = txToHex(tx3b) tx3c = CTransaction() tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] tx3c_hex = txToHex(tx3c) self.nodes[0].sendrawtransaction(tx3b_hex, True) # If tx3b was accepted, tx3c won't look like a replacement, # but make sure it is accepted anyway self.nodes[0].sendrawtransaction(tx3c_hex, True) def test_prioritised_transactions(self): # Ensure that fee deltas used via prioritisetransaction are # correctly used by replacement logic # 1. 
Check that feeperkb uses modified fees tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the actual fee per KB is much lower. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] tx1b_hex = txToHex(tx1b) # Verify tx1b cannot replace tx1a. try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Use prioritisetransaction to set tx1a's fee to 0. self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) # Now tx1b should be able to replace tx1a tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) assert(tx1b_txid in self.nodes[0].getrawmempool()) # 2. Check that absolute fee checks use modified fee. tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx2a_hex = txToHex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) # Lower fee, but we'll prioritise it tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] tx2b.rehash() tx2b_hex = txToHex(tx2b) # Verify tx2b cannot replace tx2a. try: tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Now prioritise tx2b to have a higher modified fee self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) # tx2b should now be accepted tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) assert(tx2b_txid in self.nodes[0].getrawmempool()) if __name__ == '__main__': ReplaceByFeeTest().main()
mit
5,439,684,677,800,086,000
36.327119
105
0.5823
false
3.367946
true
false
false
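replace-by-fee.py above exercises BIP125-style replacement rules: a replacement must pay a strictly higher absolute fee, a higher fee rate, and enough extra fee to pay for relaying its own bytes. A simplified standalone version of those three checks follows (fees in satoshis, sizes in bytes); it is an illustration, not Bitcoin Core's actual policy code.

def rbf_acceptable(old_fee, old_size, new_fee, new_size, min_relay_feerate=1):
    # The replacement must pay a higher absolute fee than the original.
    if new_fee <= old_fee:
        return False
    # It must also pay a higher fee rate, so miners actually prefer it.
    if float(new_fee) / new_size <= float(old_fee) / old_size:
        return False
    # The fee increment must cover relaying the new transaction's bytes.
    if new_fee - old_fee < min_relay_feerate * new_size:
        return False
    return True

print(rbf_acceptable(1000, 250, 2000, 250))   # True
print(rbf_acceptable(1000, 250, 1100, 5000))  # False - fee rate dropped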
pytorch/fairseq
examples/multilingual/data_scripts/utils/fasttext_multi_filter.py
1
2340
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #!/bin/python import fasttext from multiprocessing import Pool import contextlib import sys import argparse from functools import partial import io model = None def init(model_path): global model model = fasttext.load_model(model_path) def pred(lines): return lines, [model.predict(line.strip())[0][0][9:] for line in lines] def main(): parser = argparse.ArgumentParser() parser.add_argument("--model", type=str, required=True, help="model to load") parser.add_argument("--inputs", nargs="+", default=['-'], help="input files to filter") parser.add_argument("--langs", nargs="+", required=True, help="lang ids of each input file") parser.add_argument("--outputs", nargs="+", default=['-'], help="path to save lid filtered outputs") parser.add_argument("--num-workers", type=int, metavar="N", default=10, help="number of processes in parallel") args = parser.parse_args() assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs) with contextlib.ExitStack() as stack: inputs = [ stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace")) if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace") for input in args.inputs ] outputs = [ stack.enter_context(open(output, "w", encoding="utf-8", newline="\n")) if output != "-" else sys.stdout for output in args.outputs ] with Pool(args.num_workers, initializer=partial(init, args.model)) as p: skip_cnt = 0 for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500): if not all(a == b for a, b in zip(preds, args.langs)): skip_cnt += 1 continue for line, output_h in zip(lines, outputs): print(line.strip(), file=output_h) print(f"Skipped {skip_cnt} lines.") if __name__ == "__main__": main()
mit
-9,120,572,381,256,521,000
36.142857
107
0.584188
false
4.048443
false
false
false
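fasttext_multi_filter.py above avoids pickling the fastText model for every task by loading it once per worker: partial(init, args.model) runs as the Pool initializer inside each child process. The same initializer pattern in miniature, with a plain number standing in for the model:

from functools import partial
from multiprocessing import Pool

_state = None

def init(value):
    # Runs once in each worker process; stores per-process state the way
    # the script's init() loads one fastText model per worker.
    global _state
    _state = value

def classify(x):
    return x, x > _state

if __name__ == "__main__":
    with Pool(2, initializer=partial(init, 10)) as p:
        print(p.map(classify, [5, 15]))  # [(5, False), (15, True)]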
Southpaw-TACTIC/TACTIC
src/pyasm/web/webware_adapter.py
1
4985
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
#                     All Rights Reserved
#
# PROPRIETARY INFORMATION.  This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# DEPRECATED
#

__all__ = ['get_app_server', 'get_xmlrpc_server', 'WebWareException', 'WebWare', 'WebWareXmlrpcAdapter']

import types, os

from WebKit.Page import Page

from pyasm.common import Config
from pyasm.web import Url

from web_environment import *

class WebWareException(Exception):
    pass

def get_app_server():
    '''dynamically load in the appserver classes'''
    from app_server import BaseAppServer
    from WebKit.Page import Page

    class AppServer(Page, BaseAppServer):
        def get_adapter(self):
            adapter = WebWare(self)
            return adapter

        def writeHTML(self):
            self.writeln( self.get_display() )

    return AppServer

def get_xmlrpc_server():
    '''dynamically load in an xmlrpc server'''
    from WebKit.XMLRPCServlet import XMLRPCServlet

    class XmlrpcServer(XMLRPCServlet):
        def get_adapter(self):
            adapter = WebWareXmlrpcAdapter(self.transaction())
            return adapter

    return XmlrpcServer

class WebWare(WebEnvironment):
    """Encapsulates webware environment. Implements the web interface"""

    def __init__(self,page):
        super(WebWare,self).__init__()
        self.request = page.request()
        self.response = page.response()

    def get_context_name(self):
        '''this includes all of the subdirectories as well as the main context'''
        dir = self.request.urlPathDir()
        # strip off the / at the front and the back
        dir = dir.rstrip("/")
        dir = dir.lstrip("/")
        return dir

    # form submission methods
    #def reset_form(self):
    #    return self.request.fields() = {}

    def get_form_keys(self):
        return self.request.fields().keys()

    def has_form_key(self, key):
        return key in self.request.fields()

    def set_form_value(self, name, value):
        '''Set the form value to appear like it was submitted'''
        self.request.setField(name, value)

    def get_form_values(self, name, raw=False):
        """returns a string list of the values of a form element.
        If raw is True, then a nonexistant value returns None"""
        if self.request.hasValue(name):
            values = self.request.value(name)
            if isinstance(values, basestring):
                values = values.decode('utf-8')
                values = self._process_unicode(values)
                return [values]
            elif isinstance(values, list):
                new_values = []
                for value in values:
                    if isinstance(value, basestring):
                        value = self._process_unicode(value.decode('utf-8'))
                    new_values.append(value)
                return new_values
            else:
                # this can be a FieldStorage instance
                return values
        else:
            if raw == True:
                return None
            else:
                return []

    def get_form_value(self, name, raw=False):
        """returns the string value of the form element.
If raw is True, then a nonexistant value returns None""" values = self.get_form_values(name,raw) if values == None: return None if values.__class__.__name__ == "FieldStorage": return values elif len(values) > 0: return values[0] else: return "" def _process_unicode(self, value): try: value = value.encode("ascii") except: chars = [] for char in value: ord_value = ord(char) if ord_value > 128: chars.append("&#%s;" % ord(char) ) else: chars.append(char) value = "".join(chars) return value # cookie methods def set_cookie(self, name, value): """set a cookie""" self.response.setCookie(name, value, expires="NEVER") def get_cookie(self, name): """get a cookie""" if self.request.hasCookie(name): return self.request.cookie(name) else: return "" # environment methods def get_env_keys(self): env = self.request.environ() return env.keys() def get_env(self, env_var): env = self.request.environ() return env.get(env_var) class WebWareXmlrpcAdapter(WebWare): def __init__(self, transaction): # NOTE: the call to WebWare's super is intentional super(WebWare,self).__init__() self.request = transaction.request() self.response = transaction.response()
epl-1.0
7,359,882,747,816,298,000
23.800995
104
0.566901
false
4.231749
false
false
false
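get_form_values() in webware_adapter.py above exists to normalize WebWare's three possible return shapes - a single string, a list, or a FieldStorage object - into something callers can iterate. Here is the shape normalization on its own, against a plain dict standing in for the request; all names are hypothetical.

class FormAdapter(object):
    # Minimal stand-in for the WebWare request wrapper: always hand the
    # caller a list, whatever shape the underlying field had.
    def __init__(self, fields):
        self._fields = fields

    def get_form_values(self, name):
        value = self._fields.get(name)
        if value is None:
            return []
        if isinstance(value, list):
            return value
        return [value]

req = FormAdapter({"a": "1", "b": ["2", "3"]})
print(req.get_form_values("a"))  # ['1']
print(req.get_form_values("b"))  # ['2', '3']
print(req.get_form_values("c"))  # []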
phenoxim/nova
nova/tests/unit/api/openstack/placement/test_handler.py
1
7223
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the functions used by the placement API handlers.""" import microversion_parse import mock import routes import webob from nova.api.openstack.placement import handler from nova.api.openstack.placement.handlers import root from nova.api.openstack.placement import microversion from nova import test from nova.tests import uuidsentinel # Used in tests below def start_response(*args, **kwargs): pass def _environ(path='/moo', method='GET'): return { 'PATH_INFO': path, 'REQUEST_METHOD': method, 'SERVER_NAME': 'example.com', 'SERVER_PORT': '80', 'wsgi.url_scheme': 'http', # The microversion version value is not used, but it # needs to be set to avoid a KeyError. microversion.MICROVERSION_ENVIRON: microversion_parse.Version(1, 12), } class DispatchTest(test.NoDBTestCase): def setUp(self): super(DispatchTest, self).setUp() self.mapper = routes.Mapper() self.route_handler = mock.MagicMock() def test_no_match_null_map(self): self.assertRaises(webob.exc.HTTPNotFound, handler.dispatch, _environ(), start_response, self.mapper) def test_no_match_with_map(self): self.mapper.connect('/foobar', action='hello') self.assertRaises(webob.exc.HTTPNotFound, handler.dispatch, _environ(), start_response, self.mapper) def test_simple_match(self): self.mapper.connect('/foobar', action=self.route_handler, conditions=dict(method=['GET'])) environ = _environ(path='/foobar') handler.dispatch(environ, start_response, self.mapper) self.route_handler.assert_called_with(environ, start_response) def test_simple_match_routing_args(self): self.mapper.connect('/foobar/{id}', action=self.route_handler, conditions=dict(method=['GET'])) environ = _environ(path='/foobar/%s' % uuidsentinel.foobar) handler.dispatch(environ, start_response, self.mapper) self.route_handler.assert_called_with(environ, start_response) self.assertEqual(uuidsentinel.foobar, environ['wsgiorg.routing_args'][1]['id']) class MapperTest(test.NoDBTestCase): def setUp(self): super(MapperTest, self).setUp() declarations = { '/hello': {'GET': 'hello'} } self.mapper = handler.make_map(declarations) def test_no_match(self): environ = _environ(path='/cow') self.assertIsNone(self.mapper.match(environ=environ)) def test_match(self): environ = _environ(path='/hello') action = self.mapper.match(environ=environ)['action'] self.assertEqual('hello', action) def test_405_methods(self): environ = _environ(path='/hello', method='POST') result = self.mapper.match(environ=environ) self.assertEqual(handler.handle_405, result['action']) self.assertEqual('GET', result['_methods']) def test_405_headers(self): environ = _environ(path='/hello', method='POST') global headers, status headers = status = None def local_start_response(*args, **kwargs): global headers, status status = args[0] headers = {header[0]: header[1] for header in args[1]} handler.dispatch(environ, local_start_response, self.mapper) allow_header = headers['allow'] self.assertEqual('405 Method Not Allowed', status) self.assertEqual('GET', 
allow_header) # PEP 3333 requires that headers be whatever the native str # is in that version of Python. Never unicode. self.assertEqual(str, type(allow_header)) class PlacementLoggingTest(test.NoDBTestCase): @mock.patch("nova.api.openstack.placement.handler.LOG") def test_404_no_error_log(self, mocked_log): environ = _environ(path='/hello', method='GET') context_mock = mock.Mock() context_mock.to_policy_values.return_value = {'roles': ['admin']} environ['placement.context'] = context_mock app = handler.PlacementHandler() self.assertRaises(webob.exc.HTTPNotFound, app, environ, start_response) mocked_log.error.assert_not_called() mocked_log.exception.assert_not_called() class DeclarationsTest(test.NoDBTestCase): def setUp(self): super(DeclarationsTest, self).setUp() self.mapper = handler.make_map(handler.ROUTE_DECLARATIONS) def test_root_slash_match(self): environ = _environ(path='/') result = self.mapper.match(environ=environ) self.assertEqual(root.home, result['action']) def test_root_empty_match(self): environ = _environ(path='') result = self.mapper.match(environ=environ) self.assertEqual(root.home, result['action']) class ContentHeadersTest(test.NoDBTestCase): def setUp(self): super(ContentHeadersTest, self).setUp() self.environ = _environ(path='/') self.app = handler.PlacementHandler() def test_no_content_type(self): self.environ['CONTENT_LENGTH'] = '10' self.assertRaisesRegex(webob.exc.HTTPBadRequest, "content-type header required when " "content-length > 0", self.app, self.environ, start_response) def test_non_integer_content_length(self): self.environ['CONTENT_LENGTH'] = 'foo' self.assertRaisesRegex(webob.exc.HTTPBadRequest, "content-length header must be an integer", self.app, self.environ, start_response) def test_empty_content_type(self): self.environ['CONTENT_LENGTH'] = '10' self.environ['CONTENT_TYPE'] = '' self.assertRaisesRegex(webob.exc.HTTPBadRequest, "content-type header required when " "content-length > 0", self.app, self.environ, start_response) def test_empty_content_length_and_type_works(self): self.environ['CONTENT_LENGTH'] = '' self.environ['CONTENT_TYPE'] = '' self.app(self.environ, start_response) def test_content_length_and_type_works(self): self.environ['CONTENT_LENGTH'] = '10' self.environ['CONTENT_TYPE'] = 'foo' self.app(self.environ, start_response)
apache-2.0
-6,712,546,996,518,206,000
36.231959
78
0.618856
false
4.192107
true
false
false
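The _environ() helper in test_handler.py above hand-builds the handful of WSGI keys the placement dispatcher reads. For tests that need a fully populated environ, the standard library can supply the boilerplate: wsgiref.util.setup_testing_defaults is a real stdlib helper that fills in only the keys you have not set; the rest of this sketch is illustrative.

from wsgiref.util import setup_testing_defaults

def make_environ(path="/", method="GET"):
    # Keep the keys the code under test cares about, let wsgiref add the
    # remaining WSGI-required defaults (SERVER_NAME, wsgi.input, ...).
    environ = {"PATH_INFO": path, "REQUEST_METHOD": method}
    setup_testing_defaults(environ)
    return environ

env = make_environ("/hello", "POST")
print(env["PATH_INFO"], env["REQUEST_METHOD"], env["wsgi.url_scheme"])
# /hello POST http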
stianstr/autodeploy
autodeploy/Api.py
1
3374
from DependencyContainer import DependencyContainer
from Deployer import AlreadyDeployed

import traceback

dc = DependencyContainer()

# Step 1 - Check if branch can be deployed
def check(branch, server, user, internalCheck=False):
    checker = dc.getDeploymentChecker(server)
    result = checker.check(branch)
    if not internalCheck:
        result = {'check': result, 'result': result['result']}
        result['id'] = writeResult('check', branch, server, result, user)
    return result

# Step 2 - Deploy branch (then observe if everything is ok)
def deploy(branch, server, user):
    result = {}
    try:
        checkDetails = check(branch, server, user, internalCheck=True)
        if not checkDetails['result']:
            result = {
                'result': False,
                'message': 'Check failed',
                'check': checkDetails,
                'exception': None
            }
        else:
            deployer = dc.getDeployer(server)
            try:
                deployer.deploy(branch)
                result = {
                    'result': True,
                    'message': 'Deployed',
                    'exception': None,
                    'check': checkDetails
                }
            except AlreadyDeployed, e:
                result = {
                    'result': False,
                    'message': 'Already deployed',
                    'exception': None,
                    'check': {}
                }
    except Exception, e:
        result = {
            'result': False,
            'message': e.message,
            'exception': traceback.format_exc(),
            'check': {}
        }
    result['id'] = writeResult('deploy', branch, server, result, user)
    return result

# Step 3 - Merge branch into master and switch server to master
def merge(branch, server, user):
    # todo: sanity-check
    lister = dc.getBranchLister()
    if not lister.exists(branch):
        result = {'check': {}, 'result': False, 'message': 'No such branch'}
    else:
        try:
            merger = dc.getBranchMerger()
            merger.merge(branch)
            deployer = dc.getDeployer(server)
            deployer.deploy('master')
            result = {'check': {}, 'result': True}
        except Exception, e:
            result = {'check': {}, 'result': False}
            _exceptionToResult(e, result)
    result['id'] = writeResult('merge', branch, server, result, user)
    return result

# meh, duplicated elsewhere
def _exceptionToResult(exception, result):
    lines = exception.message.split('\n')
    for line in lines:
        line = line.strip()
        if line:
            result['message'] = line
            break
    #result['exception'] = '%s: %s' % (exception.__class__, exception.message)
    result['exception'] = traceback.format_exc()

def getServers():
    servers = dc.config['servers']
    for server in servers:
        bc = dc.getRemoteBranchChecker(server['alias'])
        server['branch'] = bc.get()
    return servers

def getBranches():
    bl = dc.getBranchLister()
    return bl.list()

def writeResult(type, branch, server, data, user):
    data['user'] = user
    data['type'] = type
    data['branch'] = branch
    data['server'] = server
    print 'DATA: %s' % data
    o = dc.getResultWriter()
    return o.write(data)
mit
2,985,443,141,897,539,600
29.396396
78
0.546829
false
4.244025
false
false
false
windskyer/k_nova
nova_extension/compute/ibm/etree_wrapper.py
1
4367
# =================================================================
# =================================================================

"""Wrapper around ElementTree, using either the native implementation or lxml.

This module creates a wrapper around the ElementTree library, picking an
appropriate implementation for the environment.

This module normalizes:
* the exception when parsing, normalized to ParseError
* lxml doesn't support unicode strings with encoding, so lxml converts
  unicode document to ascii.

Reasons to use this:
* not all systems have the lxml library
* Python 2.6's native ElementTree has minimal support for XPATH.

This module uses the following rule to pick the implementation:
* If using Python 2.7, uses the native implementation.
* Otherwise, if lxml is available, uses the lxml implementation.
* Otherwise, uses the native implementation. (In this case, XPATH support
  will be minimal).

References:
* Python 2.7 native:
  http://docs.python.org/2.7/library/xml.etree.elementtree.html
* Python 2.6 native:
  http://docs.python.org/2.6/library/xml.etree.elementtree.html
* lxml: http://lxml.de/

To use this module:

    import etree_wrapper
    etree_wrapper.XML(some_xml_string)

If the XML string passed to XML() is not valid, a ParseError is raised.
"""

import sys


class ParseError(Exception):
    """Raised if the XML string could not be parsed."""
    pass


class _NativeImpl:
    def XML(self, raw_str):
        from xml.etree import ElementTree
        try:
            from xml.etree.ElementTree \
                import ParseError as ImplParseError  # noqa
        except ImportError:
            from xml.parsers.expat import ExpatError as ImplParseError  # noqa
        try:
            return ElementTree.XML(raw_str)
        except ImplParseError as e:
            raise ParseError(e)

    def SubElement(self, parent, tag, attrib={}, **extra):
        from xml.etree import ElementTree
        return ElementTree.SubElement(parent, tag, attrib=attrib, **extra)

    def tostring(self, element):
        from xml.etree import ElementTree
        return ElementTree.tostring(element)

    def register_namespace(self, prefix, namespace):
        from xml.etree import ElementTree
        return ElementTree.register_namespace(prefix, namespace)


class _LxmlImpl:
    def XML(self, raw_str):
        from lxml import etree
        # lxml does not support parsing a unicode string that has an encoding
        # value, so we convert a unicode string to ascii.
        raw_str_ascii = raw_str.encode('ascii', 'replace')
        try:
            return etree.XML(raw_str_ascii)
        except etree.XMLSyntaxError as e:
            raise ParseError(e)

    def SubElement(self, parent, tag, attrib={}, **extra):
        from lxml import etree
        return etree.SubElement(parent, tag, attrib=attrib, **extra)

    def tostring(self, element):
        from lxml import etree
        return etree.tostring(element)

    def register_namespace(self, prefix, namespace):
        """This is not necessary for lxml."""
        pass


def _calc_impl_name(version, have_lxml=None):
    if version < (2, 7):
        if have_lxml:
            return 'lxml'
        return 'native'
    return 'native'


def _create_impl(impl_name):
    if impl_name == 'lxml':
        return _LxmlImpl()
    else:
        return _NativeImpl()


def _check_have_lxml():
    try:
        from lxml import etree
        return hasattr(etree, 'XML')
    except ImportError:
        return False


def _create_impl_for_system():
    version = sys.version_info
    if version < (2, 7):
        have_lxml = _check_have_lxml()
    else:
        have_lxml = None
    impl_name = _calc_impl_name(version, have_lxml=have_lxml)
    return _create_impl(impl_name)


_impl = _create_impl_for_system()


def XML(raw_str):
    """Parse the XML string.

    Raises ParseError if the raw_str could not be parsed.
    """
    return _impl.XML(raw_str)


def SubElement(parent, tag, attrib={}, **extra):
    """See the SubElement() documentation from python xml or lxml."""
    return _impl.SubElement(parent, tag, attrib=attrib, **extra)


def tostring(element):
    """See the tostring() documentation from python xml or lxml."""
    return _impl.tostring(element)


def register_namespace(prefix, namespace):
    return _impl.register_namespace(prefix, namespace)
apache-2.0
-7,815,707,756,236,600,000
27.542484
78
0.650561
false
4.163012
false
false
false
dhalleine/tensorflow
tensorflow/python/kernel_tests/control_flow_ops_py_test.py
1
59408
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-long-lambda """Tests for tensorflow.ops.control_flow_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.python.framework import function from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import logging_ops def check_op_order(graph): """Sanity check on the ordering of op id.""" for op in graph.get_operations(): for v in op.inputs: assert v.op._id < op._id or op.type == "Merge", ( "The id of %s must be less than the id of %s" % (v.op.name, op.name)) return True def check_consumers(graph): """Sanity check on the consumer list of the tensors.""" consumer_count = {} for op in graph.get_operations(): for v in op.inputs: cnt = consumer_count.get(v, 0) consumer_count[v] = cnt + 1 for k, v in consumer_count.items(): if len(k.consumers()) != v: return False return True def isum(s): i = tf.constant(0, name="i") c = lambda i, s: tf.less(i, 10) b = lambda i, s: [tf.add(i, 1), tf.add(i, s)] _, r_s = tf.while_loop(c, b, [i, s]) return r_s class ControlFlowTest(tf.test.TestCase): def testRefIdentity(self): with self.test_session(): v = tf.Variable(7) v = control_flow_ops._Identity(v) op = tf.assign(v, 9) v2 = control_flow_ops.with_dependencies([op], v) self.assertTrue(check_op_order(v.graph)) self.assertTrue(isinstance(v2, tf.Tensor)) tf.initialize_all_variables().run() self.assertEqual(9, v2.eval()) def testRefEnter(self): with self.test_session(): v = tf.Variable(7) enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True) nine = tf.constant(9) enter_nine = control_flow_ops.enter(nine, "foo_1") op = tf.assign(enter_v, enter_nine) v2 = control_flow_ops.with_dependencies([op], enter_v) v3 = control_flow_ops.exit(v2) tf.initialize_all_variables().run() self.assertEqual(9, v3.eval()) def testRefSwitch(self): with self.test_session(): v = tf.Variable(7) p = tf.constant(True) v1 = control_flow_ops._SwitchRefOrTensor(v.ref(), p) v2 = tf.assign(v1[1], 9) tf.initialize_all_variables().run() self.assertEqual(9, v2.eval()) def testEnterMulExit(self): with self.test_session(): data = tf.constant([1, 2, 3, 4, 5, 6], name="data") enter_data = control_flow_ops.enter(data, "foo_1", False) five = tf.constant(5) enter_five = control_flow_ops.enter(five, "foo_1", False) mul_op = tf.mul(enter_data, enter_five) exit_op = control_flow_ops.exit(mul_op) result = exit_op.eval() self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result) def testSwitchMergeIndexedSlices(self): with self.test_session(): values = tf.constant([1, 2, 3, 4, 5, 6]) indices = tf.constant([0, 2, 4, 6, 8, 10]) data = 
tf.IndexedSlices(values, indices) pred = tf.convert_to_tensor(True) switch_op = control_flow_ops.switch(data, pred) merge_op = control_flow_ops.merge(switch_op)[0] val = merge_op.values.eval() ind = merge_op.indices.eval() self.assertAllEqual(np.arange(1, 7), val) self.assertAllEqual(np.arange(0, 12, 2), ind) def testSwitchDeadBranch(self): with self.test_session(): data = tf.constant([1, 2, 3, 4, 5, 6], name="data") ports = tf.convert_to_tensor(True, name="ports") switch_op = control_flow_ops.switch(data, ports) dead_branch = tf.identity(switch_op[0]) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, lambda e: "The tensor returned for" in str(e)): dead_branch.eval() def testSwitchMergeLess(self): with self.test_session(): data = tf.constant([1, 2, 3, 4, 5, 6], name="data") zero = tf.convert_to_tensor(0) one = tf.convert_to_tensor(1) less_op = tf.less(zero, one) switch_op = control_flow_ops.switch(data, less_op) merge_op = control_flow_ops.merge(switch_op)[0] result = merge_op.eval() self.assertAllEqual(np.arange(1, 7), result) def testSwitchMergeAddIdentity(self): with self.test_session(): data = tf.constant([1, 2, 3, 4, 5, 6], name="data") ports = tf.convert_to_tensor(False, name="ports") switch_op = control_flow_ops.switch(data, ports) one = tf.constant(1) add_op = tf.add(switch_op[0], one) id_op = tf.identity(switch_op[1]) merge_op = control_flow_ops.merge([add_op, id_op])[0] result = merge_op.eval() self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result) def testSwitchMergeAddMul(self): with self.test_session(): data = tf.constant([1, 2, 3, 4, 5, 6], name="data") ports = tf.convert_to_tensor(True, name="ports") switch_op = control_flow_ops.switch(data, ports) one = tf.constant(1) add_op = tf.add(switch_op[0], one) five = tf.constant(5) mul_op = tf.mul(switch_op[1], five) merge_op = control_flow_ops.merge([add_op, mul_op])[0] result = merge_op.eval() self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result) def testLoop_false(self): with self.test_session(): false = tf.convert_to_tensor(False) n = tf.constant(10) enter_false = control_flow_ops.enter(false, "foo_1", False) enter_n = control_flow_ops.enter(n, "foo_1", False) merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0] switch_n = control_flow_ops.switch(merge_n, enter_false) exit_n = control_flow_ops.exit(switch_n[0]) next_n = control_flow_ops.next_iteration(switch_n[0]) merge_n.op._update_input(1, next_n) result = exit_n.eval() self.assertAllEqual(10, result) def testLoop_1(self): with self.test_session(): zero = tf.constant(0) one = tf.constant(1) n = tf.constant(10) enter_i = control_flow_ops.enter(zero, "foo", False) enter_one = control_flow_ops.enter(one, "foo", True) enter_n = control_flow_ops.enter(n, "foo", True) with tf.device("/gpu:0"): merge_i = control_flow_ops.merge([enter_i, enter_i])[0] less_op = tf.less(merge_i, enter_n) cond_op = control_flow_ops.loop_cond(less_op) switch_i = control_flow_ops.switch(merge_i, cond_op) add_i = tf.add(switch_i[1], enter_one) next_i = control_flow_ops.next_iteration(add_i) merge_i.op._update_input(1, next_i) exit_i = control_flow_ops.exit(switch_i[0]) result = exit_i.eval() self.assertAllEqual(10, result) def testLoop_2(self): with self.test_session(): zero = tf.constant(0) one = tf.constant(1) n = tf.constant(10) enter_i = control_flow_ops.enter(zero, "foo", False) enter_one = control_flow_ops.enter(one, "foo", True) enter_n = control_flow_ops.enter(n, "foo", True) merge_i = 
control_flow_ops.merge([enter_i, enter_i])[0] less_op = tf.less(merge_i, enter_n) cond_op = control_flow_ops.loop_cond(less_op) switch_i = control_flow_ops.switch(merge_i, cond_op) add_i = tf.add(switch_i[1], enter_one) with tf.device("/gpu:0"): next_i = control_flow_ops.next_iteration(add_i) merge_i.op._update_input(1, next_i) exit_i = control_flow_ops.exit(switch_i[0]) result = exit_i.eval() self.assertAllEqual(10, result) def testCondBool(self): values = tf.constant(10) fn1 = lambda: tf.add(values, 1) fn2 = lambda: tf.sub(values, 1) with self.assertRaisesRegexp(TypeError, "must not be a Python bool"): _ = tf.cond(False, fn1, fn2) def testCondIndexedSlices(self): with self.test_session(): values = tf.constant(10) indices = tf.constant(0) x = tf.IndexedSlices(values, indices) pred = tf.less(1, 2) fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices) fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices) r = tf.cond(pred, fn1, fn2) val = r.values.eval() ind = r.indices.eval() self.assertTrue(check_op_order(x.values.graph)) self.assertAllEqual(11, val) self.assertAllEqual(0, ind) def testCondIndexedSlicesDifferentTypes(self): with self.test_session(): values = tf.constant(10) i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32) i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64) x = tf.IndexedSlices(values, i_32) pred = tf.less(1, 2) fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32) fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64) r = tf.cond(pred, fn1, fn2) val = r.values.eval() ind = r.indices.eval() self.assertTrue(check_op_order(x.values.graph)) self.assertAllEqual(11, val) self.assertAllEqual(0, ind) self.assertTrue(ind.dtype == np.int64) def testCondColocation(self): with self.test_session(use_gpu=True): with tf.device("/cpu:0"): v = tf.Variable(7.0) x = tf.constant(10.0) pred = tf.less(1.0, 2.0) fn1 = lambda: tf.add(v, 1.0) fn2 = lambda: tf.sub(x, 1.0) r = tf.cond(pred, fn1, fn2) for op in x.graph.get_operations(): if op.name == "cond/Add/Switch": self.assertDeviceEqual(op.device, "/cpu:0") def _testCond_1(self, use_gpu): with self.test_session(use_gpu=use_gpu): x = tf.constant(10) pred = tf.less(1, 2) fn1 = lambda: tf.add(x, 1) fn2 = lambda: tf.sub(x, 1) r = tf.cond(pred, fn1, fn2) result = r.eval() self.assertTrue(check_op_order(x.graph)) self.assertAllEqual(11, result) def testCond_1(self): self._testCond_1(use_gpu=False) self._testCond_1(use_gpu=True) def testCond_2(self): with self.test_session(): x = tf.constant(10) r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1)) result = r.eval() self.assertTrue(check_op_order(x.graph)) self.assertAllEqual(9, result) def testCond_3(self): with self.test_session(): x = tf.constant(10) pred = tf.less(1, 2) fn1 = lambda: tf.add(x, 1) fn2 = lambda: tf.sub(x, 1) fn3 = lambda: tf.add(tf.cond(pred, fn1, fn2), 1) r = tf.cond(pred, fn3, fn2) result = r.eval() self.assertTrue(check_op_order(x.graph)) self.assertAllEqual(12, result) def testCond_4(self): with self.test_session(): v1 = tf.Variable(7) v2 = tf.Variable(7) v3 = tf.Variable(7) age = tf.constant(3) max_age = tf.constant(2) pred = tf.greater(age, max_age) fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op] fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op] r = tf.cond(pred, fn1, fn2) tf.initialize_all_variables().run() self.assertEqual(len(r), 2) result = r[1].eval() self.assertTrue(check_op_order(age.graph)) self.assertAllEqual(True, result) self.assertAllEqual(7, v1.eval()) self.assertAllEqual(2, v2.eval()) 
self.assertAllEqual(7, v3.eval()) def testCond_5(self): with self.test_session(): alive = tf.constant(True, name="alive") count = tf.constant(0, name="count") def body(i): return tf.cond( alive, lambda: [tf.less(i, 3), tf.add(count, 1)], lambda: [alive, count]) for i in range(10): alive, count = body(i) self.assertAllEqual(4, count.eval()) def testCond_6(self): with self.test_session(): v1 = tf.Variable([7]) age = tf.constant(3) pred = tf.greater(age, 4) fn1 = lambda: age fn2 = lambda: v1 r = tf.cond(pred, fn1, fn2) tf.initialize_all_variables().run() result = r.eval() self.assertAllEqual(np.array([7]), result) def testCond_7(self): with self.test_session() as sess: x = tf.constant(10) y = tf.constant(200) pred = tf.less(1, 2) fn1 = lambda: [tf.add(x, 1), tf.add(x, 2)] fn2 = lambda: [y, y] r = tf.cond(pred, fn1, fn2) self.assertAllEqual([11, 12], sess.run(r)) def testCondGrad_1(self): with self.test_session(): x = tf.constant(10.0, name="x") pred = tf.less(1, 2) fn1 = lambda: tf.identity(x) fn2 = lambda: tf.identity(x) r = tf.cond(pred, fn1, fn2) grad = tf.gradients(r, [x])[0] result = grad.eval() self.assertAllEqual(1.0, result) def testCondGrad_2(self): with self.test_session(): c = tf.placeholder(tf.int32, shape=[]) x = tf.constant(10.0) pred = tf.less(c, 2) fn1 = lambda: tf.mul(x, 42.0) fn2 = lambda: tf.mul(x, 3.0) r = tf.cond(pred, fn1, fn2) grad = tf.gradients(r, [x])[0] self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1})) self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3})) def testNestedCond_Simple(self): with self.test_session(): x = tf.constant(0., name="X") y = tf.cond(tf.constant(True), lambda: x, lambda: tf.cond(x < 1., lambda: x, lambda: x)) result = tf.gradients(y, x)[0] self.assertEqual(1.0, result.eval()) z = tf.cond(tf.constant(False), lambda: x, lambda: tf.cond(x < 1., lambda: x, lambda: x)) result = tf.gradients(z, x)[0] self.assertEqual(1.0, result.eval()) def testCondGrad_Gather(self): with self.test_session() as sess: v1 = tf.Variable([1.0, 42.0]) c = tf.placeholder(tf.int32, shape=[]) pred = tf.less(c, 2) fn1 = lambda: tf.identity(v1) fn2 = lambda: tf.gather(v1, [1, 1]) r = tf.cond(pred, fn1, fn2) grad = tf.gradients(r, [v1])[0] tf.initialize_all_variables().run() # Should just be [1, 1], but possibly a sparse representation gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1}) dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2) ] self.assertAllEqual(dense_gv, [1.0, 1.0]) # Should be [0, 2], as the else forwards v1[1] twice gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3}) dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2) ] self.assertAllEqual(dense_gv, [0.0, 2.0]) # Microbenchmark: 10,000 iterations took 0.21s. 
def testWhile_1(self): with self.test_session(): n = tf.constant(0) c = lambda x: tf.less(x, 10000) b = lambda x: tf.add(x, 1) r = tf.while_loop(c, b, [n], parallel_iterations=20) self.assertEqual(10000, r.eval()) def testWhileWithRefs_1(self): with self.test_session() as sess: x = tf.Variable(0).ref() i = tf.constant(0) c = lambda i, x: tf.less(i, 100) self.assertEqual(x.dtype, tf.int32_ref) def b(i, x): self.assertEqual(x.dtype, tf.int32_ref) return (i+1, gen_array_ops._ref_identity(x)) r = tf.while_loop(c, b, [i, x], parallel_iterations=5) tf.initialize_all_variables().run() self.assertEqual(r[0].dtype, tf.int32) self.assertEqual(r[1].dtype, tf.int32_ref) value_i, value_x = sess.run(r) self.assertEqual(100, value_i) self.assertEqual(0, value_x) def testWhile_2(self): with self.test_session(): s = tf.constant(0) r = isum(s) self.assertAllEqual(45, r.eval()) # Have more than 10 parallel iterations and hence exercise k-bound # most of the time. def testWhile_3(self): with self.test_session(): def compute(i, m, c, o): m, c = [tf.add(m, 1), tf.add(c, 1)] o = tf.add(o, m) o = tf.add(o, c) i = tf.add(i, 1) return [i, m, c, o] i = tf.convert_to_tensor(0) m = tf.convert_to_tensor(0) c = tf.convert_to_tensor(0) o = tf.convert_to_tensor(0) d = tf.convert_to_tensor(100) r = tf.while_loop( lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o]) result = r[3].eval() self.assertTrue(check_op_order(i.graph)) self.assertAllEqual(10100, result) def testWhile_4(self): with self.test_session(): def compute(i, m, c, o): m, c = [tf.gather(x, i), tf.gather(x, i)] o = tf.add(o, m) o = tf.add(o, c) i = tf.add(i, 1) return [i, m, c, o] i = tf.convert_to_tensor(0) m = tf.convert_to_tensor(0) c = tf.convert_to_tensor(0) o = tf.convert_to_tensor(0) x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6]) s = tf.size(x) r = tf.while_loop( lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o]) result = r[3].eval() self.assertTrue(check_op_order(i.graph)) self.assertAllEqual(42, result) def testWhile_5(self): with self.test_session(): def compute(i, c, o): c = tf.slice(x, tf.expand_dims(i, 0), [1]) o = tf.concat(0, [o, c]) i = tf.add(i, 1) return [i, c, o] i = tf.convert_to_tensor(0) c = tf.convert_to_tensor(0) o = tf.convert_to_tensor([0]) x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6]) s = tf.size(x) r = tf.while_loop( lambda i, c, o: tf.less(i, s), compute, [i, c, o]) result = r[2].eval() self.assertTrue(check_op_order(i.graph)) self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result) def _testWhile_Gpu_1(self, use_gpu): with self.test_session(use_gpu=use_gpu): n = tf.constant(1.0) c = lambda x: tf.less(x, 10.0) b = lambda x: tf.add(x, 1.0) r = tf.while_loop(c, b, [n]) self.assertAllClose(10.0, r.eval()) def testWhile_Gpu_1(self): self._testWhile_Gpu_1(use_gpu=False) self._testWhile_Gpu_1(use_gpu=True) def _testWhile_Gpu_2(self, use_gpu): with self.test_session(use_gpu=use_gpu): n = tf.constant(1.0) c = lambda x: tf.less(x, 10.0) def b(x): with tf.device("/cpu:0"): return tf.add(x, 1.0) r = tf.while_loop(c, b, [n]) self.assertAllClose(10.0, r.eval()) def testWhile_Gpu_2(self): self._testWhile_Gpu_2(use_gpu=False) self._testWhile_Gpu_2(use_gpu=True) def testWhileShape(self): with self.test_session(): i = tf.constant(0) m = tf.ones([2, 2]) c = lambda i, j: tf.less(i, 2) def _b(i, j): new_i = tf.add(i, 1) new_j = tf.tile(j, [2, 2]) return [new_i, new_j] r = tf.while_loop(c, _b, [i, m]) r = r[1] * tf.ones([8, 8]) self.assertAllEqual(np.ones((8, 8)), r.eval()) def testWhileShapeInference(self): with self.test_session(): i = 
tf.constant(0) m = tf.ones([2, 2]) c = lambda i, j: tf.less(i, 2) def _b(i, j): new_i = tf.add(i, 1) new_j = tf.concat(0, [j, j]) return [new_i, new_j] r = tf.while_loop(c, _b, [i, m]) self.assertTrue(r[1].get_shape()[0].value is None) self.assertEqual(r[1].get_shape()[1], tf.Dimension(2)) def _testNestedWhile_1(self, use_gpu): with self.test_session(use_gpu=use_gpu): n = tf.constant(0) def cpu_sum(s): c = lambda i, s: tf.less(i, 10) def b(i, s): i1 = tf.add(i, 1) with tf.device("/cpu:0"): s1 = tf.add(i, s) return i1, s1 _, r_s = tf.while_loop(c, b, [n, s]) return r_s c = lambda x: tf.less(x, 200) b = lambda x: tf.add(x, cpu_sum(n)) r = tf.while_loop(c, b, [n]) self.assertEqual(225, r.eval()) def testNestedWhile_1(self): self._testNestedWhile_1(use_gpu=False) self._testNestedWhile_1(use_gpu=True) def testWhileWithControl_1(self): with self.test_session(): n = tf.constant(0) r = tf.constant(0) condition = lambda n_, r_: tf.less(n_, 10) def body(n_, r_): n_ = tf.add(n_, 1) with r_.graph.control_dependencies([r_]): r_ = tf.constant(12) return [n_, r_] res = tf.while_loop(condition, body, [n, r], parallel_iterations=1) self.assertAllEqual(12, res[1].eval()) def testWhileWithControl_2(self): with self.test_session(): r = tf.constant(0) condition = lambda r_: tf.less(r_, 10) def body(r_): with r_.graph.control_dependencies([r_]): r_ = tf.constant(12) return [r_] res = tf.while_loop(condition, body, [r], parallel_iterations=1) self.assertAllEqual(12, res.eval()) def testWhileWithControl_3(self): with self.test_session() as sess: b = tf.placeholder(tf.bool) c = tf.constant(0) with tf.control_dependencies([b]): c = tf.while_loop(lambda x: x < 10, lambda x: x + 1, [c]) self.assertEqual(10, sess.run(c, {b: True})) def testCondWhile_1(self): with self.test_session(): n = tf.convert_to_tensor(0, name="n") c = lambda x: tf.less(x, 10) b = lambda x: tf.add(x, 1) r = tf.cond(tf.less(0, 1), lambda: tf.while_loop(c, b, [n]), lambda: n) self.assertAllEqual(10, r.eval()) def testCondWhile_2(self): with self.test_session(): n = tf.convert_to_tensor(0) c = lambda x: tf.less(x, 10) b = lambda x: tf.add(x, 1) r = tf.cond(tf.less(1, 0), lambda: tf.add(n, 1), lambda: tf.while_loop(c, b, [n])) self.assertAllEqual(10, r.eval()) def testWhileCond_1(self): with self.test_session(): i = tf.convert_to_tensor(0, name="i") n = tf.convert_to_tensor(10, name="n") one = tf.convert_to_tensor(1, name="one") c = lambda x: tf.less(x, n) # pylint: disable=undefined-variable # for OSS build b = lambda x: tf.cond( tf.constant(True), lambda: tf.add(x, one), lambda: tf.sub(x, one)) # pylint: enable=undefined-variable r = tf.while_loop(c, b, [i]) self.assertAllEqual(10, r.eval()) def testWhileCond_2(self): with self.test_session(): n = tf.convert_to_tensor(0, name="n") c = lambda x: tf.less(x, 10) b = lambda x: tf.cond(tf.constant(True), lambda: tf.add(x, 1), lambda: n) r = tf.while_loop(c, b, [n]) self.assertAllEqual(10, r.eval()) def testWhileCond_3(self): with self.test_session(): n = tf.convert_to_tensor(0) c = lambda x: tf.less(x, 10) # pylint: disable=undefined-variable # for OSS build b = lambda x: tf.cond(tf.less(0, 1), lambda: tf.add(x, 1), lambda: tf.sub(x, 1)) # pylint: enable=undefined-variable r = tf.while_loop(c, b, [n]) self.assertAllEqual(10, r.eval()) # NOTE: It is ok to have parallel_iterations > 1 def testWhileUpdateVariable_1(self): with self.test_session(): select = tf.Variable([3.0, 4.0, 5.0]) n = tf.constant(0) def loop_iterator(j): return tf.less(j, 3) def loop_body(j): ns = tf.scatter_update(select, j, 10.0) nj 
= tf.add(j, 1) op = control_flow_ops.group(ns) nj = control_flow_ops.with_dependencies([op], nj) return [nj] r = tf.while_loop(loop_iterator, loop_body, [n], parallel_iterations=1) self.assertTrue(check_op_order(n.graph)) tf.initialize_all_variables().run() self.assertEqual(3, r.eval()) result = select.eval() self.assertAllClose(np.array([10.0, 10.0, 10.0]), result) def testWhileUpdateVariable_2(self): with self.test_session(): select1 = tf.Variable([3.0, 4.0, 5.0]) select2 = tf.Variable([3.0, 4.0, 5.0]) n = tf.constant(0) def loop_iterator(j): return tf.less(j, 3) def loop_body(j): ns1 = tf.scatter_update(select1, j, 10.0) ns2 = tf.scatter_update(select2, j, 10.0) nj = tf.add(j, 1) op = control_flow_ops.group(ns1, ns2) nj = control_flow_ops.with_dependencies([op], nj) return [nj] r = tf.while_loop(loop_iterator, loop_body, [n], parallel_iterations=1) self.assertTrue(check_op_order(n.graph)) tf.initialize_all_variables().run() self.assertEqual(3, r.eval()) result1 = select1.eval() self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1) result2 = select2.eval() self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2) def testWhileUpdateVariable_3(self): with self.test_session(): select = tf.Variable([3.0, 4.0, 5.0]) n = tf.constant(0) def loop_iterator(j, _): return tf.less(j, 3) def loop_body(j, _): ns = tf.scatter_update(select, j, 10.0) nj = tf.add(j, 1) return [nj, ns] r = tf.while_loop(loop_iterator, loop_body, [n, tf.identity(select)], parallel_iterations=1) tf.initialize_all_variables().run() result = r[1].eval() self.assertTrue(check_op_order(n.graph)) self.assertAllClose(np.array([10.0, 10.0, 10.0]), result) # b/24814703 def testWhileUpdateVariable_4(self): with self.test_session(): var_a = tf.Variable(0, name="a") var_b = tf.Variable(0, name="b") tf.initialize_all_variables().run() c = tf.constant(0, name="c") asn1 = tf.assign_add(var_a, 1, name="a_add") # Loop condition def pred(i): return tf.less(i, 10) # Loop body def loop_body(i): asn2 = tf.assign_add(var_b, asn1, name="b_add") with tf.control_dependencies([asn2]): ni = tf.add(i, 1, name="i_add") return ni lpa = tf.while_loop(pred, loop_body, [c], parallel_iterations=1) self.assertEqual(0, var_b.eval()) lpa.eval() # Run the loop self.assertEqual(10, var_b.eval()) # b/24736492 def testWhileUpdateVariable_5(self): with self.test_session(): # Create some variables. var_a = tf.Variable(0, name="a") var_b = tf.Variable(0, name="b") tf.initialize_all_variables().run() # Change condition to check var_b def pred(_): return tf.less(var_b, 10) # Change body to increment var_b def loop_body(i): asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add") asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add") with tf.control_dependencies([asn1, asn2]): inc_b = tf.identity(var_b) return inc_b lpa = tf.while_loop(pred, loop_body, [var_b], 1, name="loop") self.assertEqual(0, var_b.eval()) lpa.eval() # Run the loop self.assertEqual(10, var_a.eval()) self.assertEqual(10, var_b.eval()) # b/24814668 def testWhileUpdateVariable_6(self): with self.test_session(): # Create some variables. 
var_a = tf.Variable(0, name="a") var_b = tf.Variable(0, name="b") c = tf.constant(0) tf.initialize_all_variables().run() # Loop condition def pred(i): return tf.less(i, 10) # Loop body def loop_body(i): asn1 = tf.assign_add(var_a, 1, name="a_add") with tf.control_dependencies([asn1]): asn2 = tf.assign_add(var_b, var_a, name="b_add") with tf.control_dependencies([asn2]): ni = tf.add(i, 1, name="i_add") return ni lpa = tf.while_loop(pred, loop_body, [c], 1, name="loop") self.assertEqual(0, var_b.eval()) lpa.eval() # Run the loop self.assertEqual(55, var_b.eval()) self.assertEqual(10, var_a.eval()) def testWhileQueue_1(self): with self.test_session(): q = tf.FIFOQueue(-1, tf.int32) i = tf.constant(0) def c(i): return tf.less(i, 10) def b(i): ni = tf.add(i, 1) ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni) return ni r = tf.while_loop(c, b, [i], parallel_iterations=1) self.assertEqual([10], r.eval()) for i in xrange(10): self.assertEqual([i], q.dequeue().eval()) def testWhileStack_1(self): with self.test_session(): s = gen_data_flow_ops._stack(tf.int32, stack_name="foo") i = tf.constant(0) def c(i): return tf.less(i, 10) def b(i): ni = tf.add(i, 1) ni = control_flow_ops.with_dependencies( [gen_data_flow_ops._stack_push(s, i)], ni) return ni r = tf.while_loop(c, b, [i], parallel_iterations=1) x = tf.constant(0) def c1(i, _): return tf.greater(i, 0) def b1(i, x): ni = tf.sub(i, 1) nx = x + gen_data_flow_ops._stack_pop(s, tf.int32) return [ni, nx] _, rx = tf.while_loop(c1, b1, [r, x], parallel_iterations=1) self.assertEqual(45, rx.eval()) def testWhileGrad_Square(self): with self.test_session(): v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = tf.square r = tf.while_loop(c, b, [v], parallel_iterations=1) r = control_flow_ops.cond(tf.less(1, 2), lambda: r, lambda: v) r = tf.gradients(r, v)[0] self.assertAllClose(1024.0, r.eval()) def testWhileGrad_Shape(self): with self.test_session(): x = tf.placeholder(tf.float32, shape=[None]) v = tf.constant([2.0], name="v") n = tf.constant(0, name="n") c = lambda i, v: tf.less(i, 5) b = lambda i, v: [i + 1, tf.mul(x, v)] r = tf.while_loop(c, b, [n, v], parallel_iterations=1) r = tf.gradients(r[1], x)[0] self.assertEqual([None], r.get_shape().as_list()) self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]})) def testWhileGrad_MultipleUses(self): with self.test_session(): v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = tf.square r = tf.while_loop(c, b, [v], parallel_iterations=1) r = tf.mul(r, r) r = tf.gradients(r, v)[0] self.assertEqual(524288.0, r.eval()) def testWhileGrad_LoopAdd(self): with self.test_session(): v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = tf.square r = tf.while_loop(c, b, [v], parallel_iterations=1) r = tf.add(r, r) r = tf.gradients(r, v)[0] self.assertAllClose(2048.0, r.eval()) def _testWhileGrad_Mul(self, use_gpu, p_iters): with self.test_session(use_gpu=use_gpu) as sess: a = tf.constant(3.0, name="a") v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = lambda v: tf.mul(v, a) r = tf.while_loop(c, b, [v], parallel_iterations=p_iters) grad_a, grad_v = tf.gradients(r, [a, v]) grad_a_val, grad_v_val = sess.run([grad_a, grad_v]) self.assertAllClose(216.0, grad_a_val) self.assertAllClose(81.0, grad_v_val) def testWhileGrad_Mul(self): self._testWhileGrad_Mul(use_gpu=False, p_iters=1) self._testWhileGrad_Mul(use_gpu=False, p_iters=10) self._testWhileGrad_Mul(use_gpu=True, p_iters=1) self._testWhileGrad_Mul(use_gpu=True, p_iters=10) def 
testWhileGrad_Variable(self): with self.test_session(): a = tf.Variable(3.0) v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = lambda v: tf.mul(v, a) r = tf.while_loop(c, b, [v], parallel_iterations=1) r = tf.gradients(r, a) tf.initialize_all_variables().run() self.assertAllClose(216.0, r[0].eval()) def testWhileGrad_ys_xs(self): with self.test_session(): x = tf.constant(3.0, name="x") y = tf.constant(2.0, name="y") c = lambda x, y: tf.less(x, 100.0) def b(x, y): y1 = tf.add(x, y) x1 = tf.mul(x, y1) return x1, y1 rx, ry = tf.while_loop(c, b, [x, y], parallel_iterations=1) r = tf.gradients([rx, ry], x) self.assertAllClose(304.0, r[0].eval()) r = tf.gradients([rx, ry], y) self.assertAllClose(124.0, r[0].eval()) r = tf.gradients([rx], x) self.assertAllClose(295.0, r[0].eval()) r = tf.gradients([rx], y) self.assertAllClose(120.0, r[0].eval()) def testWhileGrad_Dependency(self): with self.test_session(): i = tf.constant(0, name="i") x = tf.constant(2.0, name="x") c = lambda i, x: tf.less(i, 10) def b(i, x): x = tf.mul(x, 2.0) i = tf.add(i, 1) return i, x ri, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1) r = tf.gradients([ri, rx], x) self.assertAllClose(1024.0, r[0].eval()) r = tf.gradients([rx], x) self.assertAllClose(1024.0, r[0].eval()) def testWhileGrad_NoGradient(self): with self.test_session(): v = tf.constant(2.0, name="v") c = lambda v: tf.less(v, 100.0) b = tf.square r = tf.while_loop(c, b, [v], back_prop=False) r = tf.add(r, v) r = tf.gradients(r, v) self.assertAllClose(1.0, r[0].eval()) def testWhileGrad_NoDependency(self): with self.test_session() as sess: variable = tf.Variable(tf.ones([2, 3])) time = tf.zeros([], dtype=tf.int32) def cond(time, tensor, _): return time < 10 def body(time, tensor, _): return (time+1, tensor, tensor) loop_vars = [time, variable, variable] tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars) cost = tf.reduce_sum(tensors[2]) grad = tf.gradients(cost, [variable]) tf.initialize_all_variables().run() self.assertAllClose(np.ones([2, 3]), sess.run(grad[0])) def testWhileGrad_Const(self): with self.test_session() as sess: c0 = tf.constant(0.0, name="c0") c1 = tf.constant(1.0, name="c1") time = tf.constant(0, name="t") def cond(time, _): return time < 1 def body(time, tensor): return time+1, c1 loop_vars = [time, c0] tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars) cost = tf.reduce_sum(tensors[1]) grad = tf.gradients(cost, [c0]) self.assertAllClose(0.0, sess.run(grad[0])) def testWhileGrad_SerialTwoLoops(self): with self.test_session(): i = tf.constant(0, name="i") x = tf.constant(2.0, name="x") c = lambda i, x: tf.less(i, 5) def b(i, x): x = tf.mul(x, 2.0) i = tf.add(i, 1) return i, x _, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1) _, rx = tf.while_loop(c, b, [i, rx], parallel_iterations=1) r = tf.gradients([rx], x) self.assertAllClose(1024.0, r[0].eval()) def testWhileGrad_ParallelTwoLoops(self): with self.test_session(): i = tf.constant(0, name="i") x = tf.constant(2.0, name="x") c = lambda i, x: tf.less(i, 5) def b(i, x): x = tf.mul(x, 2.0) i = tf.add(i, 1) return i, x _, r1 = tf.while_loop(c, b, [i, x], parallel_iterations=1) _, r2 = tf.while_loop(c, b, [i, x], parallel_iterations=1) rx = tf.add(r1, r2) r = tf.gradients([rx], x) self.assertAllClose(64.0, r[0].eval()) def _testNestedWhileGrad_Simple(self, use_gpu): with self.test_session(use_gpu=use_gpu): v = tf.constant(1.0) def inner_loop(s): c = lambda x: tf.less(x, 4.0) b = lambda x: tf.mul(x, 2.0) return tf.while_loop(c, b, [s]) c = 
lambda x: tf.less(x, 2.0) b = lambda x: tf.mul(inner_loop(x), 2.0) r = tf.while_loop(c, b, [v]) r = tf.gradients(r, v)[0] self.assertAllClose(8.0, r.eval()) def testNestedWhileGrad_Simple(self): self._testNestedWhileGrad_Simple(use_gpu=False) self._testNestedWhileGrad_Simple(use_gpu=True) def testNestedWhileGrad_SerialInner(self): with self.test_session(): v = tf.constant(1.0) def inner_loop1(s): z = tf.constant(0) c = lambda i, x: tf.less(i, 4) b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)] return tf.while_loop(c, b, [z, s]) def inner_loop2(s): z = tf.constant(0) c = lambda i, x: tf.less(i, 4) b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)] return tf.while_loop(c, b, [z, s]) c = lambda x: tf.less(x, 128.0) b = lambda x: inner_loop2(inner_loop1(x)[1])[1] r = tf.while_loop(c, b, [v]) r = tf.gradients(r, v)[0] self.assertAllClose(256.0, r.eval()) def testNestedWhileGrad_ParallelInner(self): with self.test_session(): v = tf.constant(1.0) def inner_loop1(s): z = tf.constant(0) c = lambda i, x: tf.less(i, 4) b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)] return tf.while_loop(c, b, [z, s]) def inner_loop2(s): z = tf.constant(0) c = lambda i, x: tf.less(i, 4) b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)] return tf.while_loop(c, b, [z, s]) c = lambda x: tf.less(x, 128.0) b = lambda x: tf.mul(inner_loop1(x)[1], inner_loop2(x)[1]) r = tf.while_loop(c, b, [v]) r = tf.gradients(r, v)[0] self.assertAllClose(512.0, r.eval()) def _testWhileCondGrad_Simple(self, use_gpu): with self.test_session(use_gpu=use_gpu): v = tf.convert_to_tensor(2.0, name="v") n = tf.convert_to_tensor(100.0, name="n") one = tf.convert_to_tensor(1.0, name="one") c = lambda x: tf.less(x, n) # pylint: disable=undefined-variable # for OSS build b = lambda x: control_flow_ops.cond(tf.constant(True), lambda: tf.square(x), lambda: tf.sub(x, one)) # pylint: enable=undefined-variable r = tf.while_loop(c, b, [v]) r = tf.gradients(r, v)[0] self.assertAllClose(1024.0, r.eval()) def testWhileCondGrad_Simple(self): self._testWhileCondGrad_Simple(use_gpu=False) self._testWhileCondGrad_Simple(use_gpu=True) def testWhileCondGrad_UnknownShape(self): with self.test_session() as sess: v = tf.placeholder(tf.float32) n = tf.convert_to_tensor(100.0, name="n") one = tf.convert_to_tensor(1.0, name="one") c = lambda x: tf.less(x, n) # pylint: disable=undefined-variable # for OSS build b = lambda x: control_flow_ops.cond(tf.constant(True), lambda: tf.square(x), lambda: tf.sub(x, one)) # pylint: enable=undefined-variable r = tf.while_loop(c, b, [v]) r = tf.gradients(r, v)[0] r = sess.run(r, feed_dict={v: 2.0}) self.assertAllClose(1024.0, r) def testWhileWithRefsWithGradients_1(self): with self.test_session() as sess: x = tf.Variable(0).ref() i = tf.constant(0) c = lambda i, x: tf.less(i, 10) self.assertEqual(x.dtype, tf.int32_ref) # pylint: disable=protected-access def body(i, x): self.assertEqual(x.dtype, tf.int32_ref) return (i+1, gen_array_ops._ref_identity(x)) # pylint: enable=protected-access r = tf.while_loop(c, body, [i, x], parallel_iterations=5) grad_ys = [tf.Variable(73).ref()] grad = tf.gradients([r[1]], [x], grad_ys=grad_ys) tf.initialize_all_variables().run() self.assertEqual(r[0].dtype, tf.int32) self.assertEqual(r[1].dtype, tf.int32_ref) value_i, value_x, value_x_grad = sess.run(r + grad) self.assertEqual(10, value_i) self.assertEqual(0, value_x) self.assertEqual(73, value_x_grad) def testWhileGrad_IndexedSlices(self): with self.test_session(): values = tf.constant([2.0, 4.0], name="values") indices = tf.constant([0, 3], name="indices") 
shape = tf.constant([10], name="dense_shape") i = tf.constant(0) x = tf.IndexedSlices(values, indices, dense_shape=shape) def c(i, _): return i < 10 def b(i, x): return [i + 1, tf.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)] _, r = tf.while_loop(c, b, [i, x]) r = tf.gradients(r.values, values)[0] self.assertAllClose(np.array([1024.0, 1024.0]), r.eval()) def testWhileGrad_SparseTensor(self): with self.test_session(): values = tf.constant([2.0, 4.0], name="values") indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices") shape = tf.constant([10], dtype=tf.int64, name="dense_shape") i = tf.constant(0) x = tf.SparseTensor(indices, values, shape=shape) def c(i, _): return i < 10 def b(i, x): return [i + 1, tf.SparseTensor(x.indices, x.values * 2.0, x.shape)] _, r = tf.while_loop(c, b, [i, x]) r = tf.gradients(r.values, values)[0] self.assertAllClose(np.array([1024.0, 1024.0]), r.eval()) def testCallGradInLoop(self): with self.test_session() as sess: i0 = tf.constant(0) params = tf.constant(5.0) params_1 = tf.square(params) def c(i, _): return i < 10 def b(i, x): data = tf.constant([1.0, 2.0, 3.0]) data = tf.mul(data, params_1) x1 = x + tf.gradients(data, params)[0] return i + 1, x1 output_grad = tf.while_loop(c, b, [i0, tf.constant(0.0)]) self.assertAllClose(600.0, sess.run(output_grad)[1]) def testWhileGradGrad(self): theta = tf.Variable(initial_value=1.) def fn(x, prev): return prev + x * theta result = tf.scan(fn, [1., 2., 3.]) grad_theta = tf.gradients(result, theta) with self.assertRaisesRegexp(TypeError, "Second-order gradient"): tf.gradients(grad_theta, theta) def testOneValueCond(self): with self.test_session(): c = tf.placeholder(tf.int32, shape=[]) one = tf.convert_to_tensor(1, name="one") two = tf.convert_to_tensor(2, name="two") p = tf.greater_equal(c, 1) i = tf.cond(p, lambda: one, lambda: two) self.assertTrue(isinstance(i, tf.Tensor)) # True case: c = 2 is >= 1 self.assertEqual([1], i.eval(feed_dict={c: 2})) # False case: c = 0 is not >= 1 self.assertEqual([2], i.eval(feed_dict={c: 0})) def testExampleCond(self): with self.test_session(): x = tf.convert_to_tensor([-2.0, 2.0], name="x") d = tf.placeholder(tf.int32, shape=[]) def l2(): return tf.sqrt(tf.reduce_sum(tf.square(x))) def l1(): return tf.reduce_sum(tf.abs(x)) i = tf.cond(tf.equal(d, 2), l2, l1) self.assertAllClose(4.0, i.eval(feed_dict={d: 1})) self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2})) def testCase(self): with self.test_session(): x = tf.constant(1) y = tf.constant(2) z = tf.constant(3) f1 = lambda: tf.constant(17) f2 = lambda: tf.constant(23) f3 = lambda: tf.constant(-1) r1 = tf.case({x < y: f1, x > z: f2}, default=f3, exclusive=True) self.assertAllEqual(r1.eval(), 17) r2 = tf.case([(y > z, f1), (y > x, f2)], default=f3) self.assertAllEqual(r2.eval(), 23) # Duplicate events can happen, first one is selected r3 = tf.case([(x < y, f1), (x < y, f2)], default=f3) self.assertAllEqual(r3.eval(), 17) # Duplicate events cause an error if exclusive = True r4 = tf.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True) with self.assertRaisesOpError( "More than one condition evaluated as True but exclusive=True."): r4.eval() # Check that the default is called if none of the others are r5 = tf.case({x > y: f1}, default=f3) self.assertAllEqual(r5.eval(), -1) ran_once = [False, False, False] def break_run_twice(ix): def _break(): ran_once[ix] = True return tf.constant(ix) return _break # Should not fail - each conditional gets called exactly once # except default. 
Default gets called twice: once to create an # empty output and once for the actual cond switch. r6 = tf.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))], default=lambda: tf.constant(2)) self.assertAllEqual(r6.eval(), 0) def testCaseSideEffects(self): with self.test_session() as sess: v0 = tf.Variable(-1) v1 = tf.Variable(-1) v2 = tf.Variable(-1) a = lambda: control_flow_ops.with_dependencies([tf.assign(v0, 0)], 0) b = lambda: control_flow_ops.with_dependencies([tf.assign(v1, 1)], 1) c = lambda: control_flow_ops.with_dependencies([tf.assign(v2, 2)], 2) x = tf.constant(1) y = tf.constant(2) r0 = tf.case(((x < y, a), (x > y, b)), default=c, exclusive=True) r1 = tf.case(((x > y, a), (x < y, b)), default=c, exclusive=True) r2 = tf.case(((x > y, a), (x > y, b)), default=c, exclusive=True) tf.initialize_all_variables().run() self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3) self.assertEqual(2, r2.eval()) self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2]) tf.initialize_all_variables().run() self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3) self.assertEqual(1, r1.eval()) self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1]) tf.initialize_all_variables().run() self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3) self.assertEqual(0, r0.eval()) self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1]) def testOneOpCond(self): with self.test_session(): v = tf.Variable(0) c = tf.convert_to_tensor(0) one = tf.convert_to_tensor(1) two = tf.convert_to_tensor(2) p = tf.greater_equal(c, 1) def a(): return tf.assign(v, one) def b(): return tf.assign(v, two) i = tf.cond(p, a, b) self.assertTrue(isinstance(i, tf.Tensor)) tf.initialize_all_variables().run() self.assertEqual(0, v.eval()) # True case: c = 2 is >= 1, v is set to 1. self.assertEqual(1, i.eval(feed_dict={c.name: 2})) self.assertEqual(1, v.eval()) # False case: c = 0 is not >= 1, v is set to 2. self.assertEqual(2, i.eval(feed_dict={c.name: 0})) self.assertEqual(2, v.eval()) def testWithOpsDependencies(self): with self.test_session() as sess: v = tf.Variable(0.0) c = tf.constant(10) # Fetching v directly will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): sess.run([c, v]) # Use a control dependency to ensure init_variable is run # while asking for c real_v = control_flow_ops.with_dependencies( name="real_tensor", output_tensor=v.ref(), dependencies=[v.initializer]) c_val, real_v_val = sess.run([c, real_v]) # Ensure the result of 'real_c' is the same as 'c' self.assertAllEqual(10, c_val) # Ensure that 'v' is initialized self.assertAllClose(0.0, real_v_val) def testWithTensorDependencies(self): with self.test_session(): v = tf.Variable(0.0) c1 = tf.constant(10) c2 = tf.constant(20) # c1_with_init_v depends on the init op for v c1_with_init_v = control_flow_ops.with_dependencies( name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer]) # c2_with_c1 depends on the value of c1_with_init_v c2_with_c1_dep = control_flow_ops.with_dependencies( name="c2_with_c1_dep", output_tensor=c2, dependencies=[c1_with_init_v]) # Fetching v directly will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): v.eval() # Get the value of 'c2_with_c1_dep', which should cause 'v' # to be initialized. 
self.assertAllEqual(20, c2_with_c1_dep.eval()) # Ensure that 'v' is initialized self.assertAllClose(0.0, v.eval()) def testWithIndexedSlicesDependencies(self): with self.test_session(): v = tf.Variable( np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32)) v_at_1 = tf.IndexedSlices(v, tf.constant([1])) gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices) v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer], v_at_1) gather_v_at_1_after_init = tf.gather( v_at_1_after_init.values, v_at_1_after_init.indices) # Fetching gather_v_at_1 will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): gather_v_at_1.eval() # Getting gather_v_at_1_after_init will work, and initialize v. self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval()) # Double check that 'v' is initialized self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval()) def testDependenciesDevice(self): with tf.Graph().as_default(): # device set on tensor => same device on dep. with tf.device("/job:ps"): vd = tf.Variable([0.0]) with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd) self.assertTrue("/job:ps" in with_vd_dep.device) # No device set on tensor => no device on dep. vnod = tf.Variable([0.0]) with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer], vnod) self.assertDeviceEqual(None, with_vnod_dep.device) # device set on tensor, default device on graph => default device on dep. vdef = tf.Variable([0.0], name="vdef") with tf.device("/job:worker/gpu:1"): with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer], vdef) # The device is empty, but the colocation constraint is set. self.assertDeviceEqual("", with_vdef_dep.device) self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups()) def testGroup(self): with self.test_session() as sess: v1 = tf.Variable([0.0]) v2 = tf.Variable([1.0]) # Group init1 and init2 and run. init = control_flow_ops.group(v1.initializer, v2.initializer) # Fetching v1 directly will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): v1.eval() # Runs "init" before fetching v1 and v2. init.run() v1_val, v2_val = sess.run([v1, v2]) # Ensure that v1 and v2 are initialized self.assertAllClose([0.0], v1_val) self.assertAllClose([1.0], v2_val) def testGroupEmpty(self): op = tf.group() self.assertEqual(op.type, "NoOp") self.assertEqual(op.control_inputs, []) def testMergeShapes(self): # All inputs unknown. p1 = tf.placeholder(tf.float32) p2 = tf.placeholder(tf.float32) p3 = tf.placeholder(tf.float32) m, index = control_flow_ops.merge([p1, p2, p3]) self.assertIs(None, m.get_shape().ndims) self.assertEqual([], index.get_shape()) # All inputs known with different ranks. p1 = tf.placeholder(tf.float32, shape=[1, 2]) p2 = tf.placeholder(tf.float32, shape=[1, 2, 3]) m, index = control_flow_ops.merge([p1, p2]) self.assertIs(None, m.get_shape().ndims) self.assertEqual([], index.get_shape()) # All inputs known with some dimensions different. 
p1 = tf.placeholder(tf.float32, shape=[1, 2]) p2 = tf.placeholder(tf.float32, shape=[2, 1]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, None], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = tf.placeholder(tf.float32, shape=[1, 2]) p2 = tf.placeholder(tf.float32, shape=[None, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = tf.placeholder(tf.float32, shape=[1, 2]) p2 = tf.placeholder(tf.float32, shape=[2, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) # All inputs known with same dimensions. p1 = tf.placeholder(tf.float32, shape=[1, 2]) p2 = tf.placeholder(tf.float32, shape=[1, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([1, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = tf.placeholder(tf.float32, shape=[None, 2]) p2 = tf.placeholder(tf.float32, shape=[None, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = tf.placeholder(tf.float32, shape=[None, None]) p2 = tf.placeholder(tf.float32, shape=[None, None]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, None], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) def testRefSelect(self): index = tf.placeholder(tf.int32) # All inputs unknown. p1 = tf.placeholder(tf.float32) p2 = tf.placeholder(tf.float32) p3 = tf.placeholder(tf.float32) v1 = tf.Variable(p1, validate_shape=False) v2 = tf.Variable(p2, validate_shape=False) v3 = tf.Variable(p3, validate_shape=False) s = control_flow_ops.ref_select(index, [v1, v2, v3]) self.assertIs(None, s.get_shape().ndims) # All inputs known but different. v1 = tf.Variable([[1, 2]]) v2 = tf.Variable([[2], [1]]) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertIs(None, s.get_shape().ndims) # All inputs known and same. v1 = tf.Variable([[1, 2]]) v2 = tf.Variable([[1, 2]]) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertEqual([1, 2], s.get_shape()) # Possibly the same but not guaranteed. v1 = tf.Variable([[1., 2.]]) p2 = tf.placeholder(tf.float32, shape=[None, 2]) v2 = tf.Variable(p2, validate_shape=False) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertEqual(None, s.get_shape()) def testRunLoopTensor(self): with self.test_session() as sess: tensor_list = [] def condition(t): return t < tf.constant(5) def body(_): tensor_list.append(tf.constant(5)) return tf.constant(10) result = tf.while_loop(condition, body, [tf.constant(4)]) self.assertEqual(10, sess.run(result)) # Ensure that we cannot run a tensor that escapes the loop body # accidentally. with self.assertRaises(ValueError): sess.run(tensor_list[0]) class TupleTest(tf.test.TestCase): def testTensors(self): for v1_first in [True, False]: with self.test_session(): v1 = tf.Variable([1.0]) add1 = tf.add( control_flow_ops.with_dependencies([v1.initializer], v1.ref()), 2.0) v2 = tf.Variable([10.0]) add2 = tf.add( control_flow_ops.with_dependencies([v2.initializer], v2.ref()), 20.0) t1, _, t2 = control_flow_ops.tuple([add1, None, add2]) # v1 is not initialized. with self.assertRaisesOpError("Attempting to use uninitialized value"): v1.eval() # v2 is not initialized. with self.assertRaisesOpError("Attempting to use uninitialized value"): v2.eval() if v1_first: # Getting t1 initializes v2. 
self.assertAllClose([3.0], t1.eval()) self.assertAllClose([10.0], v2.eval()) else: # Getting t2 initializes v1. self.assertAllClose([30.0], t2.eval()) self.assertAllClose([1.0], v1.eval()) def testIndexedSlices(self): for v1_first in [True, False]: with self.test_session(): v1 = tf.Variable( np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype( np.float32)) v1_at_1 = tf.IndexedSlices( control_flow_ops.with_dependencies([v1.initializer], v1.ref()), tf.constant([1])) v2 = tf.Variable( np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype( np.float32)) v2_at_1 = tf.IndexedSlices( control_flow_ops.with_dependencies([v2.initializer], v2.ref()), tf.constant([1])) st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1]) g1 = tf.gather(st1.values, st1.indices) g2 = tf.gather(st2.values, st2.indices) # v1 is not initialized. with self.assertRaisesOpError("Attempting to use uninitialized value"): v1.eval() # v2 is not initialized. with self.assertRaisesOpError("Attempting to use uninitialized value"): v2.eval() if v1_first: # Getting g1 initializes v2. self.assertAllClose([[10.0, 11.0]], g1.eval()) self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]], v2.eval()) else: # Getting g2 initializes v1. self.assertAllClose([[10.1, 11.1]], g2.eval()) self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v1.eval()) def testAcceptTensorsAsControlInputs(self): with self.test_session(): var = tf.Variable(0) assign = tf.assign(var, 1) t, = tf.tuple([tf.constant(0)], control_inputs=[assign]) # Should trigger the assign. t.eval() self.assertEquals(1, var.eval()) def testWhilePyFuncBasic(self): def func(x): return np.square(x) with self.test_session(): r = tf.while_loop( lambda i, v: i < 4, lambda i, v: [i + 1, tf.py_func(func, [v], [tf.float32])[0]], [tf.constant(0), tf.constant(2.0, tf.float32)]) self.assertEqual(r[1].eval(), 65536.0) def testWhileFuncBasic(self): @function.Defun(tf.float32) def func(x): return tf.square(tf.square(x)) with self.test_session(): x = tf.constant(2.0, tf.float32) r = tf.while_loop( lambda i, v: i < 2, lambda i, v: [i + 1, func(v)], [tf.constant(0), x]) self.assertEqual(r[1].eval(), 65536.0) r = tf.gradients(r, x)[0] self.assertEqual(r.eval(), 524288.0) self.assertEqual(len([op for op in x.graph.get_operations() if op.type == "Stack"]), 1) if __name__ == "__main__": tf.test.main()
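# --- Illustrative note (not part of the original test file) ---
# The canonical tf.while_loop pattern these tests exercise, in isolation
# (TF 0.x-era API, matching the rest of this file):
#
#     i = tf.constant(0)
#     cond = lambda i: tf.less(i, 10)
#     body = lambda i: tf.add(i, 1)
#     r = tf.while_loop(cond, body, [i])
#     # r.eval() == 10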
apache-2.0
7,064,295,795,435,686,000
32.60181
80
0.57536
false
3.012118
true
false
false
gamesun/MyCdecl
main.py
1
5018
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following disclaimer
#     in the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of gamesun nor the names of its contributors
#     may be used to endorse or promote products derived from this software
#     without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import wx
import os
import re

QUALIFIER = ("const", "volatile", "signed", "unsigned")
BASICTYPE = ("void", "char", "short", "int", "long",
             "float", "double", "struct", "enum", "union")

regex_matchEnum = re.compile(r'enum[\s\S]*?\{[\s\S]*?\}\s(?P<enum>\w+)')
regex_matchUnion = re.compile(r'union[\s\S]*?\{(?:[^{}]|\{[^{}]*\})*\}\s(?P<enum>\w+)')
regex_matchStruct = re.compile(r'struct[\s\S]*?\{[\s\S]*?\}\s(?P<enum>\w+)')
regex_matchType = re.compile(r'typedef\s.*?(?P<type>\w+);')


class MyApp(wx.App):
    def OnInit(self):
        path = os.getcwd()
        self.DeclarationAnalysis(path)
        return True

    def DeclarationAnalysis(self, path):
        typeDecl = self.FindAllTypeDecl(path)
        print typeDecl[0]
        print typeDecl[1]
        print typeDecl[2]
        print typeDecl[3]
        print
        variableList = self.FindAllVariable(path, typeDecl)
        print variableList[0]
        print variableList[1]
        print variableList[2]
        print variableList[3]

    def FindAllVariable(self, path, typeDecl):
        """ return as [] """
        variableList = [['enum', []], ['union', []], ['struct', []], ['type', []]]
        for dirpath, dirnames, filenames in os.walk(path):
            for filename in filenames:
                extension = os.path.splitext(filename)[1]
                if extension == '.h' or extension == '.c':
                    filepath = os.path.join(dirpath, filename)
                    f = open(filepath, "rb")
                    string = f.read()
                    f.close()
                    for e in typeDecl[0][1]:
                        variableList[0][1] += re.findall('%s\s+(\w+);' % e, string)
                    for u in typeDecl[1][1]:
                        variableList[1][1] += re.findall('%s\s+(\w+);' % u, string)
                    for s in typeDecl[2][1]:
                        variableList[2][1] += re.findall('%s\s+(\w+);' % s, string)
                    for t in typeDecl[3][1]:
                        variableList[3][1] += re.findall('%s\s+(\w+);' % t, string)
        return variableList

    def FindAllTypeDecl(self, path):
        """ return as [ (enum,(,,,)), (union,(,,,)), (struct,(,,,)), [type,[,,,]] ] """
        result = [['enum', []], ['union', []], ['struct', []], ['type', []]]
        for dirpath, dirnames, filenames in os.walk(path):
            for filename in filenames:
                extension = os.path.splitext(filename)[1]
                if extension == '.h' or extension == '.c':
                    filepath = os.path.join(dirpath, filename)
                    f = open(filepath, "rb")
                    string = f.read()
                    f.close()
                    result[0][1] += regex_matchEnum.findall(string)
                    result[1][1] += regex_matchUnion.findall(string)
                    result[2][1] += regex_matchStruct.findall(string)
                    result[3][1] += regex_matchType.findall(string)
        result[3][1] += BASICTYPE
        return result


if __name__ == '__main__':
    app = MyApp(0)
    app.MainLoop()
bsd-3-clause
-6,601,908,336,828,603,000
38.144
100
0.535273
false
4.004789
false
false
false
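The MyApp class in the gamesun record above only hooks DeclarationAnalysis into wxPython startup; the scanning itself is plain re/os work. A minimal sketch exercising the same regexes without wx (the sample C header text is invented for illustration):

import re

regex_matchStruct = re.compile(r'struct[\s\S]*?\{[\s\S]*?\}\s(?P<name>\w+)')
regex_matchType = re.compile(r'typedef\s.*?(?P<type>\w+);')

sample_header = """
typedef unsigned int u32;
struct point { int x; int y; } point_t;
"""

print(regex_matchStruct.findall(sample_header))  # ['point_t']
print(regex_matchType.findall(sample_header))    # ['u32']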
SnowWalkerJ/quantlib
quant/data/wind/tables/asharestockrating.py
1
2127
from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE

VARCHAR2 = VARCHAR


class AShareStockRating(BaseModel):
    """
    4.75 China A-Share Investment Rating Details

    Attributes
    ----------
    object_id: VARCHAR2(100)
        object ID
    s_info_windcode: VARCHAR2(40)
        Wind code
    s_est_institute: VARCHAR2(100)
        research institution name
    s_est_ratinganalyst: VARCHAR2(100)
        analyst name
    s_est_estnewtime_inst: VARCHAR2(8)
        rating date
    s_est_scorerating_inst: NUMBER(20,4)
        current standardized rating
    s_est_prescorerating_inst: NUMBER(20,4)
        previous standardized rating
    s_est_lowprice_inst: NUMBER(20,4)
        current lowest target price
    s_est_highprice_inst: NUMBER(20,4)
        current highest target price
    s_est_prelowprice_inst: NUMBER(20,4)
        previous lowest target price
    s_est_prehighprice_inst: NUMBER(20,4)
        previous highest target price
    ann_dt: VARCHAR2(8)
        announcement date (internal); records when the earnings-forecast information reached the Wind platform, precise to the day (the exact time of day is not stored)
    s_est_rating_inst: VARCHAR(20)
        current rating
    s_est_prerating_inst: VARCHAR(20)
        previous rating
    opdate: DATETIME
        opdate
    opmode: VARCHAR(1)
        opmode
    """
    __tablename__ = "AShareStockRating"
    object_id = Column(VARCHAR2(100), primary_key=True)
    s_info_windcode = Column(VARCHAR2(40))
    s_est_institute = Column(VARCHAR2(100))
    s_est_ratinganalyst = Column(VARCHAR2(100))
    s_est_estnewtime_inst = Column(VARCHAR2(8))
    s_est_scorerating_inst = Column(NUMBER(20,4))
    s_est_prescorerating_inst = Column(NUMBER(20,4))
    s_est_lowprice_inst = Column(NUMBER(20,4))
    s_est_highprice_inst = Column(NUMBER(20,4))
    s_est_prelowprice_inst = Column(NUMBER(20,4))
    s_est_prehighprice_inst = Column(NUMBER(20,4))
    ann_dt = Column(VARCHAR2(8))
    s_est_rating_inst = Column(VARCHAR(20))
    s_est_prerating_inst = Column(VARCHAR(20))
    opdate = Column(DATETIME)
    opmode = Column(VARCHAR(1))
gpl-3.0
3,560,199,213,089,072,600
29.5
109
0.619249
false
2.424359
false
false
false
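Assuming BaseModel follows the usual SQLAlchemy declarative pattern and a session factory exists elsewhere in the quant package, a query against this table might look like the sketch below; the session object and the Wind code are placeholders:

# hypothetical session; the real factory lives elsewhere in the quant package
latest = (
    session.query(AShareStockRating)
    .filter(AShareStockRating.s_info_windcode == "600000.SH")   # placeholder Wind code
    .order_by(AShareStockRating.s_est_estnewtime_inst.desc())
    .limit(10)
    .all()
)
for row in latest:
    print(row.s_est_institute, row.s_est_rating_inst, row.s_est_highprice_inst)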
nikpap/inspire-next
inspirehep/modules/workflows/tasks/classifier.py
1
4238
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2015 CERN. # # INSPIRE is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Set of tasks for classification.""" from functools import wraps from ..proxies import antihep_keywords def filter_core_keywords(obj, eng): """Filter core keywords.""" result = obj.extra_data.get('classifier_results').get("complete_output") if result is None: return filtered_core_keywords = {} for core_keyword, times_counted in result.get("Core keywords").items(): if core_keyword not in antihep_keywords: filtered_core_keywords[core_keyword] = times_counted result["Filtered Core keywords"] = filtered_core_keywords obj.extra_data['classifier_results']["complete_output"] = result def classify_paper(taxonomy, rebuild_cache=False, no_cache=False, output_limit=20, spires=False, match_mode='full', with_author_keywords=False, extract_acronyms=False, only_core_tags=False, fast_mode=False): """Extract keywords from a pdf file or metadata in a OAI harvest.""" @wraps(classify_paper) def _classify_paper(obj, eng): from invenio_classifier.errors import ClassifierException from invenio_classifier import ( get_keywords_from_text, get_keywords_from_local_file, ) params = dict( taxonomy_name=taxonomy, output_mode='dict', output_limit=output_limit, spires=spires, match_mode=match_mode, no_cache=no_cache, with_author_keywords=with_author_keywords, rebuild_cache=rebuild_cache, only_core_tags=only_core_tags, extract_acronyms=extract_acronyms ) fast_mode = False try: # FIXME: May need to find another canonical way of getting PDF if "pdf" in obj.extra_data: result = get_keywords_from_local_file( obj.extra_data["pdf"], **params ) else: data = [] titles = obj.data.get('titles') if titles: data.extend([t.get('title', '') for t in titles]) abstracts = obj.data.get('abstracts') if abstracts: data.extend([t.get('value', '') for t in abstracts]) if not data: obj.log.error("No classification done due to missing data.") return result = get_keywords_from_text(data, **params) fast_mode = True except ClassifierException as e: obj.log.exception(e) return result['complete_output'] = clean_instances_from_data( result.get("complete_output", {}) ) result["fast_mode"] = fast_mode # Check if it is not empty output before adding if any(result.get("complete_output", {}).values()): obj.extra_data['classifier_results'] = result return _classify_paper def clean_instances_from_data(output): """Check if specific keys are of InstanceType and replace them with their id.""" from invenio_classifier.reader import KeywordToken new_output = {} for output_key in output.keys(): keywords = output[output_key] for key in keywords: if isinstance(key, KeywordToken): keywords[key.id] = keywords.pop(key) new_output[output_key] = keywords return new_output
gpl-2.0
-5,475,445,670,009,735,000
36.504425
84
0.607834
false
4.187747
false
false
false
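classify_paper above is a task factory: it captures the classifier configuration in a closure and returns the actual workflow callable. A sketch of how a workflow definition might wire it together with filter_core_keywords; the taxonomy file name is a placeholder:

# illustrative step list; "HEPont.rdf" is a placeholder taxonomy name
steps = [
    classify_paper(
        taxonomy="HEPont.rdf",
        output_limit=20,
        with_author_keywords=True,
    ),
    filter_core_keywords,
]
# a workflow engine would later invoke each step as step(obj, eng)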
trosa/forca
reportlab/lib/testutils.py
1
11504
#Copyright ReportLab Europe Ltd. 2000-2008 #see license.txt for license details __version__='''$Id: testutils.py 3662 2010-02-09 11:23:58Z rgbecker $''' __doc__="""Provides support for the test suite. The test suite as a whole, and individual tests, need to share certain support functions. We have to put these in here so they can always be imported, and so that individual tests need to import nothing more than "reportlab.whatever..." """ import sys, os, string, fnmatch, copy, re from ConfigParser import ConfigParser import unittest # Helper functions. def isWritable(D): try: fn = '00DELETE.ME' f = open(fn, 'w') f.write('test of writability - can be deleted') f.close() if os.path.isfile(fn): os.remove(fn) return 1 except: return 0 _OUTDIR = None RL_HOME = None testsFolder = None def setOutDir(name): """Is it a writable file system distro being invoked within test directory? If so, can write test output here. If not, it had better go in a temp directory. Only do this once per process""" global _OUTDIR, RL_HOME, testsFolder if _OUTDIR: return _OUTDIR D = [d[9:] for d in sys.argv if d.startswith('--outdir=')] if D: _OUTDIR = D[-1] try: os.makedirs(_OUTDIR) except: pass for d in D: sys.argv.remove(d) else: assert name=='__main__',"setOutDir should only be called in the main script" scriptDir=os.path.dirname(sys.argv[0]) if not scriptDir: scriptDir=os.getcwd() _OUTDIR = scriptDir if not isWritable(_OUTDIR): _OUTDIR = get_rl_tempdir('reportlab_test') import reportlab RL_HOME=reportlab.__path__[0] if not os.path.isabs(RL_HOME): RL_HOME=os.path.normpath(os.path.abspath(RL_HOME)) topDir = os.path.dirname(RL_HOME) testsFolder = os.path.join(topDir,'tests') if not os.path.isdir(testsFolder): testsFolder = os.path.join(os.path.dirname(topDir),'tests') if not os.path.isdir(testsFolder): if name=='__main__': scriptDir=os.path.dirname(sys.argv[0]) if not scriptDir: scriptDir=os.getcwd() testsFolder = os.path.abspath(scriptDir) else: testsFolder = None if testsFolder: sys.path.insert(0,os.path.dirname(testsFolder)) return _OUTDIR def outputfile(fn): """This works out where to write test output. If running code in a locked down file system, this will be a temp directory; otherwise, the output of 'test_foo.py' will normally be a file called 'test_foo.pdf', next door. """ D = setOutDir(__name__) if fn: D = os.path.join(D,fn) return D def printLocation(depth=1): if sys._getframe(depth).f_locals.get('__name__')=='__main__': outDir = outputfile('') if outDir!=_OUTDIR: print 'Logs and output files written to folder "%s"' % outDir def makeSuiteForClasses(*classes): "Return a test suite with tests loaded from provided classes." suite = unittest.TestSuite() loader = unittest.TestLoader() for C in classes: suite.addTest(loader.loadTestsFromTestCase(C)) return suite def getCVSEntries(folder, files=1, folders=0): """Returns a list of filenames as listed in the CVS/Entries file. 'folder' is the folder that should contain the CVS subfolder. If there is no such subfolder an empty list is returned. 'files' is a boolean; 1 and 0 means to return files or not. 'folders' is a boolean; 1 and 0 means to return folders or not. """ join = os.path.join split = string.split # If CVS subfolder doesn't exist return empty list. try: f = open(join(folder, 'CVS', 'Entries')) except IOError: return [] # Return names of files and/or folders in CVS/Entries files. 
allEntries = [] for line in f.readlines(): if folders and line[0] == 'D' \ or files and line[0] != 'D': entry = split(line, '/')[1] if entry: allEntries.append(join(folder, entry)) return allEntries # Still experimental class extending ConfigParser's behaviour. class ExtConfigParser(ConfigParser): "A slightly extended version to return lists of strings." pat = re.compile('\s*\[.*\]\s*') def getstringlist(self, section, option): "Coerce option to a list of strings or return unchanged if that fails." value = ConfigParser.get(self, section, option) # This seems to allow for newlines inside values # of the config file, but be careful!! val = string.replace(value, '\n', '') if self.pat.match(val): return eval(val) else: return value # This class as suggested by /F with an additional hook # to be able to filter filenames. class GlobDirectoryWalker: "A forward iterator that traverses files in a directory tree." def __init__(self, directory, pattern='*'): self.index = 0 self.pattern = pattern directory.replace('/',os.sep) if os.path.isdir(directory): self.stack = [directory] self.files = [] else: from reportlab.lib.utils import isCompactDistro, __loader__, rl_isdir if not isCompactDistro() or not __loader__ or not rl_isdir(directory): raise ValueError('"%s" is not a directory' % directory) self.directory = directory[len(__loader__.archive)+len(os.sep):] pfx = self.directory+os.sep n = len(pfx) self.files = map(lambda x, n=n: x[n:],filter(lambda x,pfx=pfx: x.startswith(pfx),__loader__._files.keys())) self.stack = [] def __getitem__(self, index): while 1: try: file = self.files[self.index] self.index = self.index + 1 except IndexError: # pop next directory from stack self.directory = self.stack.pop() self.files = os.listdir(self.directory) # now call the hook self.files = self.filterFiles(self.directory, self.files) self.index = 0 else: # got a filename fullname = os.path.join(self.directory, file) if os.path.isdir(fullname) and not os.path.islink(fullname): self.stack.append(fullname) if fnmatch.fnmatch(file, self.pattern): return fullname def filterFiles(self, folder, files): "Filter hook, overwrite in subclasses as needed." return files class RestrictedGlobDirectoryWalker(GlobDirectoryWalker): "An restricted directory tree iterator." def __init__(self, directory, pattern='*', ignore=None): GlobDirectoryWalker.__init__(self, directory, pattern) if ignore == None: ignore = [] self.ignoredPatterns = [] if type(ignore) == type([]): for p in ignore: self.ignoredPatterns.append(p) elif type(ignore) == type(''): self.ignoredPatterns.append(ignore) def filterFiles(self, folder, files): "Filters all items from files matching patterns to ignore." indicesToDelete = [] for i in xrange(len(files)): f = files[i] for p in self.ignoredPatterns: if fnmatch.fnmatch(f, p): indicesToDelete.append(i) indicesToDelete.reverse() for i in indicesToDelete: del files[i] return files class CVSGlobDirectoryWalker(GlobDirectoryWalker): "An directory tree iterator that checks for CVS data." def filterFiles(self, folder, files): """Filters files not listed in CVS subfolder. This will look in the CVS subfolder of 'folder' for a file named 'Entries' and filter all elements from the 'files' list that are not listed in 'Entries'. 
""" join = os.path.join cvsFiles = getCVSEntries(folder) if cvsFiles: indicesToDelete = [] for i in xrange(len(files)): f = files[i] if join(folder, f) not in cvsFiles: indicesToDelete.append(i) indicesToDelete.reverse() for i in indicesToDelete: del files[i] return files # An experimental untested base class with additional 'security'. class SecureTestCase(unittest.TestCase): """Secure testing base class with additional pre- and postconditions. We try to ensure that each test leaves the environment it has found unchanged after the test is performed, successful or not. Currently we restore sys.path and the working directory, but more of this could be added easily, like removing temporary files or similar things. Use this as a base class replacing unittest.TestCase and call these methods in subclassed versions before doing your own business! """ def setUp(self): "Remember sys.path and current working directory." self._initialPath = copy.copy(sys.path) self._initialWorkDir = os.getcwd() def tearDown(self): "Restore previous sys.path and working directory." sys.path = self._initialPath os.chdir(self._initialWorkDir) class NearTestCase(unittest.TestCase): def assertNear(a,b,accuracy=1e-5): if isinstance(a,(float,int)): if abs(a-b)>accuracy: raise AssertionError("%s not near %s" % (a, b)) else: for ae,be in zip(a,b): if abs(ae-be)>accuracy: raise AssertionError("%s not near %s" % (a, b)) assertNear = staticmethod(assertNear) class ScriptThatMakesFileTest(unittest.TestCase): """Runs a Python script at OS level, expecting it to produce a file. It CDs to the working directory to run the script.""" def __init__(self, scriptDir, scriptName, outFileName, verbose=0): self.scriptDir = scriptDir self.scriptName = scriptName self.outFileName = outFileName self.verbose = verbose # normally, each instance is told which method to run) unittest.TestCase.__init__(self) def setUp(self): self.cwd = os.getcwd() global testsFolder scriptDir=self.scriptDir if not os.path.isabs(scriptDir): scriptDir=os.path.join(testsFolder,scriptDir) os.chdir(scriptDir) assert os.path.isfile(self.scriptName), "Script %s not found!" % self.scriptName if os.path.isfile(self.outFileName): os.remove(self.outFileName) def tearDown(self): os.chdir(self.cwd) def runTest(self): fmt = sys.platform=='win32' and '"%s" %s' or '%s %s' p = os.popen(fmt % (sys.executable,self.scriptName),'r') out = p.read() if self.verbose: print out status = p.close() assert os.path.isfile(self.outFileName), "File %s not created!" % self.outFileName
gpl-2.0
6,965,741,556,274,241,000
32.546547
119
0.59362
false
4.104174
true
false
false
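The helpers in testutils combine into the test-script skeleton its docstrings describe: fix the output directory once, write artifacts via outputfile(), and build suites with makeSuiteForClasses(). A minimal sketch of a conforming test_foo.py:

from reportlab.lib.testutils import setOutDir, makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)

import unittest

class FooTestCase(unittest.TestCase):
    def test_write(self):
        # lands next to the tests, or in a temp dir on locked-down systems
        open(outputfile('test_foo.txt'), 'w').write('ok')

def makeSuite():
    return makeSuiteForClasses(FooTestCase)

if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()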
zxqzx/scripts
emailmodule.py
1
1669
#!/usr/bin/env python3

# Import smtplib for the actual sending function
import smtplib

# Import the email modules we'll need
from email.mime.text import MIMEText
import argparse

#User Options
globalsender = ""
loginrequired = "no"
server = "localhost"
port = "587"
starttls = "no"
username = "username"
password = "password"

#Main Function
def main(to, sender, subject, message, attachment):
    # attachment selects the MIME subtype: "plain" (default) or "html"
    msg = MIMEText(message, 'plain')
    if attachment == "html":
        msg = MIMEText(message, 'html')
    if globalsender != "":
        sender = globalsender
    msg['Subject'] = subject
    msg['To'] = to
    msg['From'] = sender
    #msg['Content-Type'] = "text/html; charset='utf-8'"
    #msg['Mime-Version'] = "1.0"
    #msg['Content-Transfer-Encoding'] = "base64"
    print(msg)
    s = smtplib.SMTP(server + ":" + port)
    if starttls == "yes":
        s.starttls()
    if loginrequired == "yes":
        s.login(username, password)
    s.send_message(msg)
    s.quit()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='A')
    parser.add_argument('-t', '--to', dest='users', nargs='*', help='Email address of the receiver.')
    parser.add_argument('-s', '--subject', dest='subject', nargs='*', help="Subject of the message")
    parser.add_argument('-m', '--message', dest='message', nargs='*', help="The actual content of the message")
    parser.add_argument('-f', '--sender', dest='sender', nargs='*', help="Who the message is from")
    args = parser.parse_args()
    to = ", ".join(args.users)
    subject = ' '.join(args.subject)
    message = ' '.join(args.message)
    sender = ' '.join(args.sender) if args.sender else globalsender
    main(to, sender, subject, message, "plain")
mit
-3,210,705,563,425,402,000
20.960526
70
0.627322
false
3.131332
false
false
false
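With the entry-point guard fixed, the script's main() can also be driven directly from Python; the addresses below are placeholders, and the module-level server settings still apply:

from emailmodule import main

main(to="[email protected]",
     sender="[email protected]",
     subject="Test subject",
     message="Hello from emailmodule",
     attachment="plain")   # pass "html" to send an HTML body instead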
haddocking/disvis
disvis/IO/mmcif.py
1
1979
from __future__ import print_function import sys from collections import OrderedDict import numpy as np def parse_cif(infile): if isinstance(infile, file): pass elif isinstance(infile, str): infile = open(infile) else: raise TypeError("Input should either be a file or string.") atom_site = OrderedDict() with infile as f: for line in f: if line.startswith('_atom_site.'): words = line.split('.') atom_site[words[1].strip()] = [] if line.startswith('ATOM'): words = line.split() for key, word in zip(atom_site, words): atom_site[key].append(word) natoms = len(atom_site['id']) dtype = [('atom_id', np.int64), ('name', np.str_, 4), ('resn', np.str_, 4), ('chain', np.str_, 2), ('resi', np.int64), ('x', np.float64), ('y', np.float64), ('z', np.float64), ('occupancy', np.float64), ('bfactor', np.float64), ('element', np.str_, 2), ('charge', np.str_, 2), ('model', np.int64), ] cifdata = np.zeros(natoms, dtype=dtype) cifdata['atom_id'] = np.asarray(atom_site['id'], dtype=np.int64) cifdata['name'] = atom_site['label_atom_id'] cifdata['resn'] = atom_site['label_comp_id'] cifdata['chain'] = atom_site['label_asym_id'] cifdata['resi'] = atom_site['label_seq_id'] cifdata['x'] = atom_site['Cartn_x'] cifdata['y'] = atom_site['Cartn_y'] cifdata['z'] = atom_site['Cartn_z'] cifdata['occupancy'] = atom_site['occupancy'] cifdata['bfactor'] = atom_site['B_iso_or_equiv'] cifdata['element'] = atom_site['type_symbol'].title() cifdata['charge'] = atom_site['pdbx_formal_charge'] cifdata['model'] = atom_site['pdbx_PDB_model_num'] return cifdata if __name__=='__main__': import sys infile = sys.argv[1] data = parse_cif(infile)
apache-2.0
8,828,443,199,216,002,000
33.12069
68
0.54472
false
3.27649
false
false
false
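parse_cif returns a numpy structured array, so columns are addressed by field name. A short usage sketch; the file path is a placeholder:

import numpy as np  # parse_cif already depends on numpy

data = parse_cif("structure.cif")       # placeholder path
ca = data[data['name'] == 'CA']         # select alpha-carbon atoms by field
coords = np.column_stack([ca['x'], ca['y'], ca['z']])
print(coords.shape)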
uclouvain/osis
attribution/tests/ddd/domain/test_teacher.py
1
2419
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django.test import SimpleTestCase from attribution.tests.ddd.factories.teacher import TeacherFactory class TestInitTeacher(SimpleTestCase): def test_full_name_with_spaces(self): obj = TeacherFactory( last_name=" Truc", first_name=" Machin", middle_name=" Chose " ) self.assertEqual(obj.full_name, 'TRUC Machin Chose') def test_full_name_without_middle_name(self): obj = TeacherFactory( last_name=" Truc", first_name=" Machin", ) self.assertEqual(obj.full_name, 'TRUC Machin') class TestTeacherHash(SimpleTestCase): def test_assert_equals(self): obj_1 = TeacherFactory( last_name="Truc", first_name="Machin", middle_name="Chose" ) obj_2 = TeacherFactory( last_name="Truc", first_name="Machin", middle_name="Chose" ) self.assertEqual(obj_1, obj_2) def test_assert_not_equals(self): obj_1 = TeacherFactory() obj_2 = TeacherFactory() self.assertNotEqual(obj_1, obj_2)
agpl-3.0
-4,446,630,505,314,006,000
34.043478
87
0.612903
false
3.976974
true
false
false
halbbob/dff
api/gui/model/search_model.py
1
16526
# DFF -- An Open Source Digital Forensics Framework # Copyright (C) 2009-2011 ArxSys # This program is free software, distributed under the terms of # the GNU General Public License Version 2. See the LICENSE file # at the top of the source tree. # # See http://www.digital-forensic.org for more information about this # project. Please do not directly contact any of the maintainers of # DFF for assistance; the project provides a web site, mailing lists # and IRC channels for your use. # # Author(s): # Solal Jacob <[email protected]> # Romain Bertholon <[email protected]> # from PyQt4.QtCore import SIGNAL, QAbstractItemModel, QModelIndex, QVariant, Qt, QDateTime, QSize, QThread, QMutex, QSemaphore from PyQt4.QtGui import QColor, QIcon, QImage, QImageReader, QPixmap, QPixmapCache, QStandardItemModel, QStandardItem from PyQt4 import QtCore import re from api.types.libtypes import Variant, vtime from api.vfs.libvfs import VFS from api.events.libevents import EventHandler from Queue import * class SearchModel(QAbstractItemModel, EventHandler): """ The VFSItemModel, inheriting QAbstractItemModel, is used by views of the node browser. Data are fetched directly in the VFS. In QTableView, only two column are always displayed : * nodes' names * nodes' size This is up to users to configure which columns they want to display, according to nodes' attributes. The currently selected node's children are storedn in the list self.node_list More documentation on QAbstractItemModel() can be found at : * http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/qabstractitemmodel.html """ def __init__(self, __parent = None, event=False, fm = False): """ Constructor. """ QAbstractItemModel.__init__(self, __parent) EventHandler.__init__(self) # init root + some values self.rootItem = None self.__parent = __parent self.VFS = VFS.Get() self.map = {} self.imagesthumbnails = None self.connect(self, SIGNAL("dataImage"), self.setDataImage) self.translation() self.fetchedItems = 0 self.thumbQueued = {} self.fm = fm self.fm = False self.checkedNodes = set() # those list contains nodes' children of the currently selcted node. self.node_list = [] # list of headers the user choosed to display. self.header_list = [] self.type_list = [] self.disp_module = 0 self.del_sort = 0 self.cacheAttr = (None, None) # connect the mode to the VFS to receive its events if event: self.VFS.connection(self) def setFilterRegExp(self, regExp): return def Event(self, e): """ This method is called when an event is emitted by the VFS (when a node is added into the VFS for example, and the view needs to be redrawed). """ parent = self.rootItem if parent != None: self.node_list = parent.children() # emit signals to redraw the gui self.emit(SIGNAL("layoutAboutToBeChanged()")) self.emit(SIGNAL("layoutChanged()")) def setHeaderData(self, section, orientation, value, role): """ \reimp Add a header data into the header. Emit a `layoutAboutToBeChanged` signal before adding the header and `layoutChanged` once it is done. """ self.emit(SIGNAL("layoutAboutToBeChanged()")) QAbstractItemModel.setHeaderData(self, section, orientation, value, role) self.emit(SIGNAL("layoutChanged()")) def setDataImage(self, index, node, image): pixmap = QPixmap().fromImage(image) pixmapCache.insert(str(node.this), pixmap) self.__parent.currentView().update(index) def imagesThumbnails(self): return self.imagesthumbnails def setRootPath(self, node, kompleter = None): """ Set the path of the root node. 
""" self.fetchedItems = 0 typeWorker.clear() self.rootItem = node if node != None: self.sort(HNAME, Qt.AscendingOrder) if kompleter == None: self.emit(SIGNAL("rootPathChanged"), node) self.reset() def qMin(self, x, y): """ Return `x` if it inferior to `y`, `y` otherwise. """ if x < y: return x else: return y def rowCount(self, parent): """ \returns the number of children of lines of the index `parent`. """ return len(self.node_list) def headerData(self, section, orientation, role=Qt.DisplayRole): """ \reimp \return the header data which role is `role`, or an invalid QVariant() if the data could not be fetched. """ if role != Qt.DisplayRole: return QVariant() nb_s = section - 2 - self.disp_module - self.del_sort if orientation == Qt.Horizontal: if section == HNAME: return QVariant(self.nameTr) elif section == HSIZE: return QVariant(self.sizeTr) elif (self.disp_module != 0) and (section == HMODULE): return QVariant(self.moduleTr) elif (self.del_sort != 0): if (self.disp_module != 0): if (section == (HMODULE + 1)): return QVariant(self.deletedTr) elif section == HMODULE: return QVariant(self.deletedTr) if nb_s >= (len(self.header_list) + len(self.type_list)): return QVariant() elif nb_s >= len(self.header_list): return QVariant(self.type_list[nb_s - len(self.header_list)]) else: return QVariant(self.header_list[nb_s]) def data(self, index, role): """ \reimp Data which can be fetched differs from one view to another and also depends on users configuration. Each nodes' attributes can be displayed in views, or hidden, depending on what users want to display. The only two columns always displayed are node's name and nodes' size (`HNAME` and `HSIZE` columns). The mand types of informations that can be displayed, in addition on names and sizes, are : * the name of the module who generated the node * the MAC time of the nodes (if any) * the mimi-type of the node * all dynamic extended attributes of the node. * a flag indicating if the node is deleted or not Sorting can be performed on all the data by clicking in the correponding header. \param index the index where the data is located \param role the role of the data \return the data which index is `index` and role is `role`, or an invalid QVariant if the date is invalid. 
""" if not index.isValid(): return QVariant() if index.row() > len(self.node_list) or index.row() < 0: return QVariant() node = self.node_list[index.row()] column = index.column() if role == Qt.DisplayRole : # return name, size and eventually module columns if column == HNAME: return QVariant(node.name()) if column == HSIZE: return QVariant(node.size()) if (self.disp_module != 0) and (column == HMODULE): return QVariant(node.fsobj().name) elif (self.del_sort != 0): if (self.disp_module != 0): if (column == (HMODULE + 1)): return QVariant(node.isDeleted()) elif column == HMODULE: return QVariant(node.isDeleted()) # return attributes and type columns try : nb_c = column - 2 - self.disp_module - self.del_sort if nb_c >= (len(self.header_list) + len(self.type_list)): return QVariant() # index error elif nb_c >= len(self.header_list): # the data is a dataType type = self.type_list[nb_c - len(self.header_list)] possible_type = node.dataType().value() return QVariant(possible_type[str(type)].value()) else: if self.cacheAttr[0] != long(node.this): self.cacheAttr = (long(node.this), node.fsoAttributes()) attr = self.cacheAttr[1] value = attr[str(self.header_list[nb_c])] val = value.value() if val == None: return QVariant(" N / A ") if value.type() == 13: return QVariant(QDateTime(val.get_time())) else: return QVariant(val) except IndexError: return QVariant() return QVariant() # returns data corresponding to the role passed in parameter to data() method (icon, background, # etc.) if role == Qt.ForegroundRole: if column == 0: if node.isDeleted(): return QVariant(QColor(Qt.red)) if role == Qt.DecorationRole: if column == HNAME: if not self.imagesthumbnails: return QVariant(QIcon(node.icon())) else: mtype = str(node.dataType()) if mtype.find("broken") != -1: return QVariant(QIcon(":file_broken.png")) pixmap = pixmapCache.find(str(node.this)) if pixmap: return QVariant(QIcon(pixmap)) elif typeWorker.isImage(mtype): typeWorker.enqueue(self, index, node) return QVariant(QIcon(":file_temporary.png")) return QVariant(QIcon(node.icon())) if role == Qt.CheckStateRole: if column == HNAME: if (long(node.this), 0) in self.checkedNodes: if node.hasChildren(): return Qt.PartiallyChecked else: return Qt.Checked elif (long(node.this), 1) in self.checkedNodes: return Qt.Checked else: return Qt.Unchecked return QVariant() def setImagesThumbnails(self, flag): """ Set the image thumbnail. """ self.imagesthumbnails = flag def columnCount(self, parent = QModelIndex()): """ \reimp This number is variable, depending on the configuration. \return the number of displayed columns (at least 2, name and size columns) """ # 2 is for columns names and sizes return len(self.header_list) + 2 + len(self.type_list) \ + self.disp_module + self.del_sort def index(self, row, column, parent = QModelIndex()): """ \reimp Get the index located at row `row` and column `column`, which parent is `parent`. Create the index if it does note exist by calling QAbstractItemModel.createIndex() \param row the row where the index should be located. \param column the column where the index should be located. \param parent the parent of the index (invalid QModelIndex by default, corresponding to root node). \return the index, or an invalid index if an error occured. 
""" if not self.hasIndex(row, column, parent): return QModelIndex() if parent.isValid(): parentItem = self.VFS.getNodeFromPointer(parent.internalId()) else: parentItem = self.rootItem if row < len(self.node_list): childItem = self.node_list[row] else: return QModelIndex() index = self.createIndex(row, column, long(childItem.this)) return index def parent(self, index): """ \reimp \return the parent index of `index` or an invalid QModelIndex if an erroc occurs. """ if not index.isValid(): return QModelIndex() childItem = self.VFS.getNodeFromPointer(index.internalId()) parentItem = childItem.parent() if parentItem.this == self.rootItem.this: return QModelIndex() index = self.createIndex(parentItem.at() , 0, long(parentItem.this)) return index def hasChildren(self, parent): """ \reimp \return `True` if index `parent` has at least one child, `False` the otherwise. """ if not parent.isValid(): self.parentItem = self.rootItem return self.rootItem.hasChildren() else: self.parentItem = self.VFS.getNodeFromPointer(parent.internalId()) return self.parentItem.hasChildren() def setData(self, index, value, role): """ \reimp Set the data which value is `value` at index `index` with role `role`. \return `True` if no error occured, `False` otherwise. """ if not index.isValid(): return QVariant() if role == Qt.CheckStateRole: column = index.column() if column == HNAME: node = self.VFS.getNodeFromPointer(index.internalId()) if value == Qt.Unchecked: if (long(node.this), 0) in self.checkedNodes: self.checkedNodes.remove((long(node.this), 0)) else: self.checkedNodes.remove((long(node.this), 1)) elif value == Qt.PartiallyChecked: self.checkedNodes.add((long(node.this), 0)) elif value == Qt.Checked: if node.hasChildren(): if (long(node.this), 0) not in self.checkedNodes: self.checkedNodes.add((long(node.this), 0)) else: self.checkedNodes.remove((long(node.this), 0)) self.checkedNodes.add((long(node.this), 1)) else: self.checkedNodes.add((long(node.this) , 1)) return True #return true if ok def flags(self, flag): """ \reimp \return the Qt.ItemFlags of the model. """ return (Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsTristate | Qt.ItemIsEnabled ) def dataTypeByKey(self, stype, node): try: return node.dataType().value()[str(stype)].value() except IndexError: return None def fsoAttributesByKey(self, stype, node): try: val = node.fsoAttributes()[stype] if isinstance(val.value(), vtime): return val.value().get_time() return val except IndexError: return Variant() def sort(self, column, order): """ \reimp Overload of the sort method used to sort data in the view, according to a given column. It calls the `sorted()` python built-in function, which documentation can be found at : * http://wiki.python.org/moin/HowTo/Sorting/ Emit a `layoutAboutToBeChanged()` signal before sorting, and a `layoutChanged()` signal once the sorting is finished. It can a few seconds on important data volumes. \param column the column on which the user wants to perform the sorting. \param the order in which the user wants to sort (`Qt.DescendingOrder` or `Qt.AscendingOrder`). 
""" parentItem = self.rootItem if parentItem == None: return children_list = parentItem.children() if order == Qt.DescendingOrder: Reverse = True else: Reverse = False self.emit(SIGNAL("layoutAboutToBeChanged()")) if column == HNAME: # sort by name self.node_list = sorted(children_list, key=lambda Node: Node.name(), reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) return elif column == HSIZE: # sort be size self.node_list = sorted(children_list, key=lambda Node: Node.size(), reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) return elif (self.disp_module == 1) and (column == HMODULE): # sort by module's name self.node_list = sorted(children_list, key=lambda Node: Node.fsobj(), reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) return elif (self.del_sort != 0): if (self.disp_module != 0): if (column == (HMODULE + 1)): # sort by deleted falg self.node_list = sorted(children_list, key=lambda Node: Node.isDeleted(), reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) return elif column == HMODULE: self.node_list = sorted(children_list, key=lambda Node: Node.isDeleted(), reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) return if (column - 2) >= (len(self.header_list) + len(self.type_list)): # default sorting if column is out of range self.node_list = sorted(children_list, key=lambda Node: Node.name(), reverse=Reverse) elif column - 2 >= len(self.header_list): # sorting on the mime type type = self.type_list[column - 2 - len(self.header_list)] self.node_list = sorted(children_list, \ key= lambda Node: self.dataTypeByKey(str(type), Node), \ reverse=Reverse) else: # sort on an extended attribute. self.node_list = sorted(children_list, \ key=lambda Node: self.fsoAttributesByKey(str(self.header_list[column - 2]), Node), \ reverse=Reverse) self.emit(SIGNAL("layoutChanged()")) def translation(self): """ Used for translating the framework. """ self.nameTr = self.tr('Name') self.sizeTr = self.tr('Size') self.ATimeTr = self.tr('Accessed time') self.CTimeTr = self.tr('Changed time') self.MTimeTr = self.tr('Modified time') self.moduleTr = self.tr('Module') self.deletedTr = self.tr('Deleted')
gpl-2.0
-762,149,715,988,854,500
33.645702
125
0.63863
false
3.786893
false
false
false
bailabs/bench-v7
install.py
1
10678
# wget setup_frappe.py | python import os, sys, subprocess, getpass, json, multiprocessing, shutil, platform from distutils.spawn import find_executable tmp_bench_repo = '/tmp/bench-v7' def install_bench(args): check_distribution_compatibility() check_brew_installed() # pre-requisites for bench repo cloning install_package('curl') install_package('wget') success = run_os_command({ 'apt-get': [ 'sudo apt-get update', 'sudo apt-get install -y git build-essential python-setuptools python-dev libffi-dev libssl-dev' ], 'yum': [ 'sudo yum groupinstall -y "Development tools"', 'sudo yum install -y epel-release redhat-lsb-core git python-setuptools python-devel openssl-devel libffi-devel' ], # epel-release is required to install redis, so installing it before the playbook-run. # redhat-lsb-core is required, so that ansible can set ansible_lsb variable }) if not find_executable("git"): success = run_os_command({ 'brew': 'brew install git' }) if not success: print 'Could not install pre-requisites. Please check for errors or install them manually.' return # secure pip installation if find_executable('pip'): run_os_command({ 'yum': 'sudo pip install --upgrade setuptools pip', 'apt-get': 'sudo pip install --upgrade setuptools pip', 'brew': "sudo pip install --upgrade setuptools pip --user" }) else: if not os.path.exists("get-pip.py"): run_os_command({ 'apt-get': 'wget https://bootstrap.pypa.io/get-pip.py', 'yum': 'wget https://bootstrap.pypa.io/get-pip.py' }) success = run_os_command({ 'apt-get': 'sudo python get-pip.py', 'yum': 'sudo python get-pip.py', }) if success: run_os_command({ 'pip': 'sudo pip install --upgrade pip setuptools', }) # Restricting ansible version due to following bug in ansible 2.1 # https://github.com/ansible/ansible-modules-core/issues/3752 success = run_os_command({ 'pip': "sudo pip install 'ansible==2.0.2.0'" }) if not success: could_not_install('Ansible') # clone bench repo if not args.run_travis: clone_bench_repo(args) if not args.user: if args.production: args.user = 'frappe' elif os.environ.has_key('SUDO_USER'): args.user = os.environ['SUDO_USER'] else: args.user = getpass.getuser() if args.user == 'root': raise Exception('Please run this script as a non-root user with sudo privileges, but without using sudo or pass --user=USER') # create user if not exists extra_vars = vars(args) extra_vars.update(frappe_user=args.user) if os.path.exists(tmp_bench_repo): repo_path = tmp_bench_repo else: repo_path = os.path.join(os.path.expanduser('~'), 'bench') extra_vars.update(repo_path=repo_path) run_playbook('develop/create_user.yml', extra_vars=extra_vars) extra_vars.update(get_passwords(args.run_travis or args.without_bench_setup)) if args.production: extra_vars.update(max_worker_connections=multiprocessing.cpu_count() * 1024) branch = 'master' if args.production else 'master' extra_vars.update(branch=branch) if args.develop: run_playbook('develop/install.yml', sudo=True, extra_vars=extra_vars) elif args.production: run_playbook('production/install.yml', sudo=True, extra_vars=extra_vars) if os.path.exists(tmp_bench_repo): shutil.rmtree(tmp_bench_repo) def check_distribution_compatibility(): supported_dists = {'ubuntu': [14, 15, 16], 'debian': [7, 8], 'centos': [7], 'macos': [10.9, 10.10, 10.11, 10.12]} dist_name, dist_version = get_distribution_info() if dist_name in supported_dists: if float(dist_version) in supported_dists[dist_name]: return print "Sorry, the installer doesn't support {0} {1}. 
Aborting installation!".format(dist_name, dist_version) if dist_name in supported_dists: print "Install on {0} {1} instead".format(dist_name, supported_dists[dist_name][-1]) sys.exit(1) def get_distribution_info(): # return distribution name and major version if platform.system() == "Linux": current_dist = platform.dist() return current_dist[0].lower(), current_dist[1].rsplit('.')[0] elif platform.system() == "Darwin": current_dist = platform.mac_ver() return "macos", current_dist[0].rsplit('.', 1)[0] def install_python27(): version = (sys.version_info[0], sys.version_info[1]) if version == (2, 7): return print 'Installing Python 2.7' # install python 2.7 success = run_os_command({ 'apt-get': 'sudo apt-get install -y python2.7', 'yum': 'sudo yum install -y python27', 'brew': 'brew install python' }) if not success: could_not_install('Python 2.7') # replace current python with python2.7 os.execvp('python2.7', ([] if is_sudo_user() else ['sudo']) + ['python2.7', __file__] + sys.argv[1:]) def install_package(package): package_exec = find_executable(package) if not package_exec: success = run_os_command({ 'apt-get': ['sudo apt-get install -y {0}'.format(package)], 'yum': ['sudo yum install -y {0}'.format(package)] }) else: return if not success: could_not_install(package) def check_brew_installed(): if 'Darwin' not in os.uname(): return brew_exec = find_executable('brew') if not brew_exec: raise Exception(''' Please install brew package manager before proceeding with bench setup. Please run following to install brew package manager on your machine, /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" ''') def clone_bench_repo(args): '''Clones the bench repository in the user folder''' if os.path.exists(tmp_bench_repo): return 0 elif args.without_bench_setup: clone_path = os.path.join(os.path.expanduser('~'), 'bench') else: clone_path = tmp_bench_repo branch = args.bench_branch or 'master' repo_url = args.repo_url or 'https://github.com/bailabs/bench-v7.git' success = run_os_command( {'git': 'git clone {repo_url} {bench_repo} --depth 1 --branch {branch}'.format( repo_url=repo_url, bench_repo=clone_path, branch=branch)} ) return success def run_os_command(command_map): '''command_map is a dictionary of {'executable': command}. For ex. {'apt-get': 'sudo apt-get install -y python2.7'} ''' success = True for executable, commands in command_map.items(): if find_executable(executable): if isinstance(commands, basestring): commands = [commands] for command in commands: returncode = subprocess.check_call(command, shell=True) success = success and ( returncode == 0 ) break return success def could_not_install(package): raise Exception('Could not install {0}. 
Please install it manually.'.format(package)) def is_sudo_user(): return os.geteuid() == 0 def get_passwords(ignore_prompt=False): if not ignore_prompt: mysql_root_password, admin_password = '', '' pass_set = True while pass_set: # mysql root password if not mysql_root_password: mysql_root_password = getpass.unix_getpass(prompt='Please enter mysql root password: ') conf_mysql_passwd = getpass.unix_getpass(prompt='Re-enter mysql root password: ') if mysql_root_password != conf_mysql_passwd: mysql_root_password = '' continue # admin password if not admin_password: admin_password = getpass.unix_getpass(prompt='Please enter the default Administrator user password: ') conf_admin_passswd = getpass.unix_getpass(prompt='Re-enter Administrator password: ') if admin_password != conf_admin_passswd: admin_password = '' continue pass_set = False else: mysql_root_password = admin_password = 'travis' passwords = { 'mysql_root_password': mysql_root_password, 'admin_password': admin_password } if not ignore_prompt: passwords_file_path = os.path.join(os.path.expanduser('~'), 'passwords.txt') with open(passwords_file_path, 'w') as f: json.dump(passwords, f, indent=1) print 'Passwords saved at ~/passwords.txt' return passwords def get_extra_vars_json(extra_args): # We need to pass production as extra_vars to the playbook to execute conditionals in the # playbook. Extra variables can passed as json or key=value pair. Here, we will use JSON. json_path = os.path.join('/tmp', 'extra_vars.json') extra_vars = dict(extra_args.items()) with open(json_path, mode='w') as j: json.dump(extra_vars, j, indent=1, sort_keys=True) return ('@' + json_path) def run_playbook(playbook_name, sudo=False, extra_vars=None): args = ['ansible-playbook', '-c', 'local', playbook_name] if extra_vars: args.extend(['-e', get_extra_vars_json(extra_vars)]) if extra_vars.get('verbosity'): args.append('-vvvv') if sudo: user = extra_vars.get('user') or getpass.getuser() args.extend(['--become', '--become-user={0}'.format(user)]) if os.path.exists(tmp_bench_repo): cwd = tmp_bench_repo else: cwd = os.path.join(os.path.expanduser('~'), 'bench') success = subprocess.check_call(args, cwd=os.path.join(cwd, 'playbooks')) return success def parse_commandline_args(): import argparse parser = argparse.ArgumentParser(description='Frappe Installer') # Arguments develop and production are mutually exclusive both can't be specified together. # Hence, we need to create a group for discouraging use of both options at the same time. 
	args_group = parser.add_mutually_exclusive_group()
	args_group.add_argument('--develop', dest='develop', action='store_true', default=False,
		help='Install developer setup')
	args_group.add_argument('--production', dest='production', action='store_true', default=False,
		help='Setup Production environment for bench')
	parser.add_argument('--site', dest='site', action='store', default='site1.local',
		help='Specify name for your first ERPNext site')
	parser.add_argument('--verbose', dest='verbosity', action='store_true', default=False,
		help='Run the script in verbose mode')
	parser.add_argument('--user', dest='user', help='Install frappe-v7 for this user')
	parser.add_argument('--bench-branch', dest='bench_branch', help='Clone a particular branch of bench repository')
	parser.add_argument('--repo-url', dest='repo_url', help='Clone bench from the given url')

	# To enable testing of script using Travis, this should skip the prompt
	parser.add_argument('--run-travis', dest='run_travis', action='store_true', default=False, help=argparse.SUPPRESS)
	parser.add_argument('--without-bench-setup', dest='without_bench_setup', action='store_true', default=False, help=argparse.SUPPRESS)

	args = parser.parse_args()
	return args


if __name__ == '__main__':
	try:
		import argparse
	except ImportError:
		# argparse is missing on old Pythons; install python 2.7 first
		install_python27()

	args = parse_commandline_args()
	install_bench(args)

	print '''Frappe/ERPNext has been successfully installed!'''
gpl-3.0
-1,744,825,575,128,492,300
29.249292
127
0.699569
false
3.158237
false
false
false
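run_os_command dispatches on whichever package manager is actually installed, running the first matching executable's command(s) through the shell. A sketch of calling it directly:

# tries apt-get, then yum, then brew, whichever exists on this machine
success = run_os_command({
    'apt-get': 'sudo apt-get install -y git',
    'yum': 'sudo yum install -y git',
    'brew': 'brew install git',
})
if not success:
    could_not_install('git')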
diN0bot/ProcrasDonate
adwords/views/main.py
1
2790
import settings from lib.view_utils import render_response, render_string, HttpResponseRedirect from django.core.urlresolvers import reverse from django.contrib.auth.decorators import user_passes_test from adwords.models import * def adword_page(request, group): page = "landing" return render_response(request, 'adwords/landing_pages/%s.html' % group, locals()) def adword_click(request, page, group): return render_response(request, 'adwords/click_to_page_base.html', locals()) def adword_done(request, page, group): return render_response(request, 'adwords/done_page.html', locals()) def adword_email_form(request, page, group): if request.POST: email = request.POST.get('email', None) if email: email = email.strip() visitor = Visitor.add(group, page, email) try: # send email for recipient user to reset password txt = render_string(request, 'adwords/email.txt', {'email': email, 'settings': settings, 'visitor': visitor, 'group': group, 'page': page}) visitor.send_email("Welcome to ProcrasDonate", txt, from_email=settings.EMAIL) return HttpResponseRedirect(reverse('adword_done', args=(page, group))) except: Log.Error("Adword visitor::Problem sending thank you email to %s for %s \ (maybe email address does not exist?)" % (email, visitor), "adword") return HttpResponseRedirect(reverse('adword_done', args=(page, group))) @user_passes_test(lambda u: u.is_superuser) def dashboard(request): # table = rows of groups, with columns the total no. of emails registered per page table = [] click_tos = Visitor.objects.all().values_list('email_page', flat=True).order_by().distinct() # construct header row header_row = ["Landing Page", "Total"] for click_to in click_tos: header_row.append(click_to) table.append(header_row) # construct rest of rows groups = Visitor.objects.all().values_list('page_group', flat=True).order_by().distinct() for group in groups: row = [group, Visitor.objects.filter(page_group=group).count()] for click_to in click_tos: row.append(Visitor.objects.filter(email_page=click_to, page_group=group).count()) table.append(row) return render_response(request, 'adwords/dashboard.html', locals())
agpl-3.0
758,947,347,224,055,200
40.641791
96
0.574194
false
4.318885
false
false
false
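dashboard above builds a plain list-of-lists for the template: a header row naming every distinct click-to page, then one row per landing-page group with its total and per-page visitor counts. Illustratively, with two groups and two click-to pages the structure is:

# illustrative shape of `table`; group and page names are invented
table = [
    ["Landing Page", "Total", "signup", "donate"],
    ["groupA", 12, 7, 5],
    ["groupB", 3, 1, 2],
]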
odahoda/noisicaa
noisicaa/ui/graph/base_node.py
1
34059
#!/usr/bin/python3 # @begin:license # # Copyright (c) 2015-2019, Benjamin Niemann <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @end:license import functools import logging import os.path from typing import cast, Any, Optional, Dict, List, Iterable from PyQt5.QtCore import Qt from PyQt5 import QtCore from PyQt5 import QtGui from PyQt5 import QtSvg from PyQt5 import QtWidgets from noisicaa import constants from noisicaa import audioproc from noisicaa import core from noisicaa import value_types from noisicaa import music from noisicaa import node_db from noisicaa.ui import ui_base from noisicaa.ui import mute_button logger = logging.getLogger(__name__) port_colors = { node_db.PortDescription.UNDEFINED: QtGui.QColor(150, 150, 150), node_db.PortDescription.AUDIO: QtGui.QColor(100, 255, 100), node_db.PortDescription.ARATE_CONTROL: QtGui.QColor(100, 255, 180), node_db.PortDescription.KRATE_CONTROL: QtGui.QColor(100, 180, 255), node_db.PortDescription.EVENTS: QtGui.QColor(255, 180, 100), } class SelectColorAction(QtWidgets.QWidgetAction): colorSelected = QtCore.pyqtSignal(value_types.Color) def __init__(self, parent: QtCore.QObject) -> None: super().__init__(parent) self.setDefaultWidget(SelectColorWidget(parent=parent, action=self)) class ColorBox(QtWidgets.QWidget): clicked = QtCore.pyqtSignal() def __init__(self, color: value_types.Color, parent: QtWidgets.QWidget) -> None: super().__init__(parent) self.__color = color self.setFixedSize(24, 24) def paintEvent(self, event: QtGui.QPaintEvent) -> None: super().paintEvent(event) painter = QtGui.QPainter(self) try: painter.fillRect(self.rect(), Qt.black) painter.fillRect(self.rect().adjusted(1, 1, -1, -1), Qt.white) painter.fillRect(self.rect().adjusted(2, 2, -2, -2), QtGui.QColor.fromRgbF( self.__color.r, self.__color.g, self.__color.b, self.__color.a)) finally: painter.end() def mousePressEvent(self, event: QtGui.QMouseEvent) -> None: if event.button() == Qt.LeftButton: self.clicked.emit() class SelectColorWidget(QtWidgets.QWidget): colors = [ value_types.Color(0.7, 0.7, 0.7), value_types.Color(0.8, 0.8, 0.8), value_types.Color(0.9, 0.9, 0.9), value_types.Color(1.0, 1.0, 1.0), value_types.Color(1.0, 0.6, 0.6), value_types.Color(1.0, 0.7, 0.7), value_types.Color(1.0, 0.8, 0.8), value_types.Color(1.0, 0.9, 0.9), value_types.Color(1.0, 0.6, 0.1), value_types.Color(1.0, 0.7, 0.3), value_types.Color(1.0, 0.8, 0.6), value_types.Color(1.0, 0.9, 0.8), value_types.Color(0.6, 1.0, 0.6), value_types.Color(0.7, 1.0, 0.7), value_types.Color(0.8, 1.0, 0.8), value_types.Color(0.9, 1.0, 0.9), value_types.Color(0.6, 0.6, 1.0), value_types.Color(0.7, 0.7, 1.0), value_types.Color(0.8, 0.8, 1.0), value_types.Color(0.9, 0.9, 1.0), value_types.Color(1.0, 0.6, 1.0), value_types.Color(1.0, 0.7, 1.0), value_types.Color(1.0, 0.8, 1.0), value_types.Color(1.0, 0.9, 1.0), value_types.Color(1.0, 1.0, 
0.6), value_types.Color(1.0, 1.0, 0.7), value_types.Color(1.0, 1.0, 0.8), value_types.Color(1.0, 1.0, 0.9), value_types.Color(0.6, 1.0, 1.0), value_types.Color(0.7, 1.0, 1.0), value_types.Color(0.8, 1.0, 1.0), value_types.Color(0.9, 1.0, 1.0), ] def __init__(self, *, action: SelectColorAction, **kwargs: Any) -> None: super().__init__(**kwargs) self.__action = action layout = QtWidgets.QGridLayout() layout.setContentsMargins(QtCore.QMargins(2, 2, 2, 2)) layout.setSpacing(2) self.setLayout(layout) for idx, color in enumerate(self.colors): w = ColorBox(color, self) w.clicked.connect(functools.partial(self.__action.colorSelected.emit, color)) layout.addWidget(w, idx // 8, idx % 8) class NodeProps(QtCore.QObject): contentRectChanged = QtCore.pyqtSignal(QtCore.QRectF) canvasLayoutChanged = QtCore.pyqtSignal() class Title(QtWidgets.QGraphicsSimpleTextItem): def __init__(self, name: str, parent: 'Node') -> None: super().__init__(parent) self.setText(name) self.setFlag(QtWidgets.QGraphicsItem.ItemClipsToShape, True) self.setAcceptedMouseButtons(Qt.LeftButton) self.__width = None # type: float def boundingRect(self) -> QtCore.QRectF: bounding_rect = super().boundingRect() if self.__width is not None: bounding_rect.setWidth(self.__width) return bounding_rect def shape(self) -> QtGui.QPainterPath: shape = QtGui.QPainterPath() shape.addRect(self.boundingRect()) return shape def setWidth(self, width: float) -> None: self.__width = width class Box(QtWidgets.QGraphicsPathItem): def mousePressEvent(self, event: QtWidgets.QGraphicsSceneMouseEvent) -> None: # swallow mouse press events (which aren't handled by some other of the # node's items) to prevent the canvas from triggering a rubber band # selection. event.accept() class NodeIcon(QtWidgets.QGraphicsItem): def __init__(self, icon: QtSvg.QSvgRenderer, parent: QtWidgets.QGraphicsItem) -> None: super().__init__(parent) self.__icon = icon self.__size = QtCore.QSizeF() self.__pixmap = None # type: QtGui.QPixmap def setRect(self, rect: QtCore.QRectF) -> None: self.prepareGeometryChange() self.setPos(rect.topLeft()) self.__size = rect.size() def boundingRect(self) -> QtCore.QRectF: return QtCore.QRectF(QtCore.QPointF(), self.__size) def paint( self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem, widget: Optional[QtWidgets.QWidget] = None) -> None: size = min(self.__size.width(), self.__size.height()) size = int(size - 0.4 * max(0, size - 50)) if size < 10: return pixmap_size = QtCore.QSize(size, size) if self.__pixmap is None or self.__pixmap.size() != pixmap_size: self.__pixmap = QtGui.QPixmap(pixmap_size) self.__pixmap.fill(QtGui.QColor(0, 0, 0, 0)) pixmap_painter = QtGui.QPainter(self.__pixmap) try: self.__icon.render(pixmap_painter, QtCore.QRectF(0, 0, size, size)) finally: pixmap_painter.end() painter.setOpacity(min(0.8, max(0.2, 0.8 - (size - 30) / 100))) painter.drawPixmap( int((self.__size.width() - size) / 2), int((self.__size.height() - size) / 2), self.__pixmap) class PortLabel(QtWidgets.QGraphicsRectItem): def __init__(self, port: 'Port') -> None: super().__init__() self.setZValue(100000) self.__text = QtWidgets.QGraphicsSimpleTextItem(self) tooltip = '%s: ' % port.name() tooltip += '/'.join( { node_db.PortDescription.AUDIO: "audio", node_db.PortDescription.KRATE_CONTROL: "k-rate control", node_db.PortDescription.ARATE_CONTROL: "a-rate control", node_db.PortDescription.EVENTS: "event", }[port_type] for port_type in port.possible_types()) tooltip += { node_db.PortDescription.INPUT: " input", node_db.PortDescription.OUTPUT: " 
output", }[port.direction()] self.__text.setText(tooltip) self.__text.setPos(4, 2) text_box = self.__text.boundingRect() pen = QtGui.QPen() pen.setColor(Qt.black) pen.setWidth(1) self.setPen(pen) self.setBrush(QtGui.QColor(255, 255, 200)) self.setRect(0, 0, text_box.width() + 8, text_box.height() + 4) class Port(QtWidgets.QGraphicsPathItem): def __init__(self, port_desc: node_db.PortDescription, parent: 'Node') -> None: super().__init__(parent) self.__desc = port_desc self.__node = parent.node() self.__listeners = core.ListenerList() self.__listeners.add( self.__node.connections_changed.add(lambda _: self.__update())) self.__target_type = None # type: node_db.PortDescription.Type self.__highlighted = False self.__tooltip = None # type: PortLabel def setup(self) -> None: self.__tooltip = PortLabel(self) self.scene().addItem(self.__tooltip) self.__update() def cleanup(self) -> None: if self.__tooltip is not None: self.scene().removeItem(self.__tooltip) self.__tooltip = None self.__listeners.cleanup() def name(self) -> str: return self.__desc.name def direction(self) -> node_db.PortDescription.Direction: return self.__desc.direction def current_type(self) -> node_db.PortDescription.Type: return self.__node.get_current_port_type(self.__desc.name) def possible_types(self) -> List['node_db.PortDescription.Type']: return self.__node.get_possible_port_types(self.__desc.name) def node(self) -> 'Node': return cast(Node, self.parentItem()) def highlighted(self) -> bool: return self.__highlighted def setHighlighted(self, highlighted: bool) -> None: self.__highlighted = highlighted self.__update() def setTargetType(self, target_type: node_db.PortDescription.Type) -> None: if self.__target_type == target_type: return self.__target_type = target_type self.__update() def clearTargetType(self) -> None: if self.__target_type is None: return self.__target_type = None self.__update() def canConnectTo(self, port: 'Port') -> bool: return music.can_connect_ports( self.__node, self.__desc.name, port.__node, port.__desc.name) def preferredConnectionType(self, port: 'Port') -> node_db.PortDescription.Type: return music.get_preferred_connection_type( self.__node, self.__desc.name, port.__node, port.__desc.name) def handleScenePos(self) -> QtCore.QPointF: if not self.isVisible(): return self.scenePos() elif self.__desc.direction == node_db.PortDescription.INPUT: return self.scenePos() + QtCore.QPointF(-10, 0) else: return self.scenePos() + QtCore.QPointF(10, 0) def descriptionChanged(self, port_desc: node_db.PortDescription) -> None: self.__desc = port_desc self.__update() def __update(self) -> None: color = port_colors[self.__target_type or self.current_type()] if self.__highlighted: self.setOpacity(1.0) self.__tooltip.setVisible(self.__highlighted) ttpos = self.scenePos() ttpos += QtCore.QPointF(0, -self.__tooltip.boundingRect().height() / 2) if self.__desc.direction == node_db.PortDescription.OUTPUT: ttpos += QtCore.QPointF(20, 0) else: ttpos -= QtCore.QPointF(20 + self.__tooltip.boundingRect().width(), 0) self.__tooltip.setPos(ttpos) else: self.setOpacity(0.7) self.__tooltip.setVisible(False) if self.__highlighted or self.__target_type is not None: pen = QtGui.QPen() pen.setColor(Qt.red) pen.setWidth(2) self.setPen(pen) self.setBrush(color) rect = QtCore.QRectF(-15, -12, 30, 24) else: pen = QtGui.QPen() pen.setColor(QtGui.QColor(80, 80, 200)) pen.setWidth(1) self.setPen(pen) self.setBrush(color) rect = QtCore.QRectF(-10, -8, 20, 16) path = QtGui.QPainterPath() if self.__desc.direction == 
node_db.PortDescription.INPUT: path.moveTo(0, rect.top()) path.arcTo(rect, 90, 180) else: path.moveTo(0, rect.top()) path.arcTo(rect, 90, -180) self.setPath(path) class Node(ui_base.ProjectMixin, core.AutoCleanupMixin, QtWidgets.QGraphicsItem): __next_zvalue = 2.0 has_window = False def __init__( self, *, node: music.BaseNode, icon: Optional[QtSvg.QSvgRenderer] = None, **kwargs: Any ) -> None: super().__init__(**kwargs) self.setZValue(1.0) self.setAcceptHoverEvents(True) self.setAcceptedMouseButtons(Qt.LeftButton) self.props = NodeProps() self.__session_prefix = 'node/%016x/' % node.id self.__listeners = core.ListenerList() self.add_cleanup_function(self.__listeners.cleanup) self.__node = node self.__window = None # type: QtWidgets.QWidget self.__box = Box(self) if icon is not None: self.__icon = NodeIcon(icon, self) else: self.__icon = None self.__ports = {} # type: Dict[str, Port] for port_desc in self.__node.description.ports: port = Port(port_desc, self) self.__ports[port_desc.name] = port self.__title = Title(self.__node.name, self) self.__title_edit = QtWidgets.QLineEdit() self.__title_edit.editingFinished.connect(self.__renameNodeFinished) self.__title_edit_proxy = QtWidgets.QGraphicsProxyWidget(self) self.__title_edit_proxy.setWidget(self.__title_edit) self.__title_widgets_proxy = None self.__title_widgets_container = None title_widgets = list(self.titleWidgets()) if title_widgets: self.__title_widgets_container = QtWidgets.QWidget() self.__title_widgets_container.setAutoFillBackground(False) self.__title_widgets_container.setAttribute(Qt.WA_NoSystemBackground, True) layout = QtWidgets.QHBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(1) for widget in title_widgets: layout.addWidget(widget) self.__title_widgets_container.setLayout(layout) self.__title_widgets_proxy = QtWidgets.QGraphicsProxyWidget(self) self.__title_widgets_proxy.setWidget(self.__title_widgets_container) self.__body_proxy = None # type: QtWidgets.QGraphicsProxyWidget self.__body = self.createBodyWidget() if self.__body is not None: self.__body.setAutoFillBackground(False) self.__body.setAttribute(Qt.WA_NoSystemBackground, True) self.__body_proxy = QtWidgets.QGraphicsProxyWidget(self) self.__body_proxy.setWidget(self.__body) self.__transform = QtGui.QTransform() self.__canvas_rect = self.__transform.mapRect(self.contentRect()) self.__selected = False self.__hovered = False self.__rename_node = False self.__drag_rect = QtCore.QRectF() self.__listeners.add( self.__node.name_changed.add(self.__nameChanged)) self.__listeners.add( self.__node.graph_pos_changed.add(self.__graphRectChanged)) self.__listeners.add( self.__node.graph_size_changed.add(self.__graphRectChanged)) self.__listeners.add( self.__node.graph_color_changed.add(lambda *_: self.__updateState())) self.__listeners.add( self.__node.port_properties_changed.add(lambda *_: self.__layout())) self.__listeners.add( self.__node.description_changed.add(lambda *_: self.__descriptionChanged())) self.__state = None # type: audioproc.NodeStateChange.State self.__listeners.add( self.audioproc_client.node_state_changed.add( '%08x' % self.__node.id, self.__stateChanged)) self.__updateState() def __str__(self) -> str: return '<node name=%r> ' % self.__node.name @property def __in_ports(self) -> List[node_db.PortDescription]: return [ port_desc for port_desc in self.__node.description.ports if port_desc.direction == node_db.PortDescription.INPUT ] @property def __out_ports(self) -> List[node_db.PortDescription]: return [ port_desc for port_desc in 
self.__node.description.ports if port_desc.direction == node_db.PortDescription.OUTPUT ] def __nameChanged(self, *args: Any) -> None: self.__title.setText(self.__node.name) def __graphRectChanged(self, *args: Any) -> None: self.__canvas_rect = self.__transform.mapRect(self.contentRect()) self.__layout() self.props.contentRectChanged.emit(self.contentRect()) def createBodyWidget(self) -> QtWidgets.QWidget: return None def createWindow(self, **kwargs: Any) -> QtWidgets.QWidget: raise RuntimeError("Node %s does not support windows." % type(self).__name__) def titleWidgets(self) -> Iterable[QtWidgets.QWidget]: if self.__node.description.node_ui.muteable: muted_button = mute_button.MuteButton() muted_button.toggled.connect( lambda muted: self.project_client.set_session_value( self.__session_prefix + 'muted', muted)) muted_button.setChecked( self.project_client.get_session_value( self.__session_prefix + 'muted', False)) self.project_client.add_session_data_listener( self.__session_prefix + 'muted', muted_button.setChecked) yield muted_button if self.__node.removable: remove_button = QtWidgets.QToolButton() remove_button.setAutoRaise(True) remove_button.setIcon(QtGui.QIcon( os.path.join(constants.DATA_DIR, 'icons', 'window-close.svg'))) remove_button.clicked.connect(self.onRemove) yield remove_button def setup(self) -> None: for port in self.__ports.values(): port.setup() def cleanup(self) -> None: for port in self.__ports.values(): port.cleanup() self.__ports.clear() if self.__window is not None: self.__window.close() self.__window = None super().cleanup() def node(self) -> music.BaseNode: return self.__node def id(self) -> int: return self.__node.id def name(self) -> str: return self.__node.name def graph_pos(self) -> value_types.Pos2F: return self.__node.graph_pos def graph_size(self) -> value_types.SizeF: return self.__node.graph_size def ports(self) -> Iterable[Port]: for port_desc in self.__node.description.ports: yield self.__ports[port_desc.name] def upstream_nodes(self) -> List[music.BaseNode]: return self.__node.upstream_nodes() def selected(self) -> bool: return self.__selected def setSelected(self, selected: bool) -> None: self.__selected = selected self.__updateState() def port(self, port_name: str) -> Port: return self.__ports[port_name] def portHandleScenePos(self, port_name: str) -> QtCore.QPointF: return self.__ports[port_name].handleScenePos() def contentTopLeft(self) -> QtCore.QPointF: return QtCore.QPointF(self.__node.graph_pos.x, self.__node.graph_pos.y) def contentSize(self) -> QtCore.QSizeF: return QtCore.QSizeF(self.__node.graph_size.width, self.__node.graph_size.height) def contentRect(self) -> QtCore.QRectF: return QtCore.QRectF(self.contentTopLeft(), self.contentSize()) def canvasTopLeft(self) -> QtCore.QPointF: return self.__canvas_rect.topLeft() def setCanvasTopLeft(self, pos: QtCore.QPointF) -> None: self.__canvas_rect.moveTopLeft(pos) self.__layout() def setCanvasRect(self, rect: QtCore.QRectF) -> None: self.__canvas_rect = rect self.__layout() def canvasRect(self) -> QtCore.QRectF: return self.__canvas_rect def setCanvasTransform(self, transform: QtGui.QTransform) -> None: self.__transform = transform self.__canvas_rect = self.__transform.mapRect(self.contentRect()) self.__layout() def resizeSide(self, pos: QtCore.QPointF) -> Optional[str]: t = self.__canvas_rect.top() b = self.__canvas_rect.bottom() l = self.__canvas_rect.left() r = self.__canvas_rect.right() w = self.__canvas_rect.width() h = self.__canvas_rect.height() resize_rects = { 'top': QtCore.QRectF(l + 
4, t, w - 8, 4), 'bottom': QtCore.QRectF(l + 10, b - 10, w - 20, 10), 'left': QtCore.QRectF(l, t + 4, 4, h - 14), 'right': QtCore.QRectF(r - 4, t + 4, 4, h - 14), 'topleft': QtCore.QRectF(l, t, 4, 4), 'topright': QtCore.QRectF(r - 4, t, 4, 4), 'bottomleft': QtCore.QRectF(l, b - 10, 10, 10), 'bottomright': QtCore.QRectF(r - 10, b - 10, 10, 10), } for side, rect in resize_rects.items(): if rect.contains(pos): return side return None def dragRect(self) -> QtCore.QRectF: return self.__drag_rect def boundingRect(self) -> QtCore.QRectF: return self.__box.boundingRect() def __descriptionChanged(self) -> None: ports = {} for port_desc in self.__node.description.ports: if port_desc.name not in self.__ports: port = Port(port_desc, self) port.setup() else: port = self.__ports[port_desc.name] port.descriptionChanged(port_desc) ports[port_desc.name] = port for port_name, port in self.__ports.items(): if port_name not in ports: port.cleanup() port.setParentItem(None) self.scene().removeItem(port) self.__ports = ports self.__layout() def __stateChanged(self, state_change: audioproc.NodeStateChange) -> None: if state_change.HasField('state'): self.__state = state_change.state self.__updateState() def __updateState(self) -> None: if self.__selected or self.__hovered: opacity = 1.0 else: opacity = 0.7 self.__box.setOpacity(opacity) for port in self.__ports.values(): if not port.highlighted(): port.setOpacity(opacity) if self.__state == audioproc.NodeStateChange.BROKEN: pen = QtGui.QPen() pen.setColor(Qt.black) pen.setWidth(2) self.__box.setPen(pen) self.__box.setBrush(QtGui.QColor(255, 0, 0)) elif self.__selected: pen = QtGui.QPen() pen.setColor(QtGui.QColor(80, 80, 200)) pen.setWidth(2) self.__box.setPen(pen) self.__box.setBrush(QtGui.QColor(150, 150, 255)) else: pen = QtGui.QPen() pen.setColor(Qt.black) pen.setWidth(2) self.__box.setPen(pen) self.__box.setBrush(QtGui.QColor.fromRgbF( self.__node.graph_color.r, self.__node.graph_color.g, self.__node.graph_color.b, self.__node.graph_color.a)) def __layout(self) -> None: self.setPos(self.__canvas_rect.topLeft()) w, h = self.__canvas_rect.width(), self.__canvas_rect.height() path = QtGui.QPainterPath() path.addRoundedRect(0, 0, w, h, 5, 5) self.__box.setPath(path) visible_in_ports = [] for desc in self.__in_ports: port_properties = self.__node.get_port_properties(desc.name) if not port_properties.exposed: port = self.__ports[desc.name] port.setVisible(False) continue visible_in_ports.append(desc) show_ports = (0.5 * h > 10 * max(len(visible_in_ports), len(self.__out_ports))) for idx, desc in enumerate(visible_in_ports): port = self.__ports[desc.name] if len(visible_in_ports) > 1: y = h * (0.5 * idx / (len(visible_in_ports) - 1) + 0.25) else: y = h * 0.5 port.setPos(0, y) port.setVisible(show_ports) for idx, desc in enumerate(self.__out_ports): port = self.__ports[desc.name] if len(self.__out_ports) > 1: y = h * (0.5 * idx / (len(self.__out_ports) - 1) + 0.25) else: y = h * 0.5 port.setPos(w, y) port.setVisible(show_ports) if self.__rename_node: title_h = self.__title_edit_proxy.minimumHeight() + 4 self.__title_edit_proxy.setVisible(True) self.__title_edit_proxy.setPos(4, 4) self.__title_edit_proxy.resize(w - 8, self.__title_edit_proxy.minimumHeight()) else: title_h = 24 self.__title_edit_proxy.setVisible(False) if self.__title_widgets_proxy is not None: if (h > self.__title_widgets_container.height() + 2 and w > self.__title_widgets_container.width() + 40 and not self.__rename_node): self.__title_widgets_proxy.setVisible(True) 
self.__title_widgets_proxy.setPos( w - self.__title_widgets_container.width() - 4, 2) title_h = self.__title_widgets_container.height() + 4 else: self.__title_widgets_proxy.setVisible(False) if h > 20 and not self.__rename_node: self.__title.setVisible(True) self.__title.setPos(8, (title_h - 2 - self.__title.boundingRect().height()) / 2) if self.__title_widgets_proxy is not None and self.__title_widgets_proxy.isVisible(): self.__title.setWidth(self.__title_widgets_proxy.pos().x() - 8) else: self.__title.setWidth(w - 16) else: self.__title.setVisible(False) if self.__icon is not None: if self.__title.isVisible(): icon_y = 24 else: icon_y = 3 self.__icon.setRect(QtCore.QRectF(3, icon_y, w - 6, h - icon_y - 6)) if self.__body_proxy is not None: bsize = self.__body_proxy.minimumSize() if h > bsize.height() + (title_h + 4) and w > bsize.width() + 8: self.__body_proxy.setVisible(True) self.__body_proxy.setPos(4, title_h) self.__body_proxy.resize(w - 8, h - (title_h + 4)) else: self.__body_proxy.setVisible(False) if self.__title_edit_proxy.isVisible(): drag_rect_width, drag_rect_height = 0.0, 0.0 else: if self.__body_proxy is not None and self.__body_proxy.isVisible(): drag_rect_height = title_h else: drag_rect_height = h if self.__title_widgets_proxy is not None and self.__title_widgets_proxy.isVisible(): drag_rect_width = self.__title_widgets_proxy.pos().x() else: drag_rect_width = w self.__drag_rect = QtCore.QRectF(0, 0, drag_rect_width, drag_rect_height) self.props.canvasLayoutChanged.emit() def paint( self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem, widget: Optional[QtWidgets.QWidget] = None) -> None: pass def mousePressEvent(self, event: QtWidgets.QGraphicsSceneMouseEvent) -> None: self.setZValue(Node.__next_zvalue) Node.__next_zvalue += 1 event.ignore() super().mousePressEvent(event) def hoverEnterEvent(self, event: QtWidgets.QGraphicsSceneHoverEvent) -> None: self.__hovered = True self.__updateState() return super().hoverEnterEvent(event) def hoverLeaveEvent(self, event: QtWidgets.QGraphicsSceneHoverEvent) -> None: self.__hovered = False self.__updateState() return super().hoverLeaveEvent(event) def buildContextMenu(self, menu: QtWidgets.QMenu) -> None: if self.has_window: show_window = menu.addAction("Open in window") show_window.triggered.connect(self.onShowWindow) if self.__node.removable: remove = menu.addAction("Remove") remove.triggered.connect(self.onRemove) rename = menu.addAction("Rename") rename.triggered.connect(self.renameNode) color_menu = menu.addMenu("Set color") color_action = SelectColorAction(color_menu) color_action.colorSelected.connect(self.onSetColor) color_menu.addAction(color_action) def onShowWindow(self) -> None: if self.__window is None: self.__window = self.createWindow(parent=self.project_view) self.__window.show() self.__window.raise_() self.__window.activateWindow() def onRemove(self) -> None: with self.project.apply_mutations('Remove node %s' % self.__node.name): for conn in self.__node.connections: self.project.remove_node_connection(conn) self.project.remove_node(self.__node) def onSetColor(self, color: value_types.Color) -> None: if color != self.__node.graph_color: with self.project.apply_mutations('%s: Set color' % self.__node.name): self.__node.graph_color = color def renameNode(self) -> None: self.__rename_node = True self.__title_edit.setText(self.__node.name) self.__title_edit.setFocus() self.__title_edit.selectAll() self.__layout() def __renameNodeFinished(self) -> None: new_name = self.__title_edit.text() if new_name != 
self.__node.name: self.__title.setText(self.__node.name) with self.project.apply_mutations('%s: Rename to "%s"' % (self.__node.name, new_name)): self.__node.name = new_name self.__rename_node = False self.__layout() class Connection(ui_base.ProjectMixin, QtWidgets.QGraphicsPathItem): def __init__( self, *, connection: music.NodeConnection, src_node: Node, dest_node: Node, **kwargs: Any) -> None: super().__init__(**kwargs) self.__connection = connection self.__src_node = src_node self.__dest_node = dest_node self.__highlighted = False self.__src_node_canvas_layout_changed_connection = \ self.__src_node.props.canvasLayoutChanged.connect(self.__update) self.__dest_node_canvas_layout_changed_connection = \ self.__dest_node.props.canvasLayoutChanged.connect(self.__update) self.__update() def cleanup(self) -> None: if self.__src_node_canvas_layout_changed_connection is not None: self.__src_node.props.canvasLayoutChanged.disconnect( self.__src_node_canvas_layout_changed_connection) self.__src_node_canvas_layout_changed_connection = None if self.__dest_node_canvas_layout_changed_connection is not None: self.__dest_node.props.canvasLayoutChanged.disconnect( self.__dest_node_canvas_layout_changed_connection) self.__dest_node_canvas_layout_changed_connection = None def connection(self) -> music.NodeConnection: return self.__connection def id(self) -> int: return self.__connection.id def src_node(self) -> Node: return self.__src_node def src_port(self) -> Port: return self.__src_node.port(self.__connection.source_port) def dest_node(self) -> Node: return self.__dest_node def dest_port(self) -> Port: return self.__dest_node.port(self.__connection.dest_port) def setHighlighted(self, highlighted: bool) -> None: self.__highlighted = highlighted self.__update() def __update(self) -> None: color = port_colors[self.__connection.type] if self.__highlighted: pen = QtGui.QPen() pen.setColor(color) pen.setWidth(4) self.setPen(pen) else: pen = QtGui.QPen() pen.setColor(color) pen.setWidth(2) self.setPen(pen) pos1 = self.__src_node.portHandleScenePos(self.__connection.source_port) pos2 = self.__dest_node.portHandleScenePos(self.__connection.dest_port) cpos = QtCore.QPointF(min(100, abs(pos2.x() - pos1.x()) / 2), 0) path = QtGui.QPainterPath() path.moveTo(pos1) path.cubicTo(pos1 + cpos, pos2 - cpos, pos2) self.setPath(path)
gpl-2.0
6,715,687,354,517,147,000
33.472672
99
0.580023
false
3.723516
false
false
false
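The SelectColorAction grid in the file above relies on a small but easy-to-miss Qt pattern: each ColorBox exposes a parameterless clicked signal, and functools.partial pre-binds the box's color so one shared value-carrying signal (colorSelected) can be emitted from many widgets. A minimal standalone sketch of that pattern; DemoBox and DemoAction are hypothetical names, not part of the file above:

import functools
import sys

from PyQt5 import QtCore, QtWidgets


class DemoBox(QtWidgets.QFrame):
    # parameterless signal, mirroring ColorBox.clicked in the file above
    clicked = QtCore.pyqtSignal()

    def __init__(self, name, parent=None):
        super().__init__(parent)
        self.setFixedSize(24, 24)
        self.setToolTip(name)

    def mousePressEvent(self, event):
        self.clicked.emit()


class DemoAction(QtCore.QObject):
    # value-carrying signal, like SelectColorAction.colorSelected
    colorSelected = QtCore.pyqtSignal(str)


app = QtWidgets.QApplication(sys.argv)
action = DemoAction()
action.colorSelected.connect(lambda name: print('picked', name))

grid = QtWidgets.QWidget()
layout = QtWidgets.QGridLayout(grid)
for idx, name in enumerate(['red', 'green', 'blue', 'white']):
    box = DemoBox(name)
    # partial() bakes the per-box value into the slot, so the no-argument
    # clicked signal can re-emit it through the value-carrying signal
    box.clicked.connect(functools.partial(action.colorSelected.emit, name))
    layout.addWidget(box, idx // 2, idx % 2)

grid.show()
sys.exit(app.exec_())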
chromium/chromium
tools/android/modularization/convenience/build_gn_editor.py
6
10119
# Lint as: python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r'''Helper code to handle editing BUILD.gn files.'''

from __future__ import annotations

import difflib
import pathlib
import re
import subprocess

from typing import List, Optional, Tuple


def _find_block(source: str, start: int, open_delim: str,
                close_delim: str) -> Tuple[int, int]:
  open_delim_pos = source[start:].find(open_delim)
  if open_delim_pos < 0:
    return (-1, -1)
  baseline = start + open_delim_pos

  delim_count = 1
  for i, char in enumerate(source[baseline + 1:]):
    if char == open_delim:
      delim_count += 1
      continue
    if char == close_delim:
      delim_count -= 1
      if delim_count == 0:
        return (baseline, baseline + i + 1)
  return (baseline, -1)


def _find_line_end(source: str, start: int) -> int:
  pos = source[start:].find('\n')
  if pos < 0:
    return -1
  return start + pos


class BuildFileUpdateError(Exception):
  """Represents an error updating the build file."""

  def __init__(self, message: str):
    super().__init__()
    self._message = message

  def __str__(self):
    return self._message


class VariableContentList(object):
  """Contains the elements of a list assigned to a variable in a gn target.

  Example:
  target_type("target_name") {
    foo = [
      "a",
      "b",
      "c",
    ]
  }

  This class represents the elements "a", "b", "c" for foo.
  """

  def __init__(self):
    self._elements = []

  def parse_from(self, content: str) -> bool:
    """Parses list elements from content and returns True on success.

    The expected list format must be a valid gn list. i.e.
    1. []
    2. [ "foo" ]
    3. [
         "foo",
         "bar",
         ...
       ]
    """
    start = content.find('[')
    if start < 0:
      return False
    end = start + content[start:].find(']')
    if end <= start:
      return False

    bracketless_content = content[start + 1:end].strip()
    if not bracketless_content:
      return True

    whitespace = re.compile(r'^\s+', re.MULTILINE)
    comma = re.compile(r',$', re.MULTILINE)
    self._elements = list(
        dict.fromkeys(
            re.sub(comma, '', re.sub(whitespace, '',
                                     bracketless_content)).split('\n')))
    return True

  def get_elements(self) -> List[str]:
    return self._elements

  def add_elements(self, elements: List[str]) -> None:
    """Appends unique elements to the existing list."""
    if not self._elements:
      self._elements = list(dict.fromkeys(elements))
      return

    all_elements = list(self._elements)
    all_elements.extend(elements)
    self._elements = list(dict.fromkeys(all_elements))

  def add_list(self, other: VariableContentList) -> None:
    """Appends unique elements to the existing list."""
    self.add_elements(other.get_elements())

  def serialize(self) -> str:
    if not self._elements:
      return '[]\n'
    return '[\n' + ',\n'.join(self._elements) + ',\n]'


class TargetVariable:
  """Contains the name of a variable and its contents in a gn target.

  Example:
  target_type("target_name") {
    variable_name = variable_content
  }

  This class represents the variable_name and variable_content.
  """

  def __init__(self, name: str, content: str):
    self._name = name
    self._content = content

  def get_name(self) -> str:
    return self._name

  def get_content(self) -> str:
    return self._content

  def get_content_as_list(self) -> Optional[VariableContentList]:
    """Returns the variable's content if it can be represented as a list."""
    content_list = VariableContentList()
    if content_list.parse_from(self._content):
      return content_list
    return None

  def is_list(self) -> bool:
    """Returns whether the variable's content is represented as a list."""
    return self.get_content_as_list() is not None

  def set_content_from_list(self, content_list: VariableContentList) -> None:
    self._content = content_list.serialize()

  def set_content(self, content: str) -> None:
    self._content = content

  def serialize(self) -> str:
    return f'\n{self._name} = {self._content}\n'


class BuildTarget:
  """Contains the target name, type and content of a gn target.

  Example:
  target_type("target_name") {
    <content>
  }

  This class represents target_type, target_name and arbitrary content.

  Specific variables are accessible via this class by name although only the
  basic 'foo = "bar"' and 'foo = [ "bar", "baz", ]' formats are supported,
  not more complex things like += or conditionals.
  """

  def __init__(self, target_type: str, target_name: str, content: str):
    self._target_type = target_type
    self._target_name = target_name
    self._content = content

  def get_name(self) -> str:
    return self._target_name

  def get_type(self) -> str:
    return self._target_type

  def get_variable(self, variable_name: str) -> Optional[TargetVariable]:
    pattern = re.compile(fr'^\s*{variable_name} = ', re.MULTILINE)
    match = pattern.search(self._content)
    if not match:
      return None
    start = match.end() - 1
    end = start
    if self._content[match.end()] == '[':
      start, end = _find_block(self._content, start, '[', ']')
    else:
      end = _find_line_end(self._content, start)
    if end <= start:
      return None

    return TargetVariable(variable_name, self._content[start:end + 1])

  def add_variable(self, variable: TargetVariable) -> None:
    """Adds the variable to the end of the content.

    Warning: this does not check for prior existence."""
    self._content += variable.serialize()

  def replace_variable(self, variable: TargetVariable) -> None:
    """Replaces an existing variable and returns True on success."""
    pattern = re.compile(fr'^\s*{variable.get_name()} =', re.MULTILINE)
    match = pattern.search(self._content)
    if not match:
      raise BuildFileUpdateError(
          f'{self._target_type}("{self._target_name}") variable '
          f'{variable.get_name()} not found. Unable to replace.')
    start = match.end()
    if variable.is_list():
      start, end = _find_block(self._content, start, '[', ']')
    else:
      end = _find_line_end(self._content, start)
    if end <= match.start():
      raise BuildFileUpdateError(
          f'{self._target_type}("{self._target_name}") variable '
          f'{variable.get_name()} invalid. Unable to replace.')

    self._content = (self._content[:match.start()] + variable.serialize() +
                     self._content[end + 1:])

  def serialize(self) -> str:
    return (f'\n{self._target_type}("{self._target_name}") {{\n' +
            f'{self._content}\n}}\n')


class BuildFile:
  """Represents the contents of a BUILD.gn file.

  This supports modifying or adding targets to the file at a basic level.
  """

  def __init__(self, build_gn_path: pathlib.Path):
    self._path = build_gn_path
    with open(self._path, 'r') as build_gn_file:
      self._content = build_gn_file.read()

  def get_target_names_of_type(self, target_type: str) -> List[str]:
    """Lists all targets in the build file of target_type."""
    pattern = re.compile(fr'^\s*{target_type}\(\"(\w+)\"\)', re.MULTILINE)
    return pattern.findall(self._content)

  def get_target(self, target_type: str,
                 target_name: str) -> Optional[BuildTarget]:
    pattern = re.compile(fr'^\s*{target_type}\(\"{target_name}\"\)',
                         re.MULTILINE)
    match = pattern.search(self._content)
    if not match:
      return None
    start, end = _find_block(self._content, match.end(), '{', '}')
    if end <= start:
      return None

    return BuildTarget(target_type, target_name,
                       self._content[start + 1:end])

  def get_path(self) -> pathlib.Path:
    return self._path

  def get_content(self) -> str:
    return self._content

  def get_diff(self) -> str:
    with open(self._path, 'r') as build_gn_file:
      disk_content = build_gn_file.read()
    return ''.join(
        difflib.unified_diff(disk_content.splitlines(keepends=True),
                             self._content.splitlines(keepends=True),
                             fromfile=f'{self._path}',
                             tofile=f'{self._path}'))

  def add_target(self, target: BuildTarget) -> None:
    """Adds the target to the end of the content.

    Warning: this does not check for prior existence."""
    self._content += target.serialize()

  def replace_target(self, target: BuildTarget) -> None:
    """Replaces an existing target and returns True on success."""
    pattern = re.compile(
        fr'^\s*{target.get_type()}\(\"{target.get_name()}\"\)', re.MULTILINE)
    match = pattern.search(self._content)
    if not match:
      raise BuildFileUpdateError(
          f'{target.get_type()}("{target.get_name()}") not found. '
          'Unable to replace.')
    start, end = _find_block(self._content, match.end(), '{', '}')
    if end <= start:
      raise BuildFileUpdateError(
          f'{target.get_type()}("{target.get_name()}") invalid. '
          'Unable to replace.')

    self._content = (self._content[:match.start()] + target.serialize() +
                     self._content[end + 1:])

  def format_content(self) -> None:
    process = subprocess.Popen(['gn', 'format', '--stdin'],
                               stdout=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout_data, stderr_data = process.communicate(input=self._content.encode())
    if process.returncode:
      raise BuildFileUpdateError(
          'Formatting failed. There was likely an error in the changes '
          '(this program cannot handle complex BUILD.gn files).\n'
          f'stderr: {stderr_data.decode()}')

    self._content = stdout_data.decode()

  def write_content_to_file(self) -> None:
    with open(self._path, 'w+') as build_gn_file:
      build_gn_file.write(self._content)
bsd-3-clause
719,191,919,956,314,500
29.20597
80
0.612709
false
3.764509
false
false
false
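The build_gn_editor classes compose into a read-modify-write pipeline: load a BUILD.gn, fetch a target, edit one of its list variables, then round-trip through gn format before writing. A minimal sketch using only the API shown above; the path and target names are hypothetical, and the sketch assumes the target and its deps variable exist:

import pathlib

from build_gn_editor import BuildFile, BuildFileUpdateError

build_file = BuildFile(pathlib.Path('chrome/android/BUILD.gn'))
target = build_file.get_target('android_library', 'chrome_java')
deps = target.get_variable('deps')

# VariableContentList deduplicates via dict.fromkeys, so repeated adds are harmless
dep_list = deps.get_content_as_list()
dep_list.add_elements(['"//base:base_java"'])
deps.set_content_from_list(dep_list)

try:
    target.replace_variable(deps)
    build_file.replace_target(target)
    build_file.format_content()  # shells out to `gn format --stdin`
except BuildFileUpdateError as e:
    raise SystemExit(str(e))
build_file.write_content_to_file()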
qedsoftware/commcare-hq
corehq/apps/hqmedia/views.py
1
22884
from StringIO import StringIO from mimetypes import guess_all_extensions, guess_type import uuid import zipfile import logging import os from django.contrib.auth.decorators import login_required import json import itertools from django.conf import settings from django.core.urlresolvers import reverse from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View, TemplateView from couchdbkit.exceptions import ResourceNotFound from django.http import HttpResponse, Http404, HttpResponseServerError, HttpResponseBadRequest from django.shortcuts import render import shutil from corehq import privileges from corehq.util.files import file_extention_from_filename from soil import DownloadBase from corehq.apps.accounting.utils import domain_has_privilege from corehq.apps.app_manager.decorators import safe_download from corehq.apps.app_manager.view_helpers import ApplicationViewMixin from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache, BulkMultimediaStatusCacheNfs from corehq.apps.hqmedia.controller import ( MultimediaBulkUploadController, MultimediaImageUploadController, MultimediaAudioUploadController, MultimediaVideoUploadController ) from corehq.apps.hqmedia.decorators import login_with_permission_from_post from corehq.apps.hqmedia.models import CommCareImage, CommCareAudio, CommCareMultimedia, MULTIMEDIA_PREFIX, CommCareVideo from corehq.apps.hqmedia.tasks import process_bulk_upload_zip, build_application_zip from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import Permissions from dimagi.utils.decorators.memoized import memoized from dimagi.utils.django.cached_object import CachedObject from soil.util import expose_cached_download from django.utils.translation import ugettext as _ from django_prbac.decorators import requires_privilege_raise404 class BaseMultimediaView(ApplicationViewMixin, View): @method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post())) def dispatch(self, request, *args, **kwargs): return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs) class BaseMultimediaTemplateView(BaseMultimediaView, TemplateView): """ The base view for all the multimedia templates. 
""" @property def page_context(self): return {} def get_context_data(self, **kwargs): context = { "domain": self.domain, "app": self.app, } context.update(self.page_context) return context def render_to_response(self, context, **response_kwargs): return render(self.request, self.template_name, context) class BaseMultimediaUploaderView(BaseMultimediaTemplateView): @property def page_context(self): return { 'uploaders': self.upload_controllers, "sessionid": self.request.COOKIES.get('sessionid'), } @property def upload_controllers(self): """ Return a list of Upload Controllers """ raise NotImplementedError("You must specify a list of upload controllers") class MultimediaReferencesView(BaseMultimediaUploaderView): name = "hqmedia_references" template_name = "hqmedia/references.html" @property def page_context(self): context = super(MultimediaReferencesView, self).page_context if self.app is None: raise Http404(self) context.update({ "references": self.app.get_references(), "object_map": self.app.get_object_map(), "totals": self.app.get_reference_totals(), "sessionid": self.request.COOKIES.get('sessionid'), }) return context @property def upload_controllers(self): return [ MultimediaImageUploadController("hqimage", reverse(ProcessImageFileUploadView.name, args=[self.domain, self.app_id])), MultimediaAudioUploadController("hqaudio", reverse(ProcessAudioFileUploadView.name, args=[self.domain, self.app_id])), MultimediaVideoUploadController("hqvideo", reverse(ProcessVideoFileUploadView.name, args=[self.domain, self.app_id])), ] class BulkUploadMultimediaView(BaseMultimediaUploaderView): name = "hqmedia_bulk_upload" template_name = "hqmedia/bulk_upload.html" @property def upload_controllers(self): return [MultimediaBulkUploadController("hqmedia_bulk", reverse(ProcessBulkUploadView.name, args=[self.domain, self.app_id]))] class BadMediaFileException(Exception): pass class BaseProcessUploadedView(BaseMultimediaView): @property def username(self): return self.request.couch_user.username if self.request.couch_user else None @property def share_media(self): return self.request.POST.get('shared') == 't' @property def license_used(self): return self.request.POST.get('license', '') @property def author(self): return self.request.POST.get('author', '') @property def attribution_notes(self): return self.request.POST.get('attribution-notes', '') @property @memoized def uploaded_file(self): return self.request.FILES.get('Filedata') @property @memoized def mime_type(self): try: data = self.uploaded_file.file.read() return CommCareMultimedia.get_mime_type(data, filename=self.uploaded_file.name) except Exception as e: raise BadMediaFileException("There was an error fetching the MIME type of your file. 
Error: %s" % e) @method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post())) # YUI js uploader library doesn't support csrf @csrf_exempt def dispatch(self, request, *args, **kwargs): return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs) def get(self, request, *args, **kwargs): return HttpResponseBadRequest("You may only post to this URL.") def post(self, request, *args, **kwargs): self.errors = [] response = {} try: self.validate_file() response.update(self.process_upload()) except BadMediaFileException as e: self.errors.append(e.message) response.update({ 'errors': self.errors, }) return HttpResponse(json.dumps(response)) def validate_file(self, replace_diff_ext=False): raise NotImplementedError("You must validate your uploaded file!") def process_upload(self): raise NotImplementedError("You definitely need to implement this guy.") class ProcessBulkUploadView(BaseProcessUploadedView): name = "hqmedia_uploader_bulk" @property @memoized def uploaded_zip(self): try: self.uploaded_file.file.seek(0) return zipfile.ZipFile(self.uploaded_file) except Exception as e: raise BadMediaFileException("There was an issue processing the zip file you provided. Error: %s" % e) def validate_file(self, replace_diff_ext=False): if not self.mime_type in self.valid_mime_types(): raise BadMediaFileException("Your zip file doesn't have a valid mimetype.") if not self.uploaded_zip: raise BadMediaFileException("There is no ZIP file.") if self.uploaded_zip.testzip(): raise BadMediaFileException("The ZIP file provided was bad.") def process_upload(self): if hasattr(self.uploaded_file, 'temporary_file_path') and settings.SHARED_DRIVE_CONF.temp_dir: processing_id = uuid.uuid4().hex path = settings.SHARED_DRIVE_CONF.get_temp_file(suffix='.upload') shutil.move(self.uploaded_file.temporary_file_path(), path) status = BulkMultimediaStatusCacheNfs(processing_id, path) status.save() else: self.uploaded_file.file.seek(0) saved_file = expose_cached_download( self.uploaded_file.file.read(), expiry=BulkMultimediaStatusCache.cache_expiry, file_extension=file_extention_from_filename(self.uploaded_file.name), ) processing_id = saved_file.download_id status = BulkMultimediaStatusCache(processing_id) status.save() process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id, username=self.username, share_media=self.share_media, license_name=self.license_used, author=self.author, attribution_notes=self.attribution_notes) return status.get_response() @classmethod def valid_mime_types(cls): return [ 'application/zip', 'application/x-zip', 'application/octet-stream', 'application/x-zip-compressed', ] class BaseProcessFileUploadView(BaseProcessUploadedView): media_class = None @property def form_path(self): return self.request.POST.get('path', '') @property def original_path(self): return self.request.POST.get('originalPath') @property def file_ext(self): def file_ext(filename): _, extension = os.path.splitext(filename) return extension return file_ext(self.uploaded_file.name) @property def orig_ext(self): if self.original_path is None: return self.file_ext return '.{}'.format(self.original_path.split('.')[-1]) def validate_file(self, replace_diff_ext=False): def possible_extensions(filename): possible_type = guess_type(filename)[0] if not possible_type: return [] return guess_all_extensions(guess_type(filename)[0]) if not self.mime_type: raise BadMediaFileException(_("Did not process a mime type!")) base_type = self.mime_type.split('/')[0] if base_type not in 
self.valid_base_types(): raise BadMediaFileException( _("Not a valid %s file.") % self.media_class.get_nice_name().lower() ) if self.file_ext.lower() not in possible_extensions(self.form_path): raise BadMediaFileException( _("File {name} has an incorrect file type {ext}.").format( name=self.uploaded_file.name, ext=self.file_ext, ) ) if not replace_diff_ext and self.file_ext.lower() != self.orig_ext.lower(): raise BadMediaFileException(_( "The file type of {name} of '{ext}' does not match the " "file type of the original media file '{orig_ext}'. To change " "file types, please upload directly from the " "Form Builder." ).format( name=self.uploaded_file.name, ext=self.file_ext.lower(), orig_ext=self.orig_ext.lower(), )) def process_upload(self): self.uploaded_file.file.seek(0) self.data = self.uploaded_file.file.read() multimedia = self.media_class.get_by_data(self.data) multimedia.attach_data(self.data, original_filename=self.uploaded_file.name, username=self.username) multimedia.add_domain(self.domain, owner=True) if self.share_media: multimedia.update_or_add_license(self.domain, type=self.license_used, author=self.author, attribution_notes=self.attribution_notes) self.app.create_mapping(multimedia, self.form_path) return { 'ref': multimedia.get_media_info(self.form_path), } @classmethod def valid_base_types(cls): raise NotImplementedError("You need to specify a list of valid base mime types!") class ProcessImageFileUploadView(BaseProcessFileUploadView): media_class = CommCareImage name = "hqmedia_uploader_image" @classmethod def valid_base_types(cls): return ['image'] class ProcessLogoFileUploadView(ProcessImageFileUploadView): name = "hqmedia_uploader_logo" @method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER)) def post(self, request, *args, **kwargs): return super(ProcessLogoFileUploadView, self).post(request, *args, **kwargs) @property def form_path(self): return ("jr://file/commcare/logo/data/%s%s" % (self.filename, self.file_ext)) def validate_file(self, replace_diff_ext=True): return super(ProcessLogoFileUploadView, self).validate_file(replace_diff_ext) @property def filename(self): return self.kwargs.get('logo_name') def process_upload(self): if self.app.logo_refs is None: self.app.logo_refs = {} ref = super( ProcessLogoFileUploadView, self ).process_upload() self.app.logo_refs[self.filename] = ref['ref'] self.app.save() return ref class ProcessAudioFileUploadView(BaseProcessFileUploadView): media_class = CommCareAudio name = "hqmedia_uploader_audio" @classmethod def valid_base_types(cls): return ['audio'] class ProcessVideoFileUploadView(BaseProcessFileUploadView): media_class = CommCareVideo name = "hqmedia_uploader_video" @classmethod def valid_base_types(cls): return ['video'] class ProcessTextFileUploadView(BaseProcessFileUploadView): media_class = CommCareMultimedia name = "hqmedia_uploader_text" @classmethod def valid_base_types(cls): return ['text'] class RemoveLogoView(BaseMultimediaView): name = "hqmedia_remove_logo" @property def logo_slug(self): if self.request.method == 'POST': return self.request.POST.get('logo_slug') return None @method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER)) def post(self, *args, **kwargs): if self.logo_slug in self.app.logo_refs: del self.app.logo_refs[self.logo_slug] self.app.save() return HttpResponse() class CheckOnProcessingFile(BaseMultimediaView): name = "hqmedia_check_processing" def get(self, request, *args, **kwargs): return HttpResponse("workin on it") def 
iter_media_files(media_objects): """ take as input the output of get_media_objects and return an iterator of (path, data) tuples for the media files as they should show up in the .zip as well as a list of error messages as a side effect of implementation, errors will not include all error messages until the iterator is exhausted """ errors = [] def _media_files(): for path, media in media_objects: try: data, _ = media.get_display_file() folder = path.replace(MULTIMEDIA_PREFIX, "") if not isinstance(data, unicode): yield os.path.join(folder), data except NameError as e: errors.append("%(path)s produced an ERROR: %(error)s" % { 'path': path, 'error': e, }) return _media_files(), errors def iter_app_files(app, include_multimedia_files, include_index_files, build_profile_id=None): file_iterator = [] errors = [] if include_multimedia_files: app.remove_unused_mappings() languages = None if build_profile_id is not None: languages = app.build_profiles[build_profile_id].langs file_iterator, errors = iter_media_files(app.get_media_objects(languages=languages)) if include_index_files: index_files, index_file_errors = iter_index_files(app, build_profile_id=build_profile_id) if index_file_errors: errors.extend(index_file_errors) file_iterator = itertools.chain(file_iterator, index_files) return file_iterator, errors class DownloadMultimediaZip(View, ApplicationViewMixin): """ This is where the Multimedia for an application gets generated. Expects domain and app_id to be in its args """ name = "download_multimedia_zip" compress_zip = False zip_name = 'commcare.zip' include_multimedia_files = True include_index_files = False def check_before_zipping(self): if not self.app.multimedia_map and self.include_multimedia_files: return HttpResponse("You have no multimedia to download.") def log_errors(self, errors): logging.error( "Error downloading multimedia ZIP " "for domain %s and application %s." 
% ( self.domain, self.app_id) ) return HttpResponseServerError( "Errors were encountered while " "retrieving media for this application.<br /> %s" % ( "<br />".join(errors)) ) def get(self, request, *args, **kwargs): assert self.include_multimedia_files or self.include_index_files error_response = self.check_before_zipping() if error_response: return error_response message = request.GET['message'] if 'message' in request.GET else None download = DownloadBase(message=message) build_profile_id = None if domain_has_privilege(request.domain, privileges.BUILD_PROFILES): build_profile_id = request.GET.get('profile') download.set_task(build_application_zip.delay( include_multimedia_files=self.include_multimedia_files, include_index_files=self.include_index_files, app=self.app, download_id=download.download_id, compress_zip=self.compress_zip, filename=self.zip_name, build_profile_id=build_profile_id) ) return download.get_start_response() @method_decorator(safe_download) def dispatch(self, request, *args, **kwargs): return super(DownloadMultimediaZip, self).dispatch(request, *args, **kwargs) class MultimediaUploadStatusView(View): name = "hqmedia_upload_status" @property @memoized def processing_id(self): return self.request.POST.get('processing_id') @method_decorator(login_required) def dispatch(self, request, *args, **kwargs): return super(MultimediaUploadStatusView, self).dispatch(request, *args, **kwargs) def get(self, request, *args, **kwargs): return HttpResponseBadRequest("Please post to this.") def post(self, request, *args, **kwargs): if not self.processing_id: return HttpResponseBadRequest("A processing_id is required.") status = BulkMultimediaStatusCache.get(self.processing_id) if status is None: # No status could be retrieved from the cache fake_status = BulkMultimediaStatusCache(self.processing_id) fake_status.complete = True fake_status.errors.append(_('There was an issue retrieving the status from the cache. ' 'We are looking into it. Please try uploading again.')) logging.error("[Multimedia Bulk Upload] Process ID #%s encountered an issue while retrieving " "a status from the cache." 
% self.processing_id) response = fake_status.get_response() else: response = status.get_response() return HttpResponse(json.dumps(response)) class ViewMultimediaFile(View): name = "hqmedia_download" @property @memoized def media_class(self): media_type = self.kwargs.get('media_type') try: return CommCareMultimedia.get_doc_class(media_type) except KeyError: raise Http404("Could not find media of that type.") @property @memoized def doc_id(self): return self.kwargs.get('doc_id') @property @memoized def multimedia(self): try: return self.media_class.get(self.doc_id) except ResourceNotFound: raise Http404("Media not found.") @property @memoized def thumb(self): thumb = self.request.GET.get('thumb') try: return int(thumb), int(thumb) except Exception: return None def get(self, request, *args, **kwargs): obj = CachedObject(str(self.doc_id) + ':' + self.kwargs.get('media_type') + ':' + str(self.thumb)) if not obj.is_cached(): data, content_type = self.multimedia.get_display_file() if self.thumb: data = CommCareImage.get_thumbnail_data(data, self.thumb) buffer = StringIO(data) metadata = {'content_type': content_type} obj.cache_put(buffer, metadata, timeout=None) else: metadata, buffer = obj.get() data = buffer.getvalue() content_type = metadata['content_type'] return HttpResponse(data, content_type=content_type) def iter_index_files(app, build_profile_id=None): from corehq.apps.app_manager.views.download import download_index_files skip_files = ('profile.xml', 'profile.ccpr', 'media_profile.xml') text_extensions = ('.xml', '.ccpr', '.txt') files = [] errors = [] def _get_name(f): return {'media_profile.ccpr': 'profile.ccpr'}.get(f, f) def _encode_if_unicode(s): return s.encode('utf-8') if isinstance(s, unicode) else s def _files(files): for name, f in files: if build_profile_id is not None: name = name.replace(build_profile_id + '/', '') if name not in skip_files: # TODO: make RemoteApp.create_all_files not return media files extension = os.path.splitext(name)[1] data = _encode_if_unicode(f) if extension in text_extensions else f yield (_get_name(name), data) try: files = download_index_files(app, build_profile_id) except Exception as e: errors = [unicode(e)] return _files(files), errors
bsd-3-clause
1,299,744,272,025,321,000
34.589425
121
0.627775
false
4.098872
false
false
false
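ProcessBulkUploadView's validate_file leans on two stdlib behaviors: zipfile.ZipFile raising when handed something that is not an archive, and ZipFile.testzip() returning the name of the first corrupt member (or None when the archive is intact). A standalone sketch of that validation step outside Django; the function name mirrors the view's logic but is otherwise an assumption:

import zipfile

def validate_zip_upload(fileobj):
    """Return an error message for a bad upload, or None if it looks fine."""
    try:
        # ZipFile accepts a path or a seekable file object, matching how
        # the view wraps request.FILES; the view catches broadly and
        # re-raises as BadMediaFileException
        uploaded_zip = zipfile.ZipFile(fileobj)
    except Exception as e:
        return "There was an issue processing the zip file you provided. Error: %s" % e
    # testzip() returns the first bad member name, or None
    if uploaded_zip.testzip():
        return "The ZIP file provided was bad."
    return None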
IntersectAustralia/asvo-tao
core/PerformanceCode/WallTimeResultsMerge.py
1
2770
import pickle, os, logging,string
import pg
import locale
import time
from datetime import date
import logging
import settingReader


class DBInterface(object):

    def __init__(self,Options):
        self.Options=Options
        self.InitDBConnection(self.Options)
        self.IsOpen=False
        self.QueriesCount=0

    def InitDBConnection(self,Options):
        ####### PostgreSQL Backend Master DB #################
        self.serverip=Options['PGDB:serverip']
        self.username=Options['PGDB:user']
        self.password=Options['PGDB:password']
        self.port=int(Options['PGDB:port'])
        self.DBName=Options['PGDB:NewDBName']

        self.CurrentConnection=pg.connect(host=self.serverip,user=self.username,passwd=self.password,port=self.port,dbname=self.DBName)
        print('Connection to DB is open...')
        self.IsOpen=True

    def CloseConnections(self):
        if self.IsOpen==True:
            self.CurrentConnection.close()
            print('Connection to DB is Closed...')
            self.IsOpen=False

    def ExecuteNoQuerySQLStatment(self,SQLStatment):
        try:
            self.CurrentConnection.query(SQLStatment)
            return True
        except Exception as Exp:
            print(">>>>>Error While Executing Non-Query SQL Statement")
            print(type(Exp))
            print(Exp.args)
            print(Exp)
            print("Current SQL Statement =\n"+SQLStatment)
            return False

    def ExecuteQuerySQLStatment(self,SQLStatment):
        try:
            resultsList=self.CurrentConnection.query(SQLStatment).getresult()
            return resultsList
        except Exception as Exp:
            print(">>>>>Error While Executing Query SQL Statement")
            print(type(Exp))
            print(Exp.args)
            print(Exp)
            print("Current SQL Statement =\n"+SQLStatment)

    def ExecuteQuerySQLStatmentAsDict(self,SQLStatment):
        try:
            resultsList=self.CurrentConnection.query(SQLStatment).dictresult()
            return resultsList
        except Exception as Exp:
            print(">>>>>Error While Executing Query SQL Statement")
            print(type(Exp))
            print(Exp.args)
            print(Exp)
            print("Current SQL Statement =\n"+SQLStatment)


if __name__ == '__main__':
    [Options]=settingReader.ParseParams("settings.xml")
    DBConnectionObj=DBInterface(Options)
    DBConnectionObj.CloseConnections()
gpl-3.0
8,723,199,746,973,042,000
34.075949
135
0.555596
false
4.6633
false
false
false
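A minimal sketch of driving DBInterface the way its __main__ block does, plus one query; it assumes settings.xml defines the PGDB:* keys read in InitDBConnection, and the table name is hypothetical:

import settingReader

[Options] = settingReader.ParseParams("settings.xml")
db = DBInterface(Options)
# getresult() yields rows as tuples; dictresult() yields dicts keyed by column name
rows = db.ExecuteQuerySQLStatmentAsDict("SELECT COUNT(*) AS jobs FROM walltimes;")
if rows is not None:
    print(rows[0]['jobs'])
db.CloseConnections()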
Mariaanisimova/pythonintask
IVTa/2014/EGOROV_V_I/task_8_8.py
1
2933
# Task 8. Variant 8
'''
Extend the "Anagrams" game (see M. Dawson, Programming in Python, ch. 4)
so that each word comes with a hint. The player earns the right to a hint
when they have no guesses of their own. Design a scoring system under
which players who guess the word without a hint score more than those
who asked for one.
'''
# Egorov V. I.
# 15.05.2016

from random import shuffle, choice


def score_print(score):
    print('You have', score, 'points')
    print('You have', attempts, 'attempts')


fine = 0
score = 0
attempts = 3
cont = 1
while cont == 1 and attempts > 0:
    words_and_info = (
        ('Venus', 'The hottest planet of the Solar System.'),
        ('Mercury', 'This planet is the closest one to the Sun'),
        ('Jupiter', 'The largest planet of the Solar System'),
        ('Pluto', 'The smallest planet of the Solar System'),
        ('Earth', 'The existence of life on this planet leaves no doubt'),
        ('Saturn', 'This planet has a clearly visible ring system'),
        ('Mars', 'This planet actually does have water'),
        ('Uranus', 'It seems nobody told this planet that one is supposed to spin on one\'s axis from east to west'),
        ('Neptune', 'Wicked tongues say this very planet is the farthest one from the Sun in its system.')
    )
    choiced = choice(words_and_info)
    word = list(choiced[0].lower())
    shuffle(word)
    word = ''.join(word)
    print('Guess the planet of the Solar System -', word)
    print('Type "hint" to get a clue')
    score_print(score)
    fine = 0
    while attempts > 0:
        guess = input('> ').lower()
        if guess == 'hint':
            print('Hint: ', choiced[1])
            fine = 1
            score_print(score)
            continue
        elif guess == choiced[0].lower():
            print('Correct -', choiced[0])
            score += 2 - fine  # 2 points for a clean guess, 1 if a hint was used
            score_print(score)
            break
        else:
            print('Wrong')
            attempts -= 1
            score_print(score)
    yepnope = input('Continue?')
    if yepnope == 'yes' or yepnope == 'Yes':
        cont = 1
    else:
        cont = 0
input('Press ENTER...')
apache-2.0
-1,640,939,314,107,939,300
27.333333
105
0.683333
false
1.558442
false
false
false
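The game's two core mechanics in isolation: building the anagram with random.shuffle, and the hint penalty baked into score += 2 - fine. A minimal sketch:

from random import shuffle

letters = list('neptune')
shuffle(letters)            # in-place shuffle of the letter list
anagram = ''.join(letters)
print(anagram)              # e.g. 'puntene'

fine = 1                    # 1 once a hint has been requested, else 0
score = 2 - fine            # 2 points for a clean guess, 1 with a hint
print(score)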
dasseclab/dasseclab
clones/routersploit/tests/payloads/x64/test_reverse_tcp.py
1
1678
from routersploit.modules.payloads.x64.reverse_tcp import Payload

# reverse tcp with lhost=192.168.1.4 lport=4321
reverse_tcp = (
    b"\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\x48"
    b"\xb9\x02\x00\x10\xe1\xc0\xa8\x01\x04\x51\x48\x89\xe6\x6a\x10"
    b"\x5a\x6a\x2a\x58\x0f\x05\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58"
    b"\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x48\xbb\x2f\x62\x69\x6e\x2f"
    b"\x73\x68\x00\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05"
)

# elf x64 reverse tcp
elf_x64_reverse_tcp = (
    b"\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00"
    b"\x00\x02\x00\x3e\x00\x01\x00\x00\x00\x78\x00\x40\x00\x00\x00"
    b"\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    b"\x00\x00\x00\x00\x00\x00\x00\x40\x00\x38\x00\x01\x00\x00\x00"
    b"\x00\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00"
    b"\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00"
    b"\x40\x00\x00\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x00\x00\x0c"
    b"\x01\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00"
    b"\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\x48"
    b"\xb9\x02\x00\x10\xe1\xc0\xa8\x01\x04\x51\x48\x89\xe6\x6a\x10"
    b"\x5a\x6a\x2a\x58\x0f\x05\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58"
    b"\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x48\xbb\x2f\x62\x69\x6e\x2f"
    b"\x73\x68\x00\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05"
)


def test_payload_generation():
    """ Test scenario - payload generation """

    payload = Payload()
    payload.lhost = "192.168.1.4"
    payload.lport = 4321

    assert payload.generate() == reverse_tcp
    assert payload.generate_elf(reverse_tcp) == elf_x64_reverse_tcp
gpl-2.0
1,970,158,449,034,645,000
42.025641
67
0.679976
false
1.602674
false
false
false
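The constants in this test can be cross-checked by hand: bytes 16..23 of reverse_tcp are the literal sockaddr_in loaded by the preceding movabs rcx (\x48\xb9) instruction, namely a little-endian AF_INET, the port 4321 in network byte order, then the IPv4 address 192.168.1.4. The stdlib reproduces those bytes exactly, which is a convenient way to derive expected constants for tests like this one:

import socket
import struct

# AF_INET (2, little-endian) + port 4321 (network order) + 192.168.1.4
sockaddr = (struct.pack("<H", socket.AF_INET)
            + struct.pack(">H", 4321)
            + socket.inet_aton("192.168.1.4"))
assert sockaddr == b"\x02\x00\x10\xe1\xc0\xa8\x01\x04"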
flavoi/diventi
diventi/ebooks/migrations/0110_auto_20200830_1750.py
1
3345
# Generated by Django 2.2.13 on 2020-08-30 15:50

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('ebooks', '0109_auto_20200821_1049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='chapter',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='part',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='replacementrule',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='secret',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='section',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='sectionaspect',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
        migrations.AlterField(
            model_name='universalsection',
            name='color',
            field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
        ),
    ]
apache-2.0
6,576,685,759,129,036,000
62.113208
284
0.556652
false
3.862587
false
false
false
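Each AlterField above rewrites the same CharField on eight models, which suggests the model side shares a single choices constant. A sketch of what that field plausibly looks like; the constant and model names are assumptions, not taken from diventi. Note that the default 'default' is not among the declared choices, which Django only flags during form or full_clean validation, not at the database level:

from django.db import models

# assumed shared constant; the migration inlines these pairs on every model
COLOR_CHOICES = [
    ('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'),
    ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'),
    ('dark', 'Black'), ('light', 'White'),
]

class Book(models.Model):  # hypothetical stand-in for the eight models above
    color = models.CharField(blank=True, choices=COLOR_CHOICES,
                             default='default', max_length=30,
                             verbose_name='color')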
sindhus/hasjob
hasjob/models/__init__.py
1
2623
# -*- coding: utf-8 -*-
# flake8: noqa

from datetime import timedelta

from coaster import LabeledEnum
from coaster.db import db
from coaster.sqlalchemy import (BaseMixin, BaseNameMixin, TimestampMixin,
    BaseScopedIdMixin, BaseScopedNameMixin, CoordinatesMixin,
    make_timestamp_columns)

from .. import app

agelimit = timedelta(days=30)
newlimit = timedelta(days=1)


class POSTSTATUS:
    DRAFT = 0         # Being written
    PENDING = 1       # Pending email verification
    CONFIRMED = 2     # Post is now live on site
    REVIEWED = 3      # Reviewed and cleared for push channels
    REJECTED = 4      # Reviewed and rejected as inappropriate
    WITHDRAWN = 5     # Withdrawn by owner
    FLAGGED = 6       # Flagged by users for review
    SPAM = 7          # Marked as spam
    MODERATED = 8     # Moderated, needs edit
    ANNOUNCEMENT = 9  # Special announcement
    CLOSED = 10       # Not accepting applications, but publicly viewable

    UNPUBLISHED = (DRAFT, PENDING)
    GONE = (REJECTED, WITHDRAWN, SPAM)
    LISTED = (CONFIRMED, REVIEWED, ANNOUNCEMENT)
    POSTPENDING = (CONFIRMED, REVIEWED, REJECTED, WITHDRAWN, FLAGGED, SPAM,
        MODERATED, ANNOUNCEMENT)
    MY = (DRAFT, PENDING, CONFIRMED, REVIEWED, MODERATED, ANNOUNCEMENT, CLOSED)
    ARCHIVED = (CONFIRMED, REVIEWED, ANNOUNCEMENT, CLOSED)


class CURRENCY(LabeledEnum):
    INR = ('INR', 'INR')
    USD = ('USD', 'USD')
    EUR = ('EUR', 'EUR')
    __order__ = (INR, USD, EUR)


class EMPLOYER_RESPONSE(LabeledEnum):
    NEW = (0, u"New")            # New application
    PENDING = (1, u"Pending")    # Employer viewed on website
    IGNORED = (2, u"Ignored")    # Dismissed as not worth responding to
    REPLIED = (3, u"Replied")    # Employer replied to candidate
    FLAGGED = (4, u"Flagged")    # Employer reported a spammer
    SPAM = (5, u"Spam")          # Admin marked this as spam
    REJECTED = (6, u"Rejected")  # Employer rejected candidate with a message


class PAY_TYPE(LabeledEnum):
    NOCASH = (0, u"Nothing")
    ONETIME = (1, u"One-time")
    RECURRING = (2, u"Recurring")


class CANDIDATE_FEEDBACK(LabeledEnum):
    NORESPONSE = (0, u"No response")
    INPROCESS = (1, u"In process")
    DID_NOT_GET = (2, u"Did not get the job")
    DID_NOT_ACCEPT = (3, u"Got offer, did not accept")
    GOT_JOB = (4, u"Got the job")


from .user import *
from .jobcategory import *
from .jobpostreport import *
from .jobtype import *
from .location import *
from .tag import *
from .reportcode import *
from .jobpost import *
from .domain import *
from .board import *
from .flags import *
from .campaign import *
agpl-3.0
6,810,176,725,132,138,000
32.628205
100
0.655738
false
3.085882
false
false
false
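The tuple groupings on POSTSTATUS exist so state checks read as simple membership tests instead of chains of equality comparisons, and queries elsewhere in hasjob can filter with a single IN clause over the same tuples. A minimal sketch using only the class defined above:

status = POSTSTATUS.CONFIRMED
print(status in POSTSTATUS.LISTED)  # True: confirmed posts are publicly visible
print(status in POSTSTATUS.GONE)    # False: not rejected, withdrawn or spam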
leogregianin/pychess
lib/pychess/Players/CECPEngine.py
1
39654
import asyncio import itertools import re from gi.repository import Gtk, GObject from pychess.compat import create_task from pychess.Utils import wait_signal from pychess.System import conf from pychess.System.Log import log from pychess.widgets import mainwindow from pychess.Utils.Move import Move from pychess.Utils.Board import Board from pychess.Utils.Cord import Cord from pychess.Utils.Move import toSAN, toAN, parseAny from pychess.Utils.Offer import Offer from pychess.Utils.const import ANALYZING, INVERSE_ANALYZING, DRAW, WHITEWON, BLACKWON, \ WON_ADJUDICATION, DRAW_OFFER, ACTION_ERROR_NONE_TO_ACCEPT, CASTLE_KK, WHITE, \ CASTLE_SAN, FISCHERRANDOMCHESS, BLACK, reprSign, RESIGNATION from pychess.Utils.logic import validate, getMoveKillingKing from pychess.Utils.lutils.ldata import MATE_VALUE from pychess.Utils.lutils.lmove import ParsingError from pychess.Variants import variants from pychess.Players.Player import PlayerIsDead, TurnInterrupt, InvalidMove from .ProtocolEngine import ProtocolEngine, TIME_OUT_SECOND movere = re.compile(r""" ( # group start (?: # non grouping parenthesis start [PKQRBN]? # piece [a-h]?[1-8]? # unambiguous column or line x? # capture @? # drop [a-h][1-8] # destination square =?[QRBN]? # promotion |O\-O(?:\-O)? # castling |0\-0(?:\-0)? # castling ) # non grouping parenthesis end [+#]? # check/mate ) # group end \s* # any whitespace """, re.VERBOSE) d_plus_dot_expr = re.compile(r"\d+\.") anare = re.compile(""" ^ # beginning of string (\s* # \d+ [+\-\.]? # The ply analyzed. Some engines end it with a dot, minus or plus \s+) # (-?Mat\s*\d+ | [+\-\d\.]+) # The score found in centipawns. # Mat1 is used by gnuchess to specify mate in one. # otherwise we should support a signed float \s+ # ([\d\.]+) # The time used in centi-seconds \s+ # ([\d\.]+) # Number of nodes visited \s+ # (.+) # The Principal-Variation. With or without move numbers \s* # $ # end of string """, re.VERBOSE) # anare = re.compile("\(d+)\.?\s+ (Mat\d+|[-\d\.]+) \s+ \d+\s+\d+\s+((?:%s\s*)+)" % mov) whitespaces = re.compile(r"\s+") # There is no way in the CECP protocol to determine if an engine not answering # the protover=2 handshake with done=1 is old or just very slow. Thus we # need a timeout after which we conclude the engine is 'protover=1' and will # never answer. # XBoard will only give 2 seconds, but as we are quite sure that # the engines support the protocol, we can add more. We don't add # infinite time though, just in case. # The engine can get more time by sending done=0 class CECPEngine(ProtocolEngine): def __init__(self, subprocess, color, protover, md5): ProtocolEngine.__init__(self, subprocess, color, protover, md5) self.features = { "ping": 0, "setboard": 0, "playother": 0, "san": 0, "usermove": 0, "time": 1, "draw": 1, "sigint": 0, "sigterm": 0, "reuse": 0, "analyze": 0, "myname": ', '.join(self.defname), "variants": None, "colors": 1, "ics": 0, "name": 0, "pause": 0, "nps": 0, "debug": 0, "memory": 0, "smp": 0, "egt": '', "option": '', "exclude": 0, "done": None, } self.supported_features = [ "ping", "setboard", "san", "usermove", "time", "draw", "sigint", "analyze", "myname", "variants", "colors", "pause", "done", "egt", "debug", "smp", "memory", "option" ] self.options = {} self.options["Ponder"] = {"name": "Ponder", "type": "check", "default": False} self.name = None self.board = Board(setup=True) # if self.engineIsInNotPlaying == True, engine is in "force" mode, # i.e. 
not thinking or playing, but still verifying move legality self.engineIsInNotPlaying = False self.engineIsAnalyzing = False self.movenext = False self.waitingForMove = False self.readyForMoveNowCommand = False self.timeHandicap = 1 self.lastping = 0 self.lastpong = 0 self.queue = asyncio.Queue() self.parse_line_task = create_task(self.parseLine(self.engine)) self.died_cid = self.engine.connect("died", lambda e: self.queue.put_nowait("die")) self.invalid_move = None self.optionQueue = [] self.undoQueue = [] self.ready_moves_event = asyncio.Event() self.cids = [ self.connect_after("readyForOptions", self.__onReadyForOptions), self.connect_after("readyForMoves", self.__onReadyForMoves), ] # Starting the game def prestart(self): print("xboard", file=self.engine) if self.protover == 1: # start a new game (CECPv1 engines): print("new", file=self.engine) # we are now ready for options: self.emit("readyForOptions") elif self.protover == 2: # start advanced protocol initialisation: print("protover 2", file=self.engine) # we don't start a new game for CECPv2 here, # we will do it after feature accept/reject is completed. def start(self, event, is_dead): create_task(self.__startBlocking(event, is_dead)) @asyncio.coroutine def __startBlocking(self, event, is_dead): if self.protover == 1: self.emit("readyForMoves") return_value = "ready" if self.protover == 2: try: return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND) if return_value == "not ready": return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND) # Gaviota sends done=0 after "xboard" and after "protover 2" too if return_value == "not ready": return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND) self.emit("readyForOptions") self.emit("readyForMoves") except asyncio.TimeoutError: log.warning("Got timeout error", extra={"task": self.defname}) is_dead.add(True) except Exception: log.warning("Unknown error", extra={"task": self.defname}) is_dead.add(True) else: if return_value == "die": is_dead.add(True) assert return_value == "ready" or return_value == "del" if event is not None: event.set() def __onReadyForOptions(self, self_): # We always want post turned on so the Engine Output sidebar can # show those things -Jonas Thiem print("post", file=self.engine) for command in self.optionQueue: print(command, file=self.engine) def __onReadyForMoves(self, self_): if self.mode in (ANALYZING, INVERSE_ANALYZING): # workaround for crafty not sending analysis after it has found a mating line # http://code.google.com/p/pychess/issues/detail?id=515 if "crafty" in self.features["myname"].lower(): print("noise 0", file=self.engine) self.__sendAnalyze(self.mode == INVERSE_ANALYZING) self.ready_moves_event.set() self.readyMoves = True # Ending the game def end(self, status, reason): self.parse_line_task.cancel() if self.engine.handler_is_connected(self.died_cid): self.engine.disconnect(self.died_cid) if self.handler_is_connected(self.analyze_cid): self.disconnect(self.analyze_cid) for cid in self.cids: if self.handler_is_connected(cid): self.disconnect(cid) self.board = None if self.connected: # We currently can't fillout the comment "field" as the repr strings # for reasons and statuses lies in Main.py # Creating Status and Reason class would solve this if status == DRAW: print("result 1/2-1/2 {?}", file=self.engine) elif status == WHITEWON: print("result 1-0 {?}", file=self.engine) elif status == BLACKWON: print("result 0-1 {?}", file=self.engine) else: print("result * {?}", file=self.engine) if 
reason == WON_ADJUDICATION: self.queue.put_nowait("invalid") # Make sure the engine exits and do some cleaning self.kill(reason) def kill(self, reason): """ Kills the engine, starting with the 'quit' command, then sigterm and eventually sigkill. Returns the exitcode, or if engine have already been killed, returns None """ if self.connected: self.connected = False try: try: print("quit", file=self.engine) self.queue.put_nowait("del") self.engine.terminate() except OSError as err: # No need to raise on a hang up error, as the engine is dead # anyways if err.errno == 32: log.warning("Hung up Error", extra={"task": self.defname}) return err.errno else: raise finally: # Clear the analyzed data, if any self.emit("analyze", []) # Send the player move updates def setBoard(self, board, search=True): def coro(): if self.engineIsAnalyzing: self.__stop_analyze() yield from asyncio.sleep(0.1) self.setBoardList([board], []) if search: self.__sendAnalyze(self.mode == INVERSE_ANALYZING) create_task(coro()) def putMove(self, board1, move, board2): """ Sends the engine the last move made (for spectator engines). @param board1: The current board @param move: The last move made @param board2: The board before the last move was made """ def coro(): if self.engineIsAnalyzing: self.__stop_analyze() yield from asyncio.sleep(0.1) self.setBoardList([board1], []) if not self.analyzing_paused: self.__sendAnalyze(self.mode == INVERSE_ANALYZING) create_task(coro()) @asyncio.coroutine def makeMove(self, board1, move, board2): """ Gets a move from the engine (for player engines). @param board1: The current board @param move: The last move made @param board2: The board before the last move was made @return: The move the engine decided to make """ log.debug("makeMove: move=%s self.movenext=%s board1=%s board2=%s self.board=%s" % ( move, self.movenext, board1, board2, self.board), extra={"task": self.defname}) assert self.readyMoves if self.board == board1 or not board2 or self.movenext: self.board = board1 self.__tellEngineToPlayCurrentColorAndMakeMove() self.movenext = False else: self.board = board1 self.__usermove(board2, move) if self.engineIsInNotPlaying: self.__tellEngineToPlayCurrentColorAndMakeMove() self.waitingForMove = True self.readyForMoveNowCommand = True # Parse outputs status = yield from self.queue.get() if status == "not ready": log.warning( "Engine seems to be protover=2, but is treated as protover=1", extra={"task": self.defname}) status = yield from self.queue.get() if status == "ready": status = yield from self.queue.get() if status == "invalid": raise InvalidMove if status == "del" or status == "die": raise PlayerIsDead("Killed by foreign forces") if status == "int": raise TurnInterrupt self.waitingForMove = False self.readyForMoveNowCommand = False assert isinstance(status, Move), status return status def updateTime(self, secs, opsecs): if self.features["time"]: print("time %s" % int(secs * 100 * self.timeHandicap), file=self.engine) print("otim %s" % int(opsecs * 100), file=self.engine) # Standard options def setOptionAnalyzing(self, mode): self.mode = mode def setOptionInitialBoard(self, model): @asyncio.coroutine def coro(): yield from self.ready_moves_event.wait() # We don't use the optionQueue here, as set board prints a whole lot of # stuff. Instead we just call it. 
self.setBoardList(model.boards[:], model.moves[:]) create_task(coro()) def setBoardList(self, boards, moves): # Notice: If this method is to be called while playing, the engine will # need 'new' and an arrangement similar to that of 'pause' to avoid # the current thought move to appear if self.mode not in (ANALYZING, INVERSE_ANALYZING): self.__tellEngineToStopPlayingCurrentColor() self.__setBoard(boards[0]) self.board = boards[-1] for board, move in zip(boards[:-1], moves): self.__usermove(board, move) if self.mode in (ANALYZING, INVERSE_ANALYZING): self.board = boards[-1] if self.mode == INVERSE_ANALYZING: self.board = self.board.switchColor() # The called of setBoardList will have to repost/analyze the # analyzer engines at this point. def setOptionVariant(self, variant): if self.features["variants"] is None: log.warning("setOptionVariant: engine doesn't support variants", extra={"task": self.defname}) return if variant in variants.values() and not variant.standard_rules: assert variant.cecp_name in self.features["variants"], \ "%s doesn't support %s variant" % (self, variant.cecp_name) self.optionQueue.append("variant %s" % variant.cecp_name) # Strength system # # Strength Depth Ponder Time handicap # # 1 1 o 1,258% # # 2 2 o 1,584% # # 3 3 o 1.995% # # # # 19 o x 79,43% # # 20 o x o # def setOptionStrength(self, strength, forcePonderOff): self.strength = strength if strength <= 19: self.__setTimeHandicap(0.01 * 10 ** (strength / 10.)) if strength <= 18: self.__setDepth(strength) # Crafty ofers 100 skill levels if "crafty" in self.features["myname"].lower() and strength <= 19: self.optionQueue.append("skill %s" % strength * 5) self.__setPonder(strength >= 19 and not forcePonderOff) if strength == 20: if "gaviota" in self.features["egt"]: self.optionQueue.append("egtpath gaviota %s" % conf.get("egtb_path")) else: self.optionQueue.append("random") def __setDepth(self, depth): self.optionQueue.append("sd %d" % depth) def __setTimeHandicap(self, timeHandicap): self.timeHandicap = timeHandicap def __setPonder(self, ponder): if ponder: self.optionQueue.append("hard") else: self.optionQueue.append("hard") self.optionQueue.append("easy") def setOptionTime(self, secs, gain, moves): # Notice: In CECP we apply time handicap in updateTime, not in # setOptionTime. minutes = int(secs / 60) secs = int(secs % 60) mins = str(minutes) if secs: mins += ":" + str(secs) self.optionQueue.append("level %s %s %d" % (moves, mins, gain)) # Option handling def setOption(self, key, value): """ Set an option, which will be sent to the engine, after the 'readyForOptions' signal has passed. If you want to know the possible options, you should go to engineDiscoverer or use the hasOption method while you are in your 'readyForOptions' signal handler """ if self.readyMoves: log.warning( "Options set after 'readyok' are not sent to the engine", extra={"task": self.defname}) if key == "cores": self.optionQueue.append("cores %s" % value) elif key == "memory": self.optionQueue.append("memory %s" % value) elif key.lower() == "ponder": self.__setPonder(value == 1) else: self.optionQueue.append("option %s=%s" % (key, value)) # Interacting with the player def pause(self): """ Pauses engine using the "pause" command if available. Otherwise put engine in force mode. By the specs the engine shouldn't ponder in force mode, but some of them do so anyways. 
""" log.debug("pause: self=%s" % self, extra={"task": self.defname}) if self.isAnalyzing(): self.__stop_analyze() self.analyzing_paused = True else: self.engine.pause() return def resume(self): log.debug("resume: self=%s" % self, extra={"task": self.defname}) if self.isAnalyzing(): self.__sendAnalyze(self.mode == INVERSE_ANALYZING) self.analyzing_paused = False else: self.engine.resume() return def hurry(self): log.debug("hurry: self.waitingForMove=%s self.readyForMoveNowCommand=%s" % ( self.waitingForMove, self.readyForMoveNowCommand), extra={"task": self.defname}) if self.waitingForMove and self.readyForMoveNowCommand: self.__tellEngineToMoveNow() self.readyForMoveNowCommand = False def spectatorUndoMoves(self, moves, gamemodel): if self.analyzing_paused: return log.debug("spectatorUndoMoves: moves=%s gamemodel.ply=%s gamemodel.boards[-1]=%s self.board=%s" % ( moves, gamemodel.ply, gamemodel.boards[-1], self.board), extra={"task": self.defname}) for i in range(moves): print("undo", file=self.engine) self.board = gamemodel.boards[-1] def playerUndoMoves(self, moves, gamemodel): log.debug("CECPEngine.playerUndoMoves: moves=%s self=%s gamemodel.curplayer=%s" % (moves, self, gamemodel.curplayer), extra={"task": self.defname}) self.board = gamemodel.boards[-1] self.__tellEngineToStopPlayingCurrentColor() for i in range(moves): print("undo", file=self.engine) if gamemodel.curplayer != self and moves % 2 == 1 or \ (gamemodel.curplayer == self and moves % 2 == 0): # Interrupt if we were searching, but should no longer do so log.debug("CECPEngine.playerUndoMoves: putting TurnInterrupt into self.move_queue %s" % self.name, extra={"task": self.defname}) self.queue.put_nowait("int") # Offer handling def offer(self, offer): if offer.type == DRAW_OFFER: if self.features["draw"]: print("draw", file=self.engine) else: self.emit("accept", offer) def offerError(self, offer, error): if self.features["draw"]: # We don't keep track if engine draws are offers or accepts. We just # Always assume they are accepts, and if they are not, we get this # error and emit offer instead if offer.type == DRAW_OFFER and error == ACTION_ERROR_NONE_TO_ACCEPT: self.emit("offer", Offer(DRAW_OFFER)) # Internal def __usermove(self, board, move): if self.features["usermove"]: self.engine.write("usermove ") if self.features["san"]: print(toSAN(board, move), file=self.engine) else: castle_notation = CASTLE_KK if board.variant == FISCHERRANDOMCHESS: castle_notation = CASTLE_SAN print( toAN(board, move, short=True, castleNotation=castle_notation), file=self.engine) def __tellEngineToMoveNow(self): if self.features["sigint"]: self.engine.sigint() print("?", file=self.engine) def __tellEngineToStopPlayingCurrentColor(self): print("force", file=self.engine) self.engineIsInNotPlaying = True def __tellEngineToPlayCurrentColorAndMakeMove(self): self.__printColor() print("go", file=self.engine) self.engineIsInNotPlaying = False def __stop_analyze(self): if self.engineIsAnalyzing: print("exit", file=self.engine) # Some engines (crafty, gnuchess) doesn't respond to exit command # we try to force them to stop with an empty board fen print("setboard 8/8/8/8/8/8/8/8 w - - 0 1", file=self.engine) self.engineIsAnalyzing = False def __sendAnalyze(self, inverse=False): if inverse and self.board.board.opIsChecked(): # Many engines don't like positions able to take down enemy # king. 
Therefore we just return the "kill king" move # automaticaly self.emit("analyze", [(self.board.ply, [toAN( self.board, getMoveKillingKing(self.board))], MATE_VALUE - 1, "1", "")]) return print("post", file=self.engine) print("analyze", file=self.engine) self.engineIsAnalyzing = True if not conf.get("infinite_analysis"): loop = asyncio.get_event_loop() loop.call_later(conf.get("max_analysis_spin"), self.__stop_analyze) def __printColor(self): if self.features["colors"]: # or self.mode == INVERSE_ANALYZING: if self.board.color == WHITE: print("white", file=self.engine) else: print("black", file=self.engine) def __setBoard(self, board): if self.features["setboard"]: self.__tellEngineToStopPlayingCurrentColor() fen = board.asFen(enable_bfen=False) if self.mode == INVERSE_ANALYZING: fen_arr = fen.split() if not self.board.board.opIsChecked(): if fen_arr[1] == "b": fen_arr[1] = "w" else: fen_arr[1] = "b" fen = " ".join(fen_arr) print("setboard %s" % fen, file=self.engine) else: # Kludge to set black to move, avoiding the troublesome and now # deprecated "black" command. - Equal to the one xboard uses self.__tellEngineToStopPlayingCurrentColor() if board.color == BLACK: print("a2a3", file=self.engine) print("edit", file=self.engine) print("#", file=self.engine) for color in WHITE, BLACK: for y_loc, row in enumerate(board.data): for x_loc, piece in row.items(): if not piece or piece.color != color: continue sign = reprSign[piece.sign] cord = repr(Cord(x_loc, y_loc)) print(sign + cord, file=self.engine) print("c", file=self.engine) print(".", file=self.engine) # Parsing @asyncio.coroutine def parseLine(self, proc): while True: line = yield from wait_signal(proc, 'line') if not line: break else: line = line[1] if line[0:1] == "#": # Debug line which we shall ignore as specified in CECPv2 specs continue # log.debug("__parseLine: line=\"%s\"" % line.strip(), extra={"task":self.defname}) parts = whitespaces.split(line.strip()) if parts[0] == "pong": self.lastpong = int(parts[1]) continue # Illegal Move if parts[0].lower().find("illegal") >= 0: log.warning("__parseLine: illegal move: line=\"%s\", board=%s" % ( line.strip(), self.board), extra={"task": self.defname}) if parts[-2] == "sd" and parts[-1].isdigit(): print("depth", parts[-1], file=self.engine) continue # A Move (Perhaps) if self.board: if parts[0] == "move": movestr = parts[1] # Old Variation elif d_plus_dot_expr.match(parts[0]) and parts[1] == "...": movestr = parts[2] else: movestr = False if movestr: self.waitingForMove = False self.readyForMoveNowCommand = False if self.engineIsInNotPlaying: # If engine was set in pause just before the engine sent its # move, we ignore it. 
However the engine has to know that we # ignored it, and thus we step it one back log.info("__parseLine: Discarding engine's move: %s" % movestr, extra={"task": self.defname}) print("undo", file=self.engine) continue else: try: move = parseAny(self.board, movestr) except ParsingError: self.invalid_move = movestr log.info( "__parseLine: ParsingError engine move: %s %s" % (movestr, self.board), extra={"task": self.defname}) self.end(WHITEWON if self.board.color == BLACK else BLACKWON, WON_ADJUDICATION) continue if validate(self.board, move): self.board = None self.queue.put_nowait(move) continue else: self.invalid_move = movestr log.info( "__parseLine: can't validate engine move: %s %s" % (movestr, self.board), extra={"task": self.defname}) self.end(WHITEWON if self.board.color == BLACK else BLACKWON, WON_ADJUDICATION) continue # Analyzing if self.engineIsInNotPlaying: if parts[:4] == ["0", "0", "0", "0"]: # Crafty doesn't analyze until it is out of book print("book off", file=self.engine) continue match = anare.match(line) if match: depth, score, time, nodes, moves = match.groups() if "mat" in score.lower() or "#" in moves: # Will look either like -Mat 3 or Mat3 scoreval = MATE_VALUE if score.startswith('-'): scoreval = -scoreval else: scoreval = int(score) nps = str(int(int(nodes) / (int(time) / 100))) if int(time) > 0 else "" mvstrs = movere.findall(moves) if mvstrs: self.emit("analyze", [(self.board.ply, mvstrs, scoreval, depth.strip(), nps)]) continue # Offers draw if parts[0:2] == ["offer", "draw"]: self.emit("accept", Offer(DRAW_OFFER)) continue # Resigns if parts[0] == "resign" or \ (parts[0] == "tellics" and parts[1] == "resign"): # buggy crafty # Previously: if "resign" in parts, # however, this is too generic, since "hint", "bk", # "feature option=.." and possibly other, future CECPv2 # commands can validly contain the word "resign" without this # being an intentional resign offer. self.emit("offer", Offer(RESIGNATION)) continue # if parts[0].lower() == "error": # continue # Tell User Error if parts[0] == "tellusererror": # We don't want to see our stop analyzer hack as an error message if "8/8/8/8/8/8/8/8" in "".join(parts[1:]): continue # Create a non-modal non-blocking message dialog with the error: dlg = Gtk.MessageDialog(mainwindow(), flags=0, type=Gtk.MessageType.WARNING, buttons=Gtk.ButtonsType.CLOSE, message_format=None) # Use the engine name if already known, otherwise the defname: displayname = self.name if not displayname: displayname = self.defname # Compose the dialog text: dlg.set_markup(GObject.markup_escape_text(_( "The engine %s reports an error:") % displayname) + "\n\n" + GObject.markup_escape_text(" ".join(parts[1:]))) # handle response signal so the "Close" button works: dlg.connect("response", lambda dlg, x: dlg.destroy()) dlg.show_all() continue # Tell Somebody if parts[0][:4] == "tell" and \ parts[0][4:] in ("others", "all", "ics", "icsnoalias"): log.info("Ignoring tell %s: %s" % (parts[0][4:], " ".join(parts[1:]))) continue if "feature" in parts: # Some engines send features after done=1, so we will iterate after done=1 too done1 = False # We skip parts before 'feature', as some engines give us lines like # White (1) : feature setboard=1 analyze...e="GNU Chess 5.07" done=1 parts = parts[parts.index("feature"):] for i, pair in enumerate(parts[1:]): # As "parts" is split with no thoughs on quotes or double quotes # we need to do some extra handling. 
if pair.find("=") < 0: continue key, value = pair.split("=", 1) if key not in self.features: continue if value.startswith('"') and value.endswith('"'): value = value[1:-1] # If our pair was unfinished, like myname="GNU, we search the # rest of the pairs for a quotating mark. elif value[0] == '"': rest = value[1:] + " " + " ".join(parts[2 + i:]) j = rest.find('"') if j == -1: log.warning("Missing endquotation in %s feature", extra={"task": self.defname}) value = rest else: value = rest[:j] elif value.isdigit(): value = int(value) if key in self.supported_features: print("accepted %s" % key, file=self.engine) else: print("rejected %s" % key, file=self.engine) if key == "done": if value == 1: done1 = True continue elif value == 0: log.info("Adds %d seconds timeout" % TIME_OUT_SECOND, extra={"task": self.defname}) # This'll buy you some more time self.queue.put_nowait("not ready") break if key == "smp" and value == 1: self.options["cores"] = {"name": "cores", "type": "spin", "default": 1, "min": 1, "max": 64} elif key == "memory" and value == 1: self.options["memory"] = {"name": "memory", "type": "spin", "default": 32, "min": 1, "max": 4096} elif key == "option" and key != "done": option = self.__parse_option(value) self.options[option["name"]] = option else: self.features[key] = value if key == "myname" and not self.name: self.setName(value) if done1: # Start a new game before using the engine: # (CECPv2 engines) print("new", file=self.engine) # We are now ready for play: self.emit("readyForOptions") self.emit("readyForMoves") self.queue.put_nowait("ready") # A hack to get better names in protover 1. # Unfortunately it wont work for now, as we don't read any lines from # protover 1 engines. When should we stop? if self.protover == 1: if self.defname[0] in ''.join(parts): basis = self.defname[0] name = ' '.join(itertools.dropwhile( lambda part: basis not in part, parts)) self.features['myname'] = name if not self.name: self.setName(name) def __parse_option(self, option): if " -check " in option: name, value = option.split(" -check ") return {"type": "check", "name": name, "default": bool(int(value))} elif " -spin " in option: name, value = option.split(" -spin ") defv, minv, maxv = value.split() return {"type": "spin", "name": name, "default": int(defv), "min": int(minv), "max": int(maxv)} elif " -slider " in option: name, value = option.split(" -slider ") defv, minv, maxv = value.split() return {"type": "spin", "name": name, "default": int(defv), "min": int(minv), "max": int(maxv)} elif " -string " in option: name, value = option.split(" -string ") return {"type": "text", "name": name, "default": value} elif " -file " in option: name, value = option.split(" -file ") return {"type": "text", "name": name, "default": value} elif " -path " in option: name, value = option.split(" -path ") return {"type": "text", "name": name, "default": value} elif " -combo " in option: name, value = option.split(" -combo ") choices = list(map(str.strip, value.split("///"))) default = "" for choice in choices: if choice.startswith("*"): index = choices.index(choice) default = choice[1:] choices[index] = default break return {"type": "combo", "name": name, "default": default, "choices": choices} elif " -button" in option: pos = option.find(" -button") return {"type": "button", "name": option[:pos]} elif " -save" in option: pos = option.find(" -save") return {"type": "button", "name": option[:pos]} elif " -reset" in option: pos = option.find(" -reset") return {"type": "button", "name": option[:pos]} # Info def 
canAnalyze(self): assert self.ready, "Still waiting for done=1" return self.features["analyze"] def getAnalysisLines(self): return 1 def minAnalysisLines(self): return 1 def maxAnalysisLines(self): return 1 def requestMultiPV(self, setting): return 1 def __repr__(self): if self.name: return self.name return self.features["myname"]
gpl-3.0
7,185,567,614,037,482,000
39.054545
140
0.489459
false
4.349934
false
false
false
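A note on the analysis parsing in CECPEngine.py above: the anare pattern splits an engine's thinking lines into depth, score (centipawns or mate), time in centiseconds, node count and principal variation. Below is a stripped-down, self-contained sketch of the same idea, using a simplified pattern and a made-up sample line (not output from any particular engine):

import re

# Simplified cousin of CECPEngine's anare pattern:
# "<depth> <score> <time-centiseconds> <nodes> <pv...>"
ANALYSIS = re.compile(r"^\s*(\d+)[.+-]?\s+(-?\d+)\s+(\d+)\s+(\d+)\s+(.+?)\s*$")

def parse_analysis(line):
    """Return (depth, score, time_cs, nodes, pv_moves) or None if no match."""
    match = ANALYSIS.match(line)
    if match is None:
        return None
    depth, score, time_cs, nodes, pv = match.groups()
    return int(depth), int(score), int(time_cs), int(nodes), pv.split()

# Made-up sample line for illustration only:
print(parse_analysis("12 35 512 100432 e2e4 e7e5 g1f3"))
# -> (12, 35, 512, 100432, ['e2e4', 'e7e5', 'g1f3'])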
mate-desktop/pluma
tools/preprocessor.py
1
5353
# -*- coding: utf-8 -*- # preprocessor.py - simple preprocessor for plugin template files # This file is part of pluma # # Copyright (C) 2006 - Steve Frécinaux # Copyright (C) 2012-2021 MATE Developers # # pluma is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # pluma is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pluma; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA import sys import re class DeepnessException(Exception): def __init__(self): Exception.__init__(self) statements = [re.compile("^##\s*%s\s*$" % pattern) for pattern in ['(?P<stmt>ifdef|ifndef)\s+(?P<key>[^\s]+)', '(?P<stmt>elif|if)\s+(?P<expr>.+)', '(?P<stmt>else|endif)', '(?P<stmt>define)\s+(?P<key>[^\s]+)(\s+(?P<val>.+))?', '(?P<stmt>undef)\s+(?P<key>[^\s]+)']] variable = re.compile("##\((?P<name>[a-zA-Z_][a-zA-Z0-9_]*)(?P<mods>(\.[a-z]+)+)?\)") def _eval(expr, macros): return eval(expr, {'defined': lambda x: macros.has_key(x)}, macros) def _subvar(match, macros): name = match.group('name') if name in macros: val = str(macros[name]) if val is None: return '' else: return '' mods = match.group('mods') if mods is not None: for mod in mods[1:].split('.'): if mod == 'lower': val = val.lower() elif mod == 'upper': val = val.upper() elif mod == 'camel': val = ''.join(i.capitalize() for i in val.split('_')) return val def process(infile = sys.stdin, outfile = sys.stdout, macros = {}): if not isinstance(infile, file): infile = open(infile, mode = 'r') close_infile = True else: close_infile = False if not isinstance(outfile, file): outfile = open(outfile, mode = 'w') close_outfile = True else: close_outfile = False deepness = 0 writing_disabled = None for line in infile: # Skip comments if line[0:3].lower() == '##c': continue # Check whether current line is a preprocessor directive for statement in statements: match = statement.match(line) if match: break if match is not None: stmt = match.group('stmt') if stmt == "define": if writing_disabled is None: key = match.group('key') val = match.group('val') macros[key] = val elif stmt == "undef": if writing_disabled is None: key = match.group('key') if key in macros: del macros[key] elif stmt == "ifdef": deepness += 1 if writing_disabled is None and \ match.group('key') not in macros: writing_disabled = deepness elif stmt == "ifndef": deepness += 1 if writing_disabled is None and \ match.group('key') in macros: writing_disabled = deepness elif stmt == "if": deepness += 1 if writing_disabled is None and \ not _eval(match.group('expr'), macros): writing_disabled = deepness elif stmt == "elif": if deepness == 0: raise DeepnessException() if writing_disabled is None and \ not _eval(match.group('expr'), macros): writing_disabled = deepness elif writing_disabled == deepness: writing_disabled = None elif stmt == "else": if deepness == 0: raise DeepnessException() if writing_disabled is None: writing_disabled = deepness elif writing_disabled == deepness: writing_disabled = None elif stmt == "endif": if deepness == 0: raise DeepnessException() if writing_disabled is not None and \ 
writing_disabled == deepness: writing_disabled = None deepness -= 1 # Do variable substitution in the remaining lines elif writing_disabled is None: outfile.write(re.sub(variable, lambda m: _subvar(m, macros), line)) if deepness != 0: raise DeepnessException() if close_infile: infile.close() if close_outfile: outfile.close() # ex:ts=4:et:
gpl-2.0
851,380,180,004,629,900
32.037037
85
0.508969
false
4.390484
false
false
false
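preprocessor.py above recognises ##ifdef/##ifndef/##if/##elif/##else/##endif directives plus ##(name.mods) variable markers with lower/upper/camel modifiers. A self-contained sketch of just the variable-substitution half, reusing the same pattern (the plugin_name macro is hypothetical, and only two modifiers are handled):

import re

# The same variable pattern preprocessor.py uses: ##(name.mods)
variable = re.compile(r"##\((?P<name>[a-zA-Z_][a-zA-Z0-9_]*)(?P<mods>(\.[a-z]+)+)?\)")

def expand(line, macros):
    """Substitute ##(var.mod) markers; only 'upper' and 'lower' handled here."""
    def sub(match):
        val = str(macros.get(match.group('name'), ''))
        mods = match.group('mods')
        if mods:
            for mod in mods[1:].split('.'):
                if mod == 'upper':
                    val = val.upper()
                elif mod == 'lower':
                    val = val.lower()
        return val
    return variable.sub(sub, line)

print(expand("Plugin: ##(plugin_name.upper)!", {'plugin_name': 'spell'}))
# -> Plugin: SPELL!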
feedhq/feedhq
feedhq/feeds/tasks.py
1
8652
from collections import defaultdict from datetime import timedelta import requests import structlog from django.conf import settings from django.utils import timezone from django_push.subscriber.models import Subscription, SubscriptionError from rache import schedule_job from requests.exceptions import MissingSchema from rq.timeouts import JobTimeoutException from .. import es from ..profiles.models import User from ..utils import get_redis_connection logger = structlog.get_logger(__name__) # TODO remove unused request_timeout def update_feed(url, etag=None, modified=None, subscribers=1, request_timeout=10, backoff_factor=1, error=None, link=None, title=None, hub=None): from .models import UniqueFeed try: UniqueFeed.objects.update_feed( url, etag=etag, last_modified=modified, subscribers=subscribers, backoff_factor=backoff_factor, previous_error=error, link=link, title=title, hub=hub) except JobTimeoutException: backoff_factor = min(UniqueFeed.MAX_BACKOFF, backoff_factor + 1) logger.info("job timed out, backing off", url=url, backoff_factor=backoff_factor) schedule_job(url, schedule_in=UniqueFeed.delay(backoff_factor), backoff_factor=backoff_factor, connection=get_redis_connection()) except BaseException as e: logger.info("fatal job exception", url=url, exc_info=e) raise def read_later(user_id, entry_pk): user = User.objects.get(pk=user_id) entry = es.entry(user, entry_pk, annotate_results=False) entry.user = user entry.read_later() def update_favicon(feed_url, force_update=False): from .models import Favicon Favicon.objects.update_favicon(feed_url, force_update=force_update) def ensure_subscribed(topic_url, hub_url): """Makes sure the PubSubHubbub subscription is verified""" if settings.TESTS: if str(type(requests.post)) != "<class 'unittest.mock.MagicMock'>": raise ValueError("Not Mocked") if hub_url is None: return log = logger.bind(topic_url=topic_url, hub_url=hub_url) call, args = None, () try: s = Subscription.objects.get(topic=topic_url, hub=hub_url) except Subscription.DoesNotExist: log.info("subscribing") call = Subscription.objects.subscribe args = topic_url, hub_url else: if ( not s.verified or s.lease_expiration < timezone.now() + timedelta(days=1) ): log.info("renewing subscription", subscription=s.pk) call = s.subscribe if call is not None: try: call(*args) except SubscriptionError as e: log.info("subscription error", exc_info=e) except MissingSchema: pass def should_skip(date, ttl): delta = timedelta(days=ttl) return date + delta < timezone.now() def store_entries(feed_url, entries): from .models import Entry, Feed feeds = Feed.objects.select_related('user').filter( url=feed_url, user__is_suspended=False).values('pk', 'user_id', 'category_id', 'user__ttl') guids = set([entry['guid'] for entry in entries]) es_query = [{'or': [{'term': {'feed': feed['pk']}} for feed in feeds]}] # When we have dates, filter the query to avoid returning the whole dataset date_generated = any([e.pop('date_generated') for e in entries]) if not date_generated: earliest = min([entry['date'] for entry in entries]) limit = earliest - timedelta(days=1) es_query.append({'range': {'timestamp': {'gt': limit}}}) filter_by_title = len(guids) == 1 and len(entries) > 1 if filter_by_title: # All items have the same guid. Query by title instead. 
titles = set([entry['title'] for entry in entries]) es_query.append({'or': [{'term': {'raw_title': t}} for t in titles]}) else: es_query.append({'or': [{'term': {'guid': g}} for g in guids]}) existing = None indices = [] for feed in feeds: indices.append(es.user_alias(feed['user_id'])) if indices: es.wait_for_yellow() # Make sure guid and raw_title are not analyzed before querying # anything. Otherwise existing entries are never matched and things # keep being inserted. mappings = es.client.indices.get_field_mapping(index=",".join(indices), doc_type='entries', field='guid,raw_title') for mapping in mappings.values(): mapping = mapping['mappings']['entries'] for f in ['raw_title', 'guid']: assert mapping[f]['mapping'][f]['index'] == 'not_analyzed' existing_es = es.client.search( index=",".join(indices), doc_type='entries', body={ 'aggs': { 'existing': { 'filter': {'and': es_query}, 'aggs': { 'feeds': { 'terms': {'field': 'feed', 'size': 0}, 'aggs': { 'guids': {'terms': {'field': 'guid', 'size': 0}}, 'titles': {'terms': {'field': 'raw_title', 'size': 0}}, }, }, }, }, }, }, ) existing_es = existing_es[ 'aggregations']['existing']['feeds']['buckets'] else: existing_es = [] existing_guids = defaultdict(set) existing_titles = defaultdict(set) if existing is not None: for entry in existing: existing_guids[entry['feed_id']].add(entry['guid']) if filter_by_title: existing_titles[entry['feed_id']].add(entry['title']) existing_es_guids = defaultdict(set) existing_es_titles = defaultdict(set) for bucket in existing_es: for sub in bucket['guids']['buckets']: existing_es_guids[bucket['key']].add(sub['key']) if filter_by_title: for sub in bucket['titles']['buckets']: existing_es_titles[bucket['key']].add(sub['key']) ops = [] refresh_updates = defaultdict(list) for feed in feeds: seen_guids = set() seen_titles = set() for entry in entries: if ( not filter_by_title and entry['guid'] in existing_es_guids[feed['pk']] ): continue if ( filter_by_title and entry['title'] in existing_es_titles[feed['pk']] ): continue if ( feed['user__ttl'] and should_skip(entry['date'], feed['user__ttl']) ): continue if filter_by_title and entry['title'] in seen_titles: continue seen_titles.add(entry['title']) if not filter_by_title and entry['guid'] in seen_guids: continue seen_guids.add(entry['guid']) data = Entry(**entry).serialize() data['category'] = feed['category_id'] data['feed'] = feed['pk'] data['_id'] = es.next_id() data['id'] = data['_id'] data['_type'] = 'entries' data['user'] = feed['user_id'] data['_index'] = settings.ES_INDEX ops.append(data) refresh_updates[feed['user_id']].append(entry['date']) if ops: es.bulk(ops, raise_on_error=True) if settings.TESTS: # Indices are refreshed asynchronously. Refresh immediately # during tests. indices = ",".join(set([doc['_index'] for doc in ops])) es.client.indices.refresh(indices) redis = get_redis_connection() for user_id, dates in refresh_updates.items(): user = User(pk=user_id) new_score = float(max(dates).strftime('%s')) current_score = redis.zscore(user.last_update_key, feed_url) or 0 if new_score > current_score: redis.zadd(user.last_update_key, feed_url, new_score)
bsd-3-clause
-1,002,127,284,446,079,100
35.200837
79
0.537217
false
4.237023
false
false
false
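In tasks.py above, update_feed() reacts to a JobTimeoutException by incrementing backoff_factor up to UniqueFeed.MAX_BACKOFF and rescheduling the job via schedule_job(). UniqueFeed.delay() is not shown in this file, so the exact growth curve below is an assumption; the sketch only illustrates the capped-backoff idea:

from datetime import timedelta

MAX_BACKOFF = 10          # assumed cap; UniqueFeed.MAX_BACKOFF is not shown here
BASE_DELAY_MINUTES = 15   # hypothetical base polling interval

def next_delay(backoff_factor):
    """Capped, exponentially growing retry delay."""
    factor = min(MAX_BACKOFF, backoff_factor)
    return timedelta(minutes=BASE_DELAY_MINUTES * 2 ** (factor - 1))

for factor in (1, 2, 3, MAX_BACKOFF):
    print(factor, next_delay(factor))
# 1 -> 0:15:00, 2 -> 0:30:00, 3 -> 1:00:00, 10 -> 5 days, 8:00:00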
zmsch27/Python
PythonBase/Python_SQL.py
1
5850
# The following is from Liao Xuefeng's (廖雪峰) Python tutorial, section on Python databases.

# SQLite ////////////////////////////////////////////////////
# SQLite is an embedded database: the whole database is a single file. Since SQLite
# itself is written in C and is very small, it is often embedded in all kinds of
# applications; it can even be embedded in iOS and Android apps.
# Python ships with SQLite3 built in, so using SQLite from Python requires no
# extra installation at all.

# Import the SQLite driver:
import sqlite3

# Connect to the SQLite database.
# The database file is test.db.
# If the file does not exist, it is created automatically in the current directory:
conn = sqlite3.connect('test.db')
# Create a Cursor:
cursor = conn.cursor()
# Execute an SQL statement to create the user table:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# Execute another SQL statement to insert a record:
cursor.execute('insert into user (id, name) values (\'1\', \'Michael\')')
# rowcount gives the number of rows inserted:
print(cursor.rowcount)
# Close the Cursor:
cursor.close()
# Commit the transaction:
conn.commit()
# Close the Connection:
conn.close()

# Now let's try querying the record back:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
# Run the query:
cursor.execute('select * from user where id=?', ('1',))
# Fetch the result set:
values = cursor.fetchall()
print(values)
cursor.close()
conn.close()

# When using Python's DB-API, as long as you understand the Connection and Cursor
# objects, and always remember to close what you open, you can use it with confidence.
# When a Cursor executes insert, update or delete statements, rowcount returns the
# number of affected rows, which tells you the outcome.
# When a Cursor executes a select statement, fetchall() returns the result set:
# a list in which each element is a tuple corresponding to one row.
# If the SQL statement takes parameters, pass them positionally to execute();
# each ? placeholder must have a matching parameter, for example:
# cursor.execute('select * from user where name=? and pwd=?', ('abc', 'password'))

print('-----------------------------------------\n')

# MySQL /////////////////////////////////////////////////////////////
# MySQL is the most widely used database server on the web. SQLite is lightweight
# and embeddable but cannot handle high concurrency, so it suits desktop and mobile
# applications. MySQL, by contrast, is designed for the server side: it can handle
# high concurrency, at the cost of using far more memory than SQLite.
# MySQL also ships with multiple storage engines; the most commonly used is InnoDB,
# which supports transactions.

# Import the MySQL driver:
import mysql.connector

# Note: set password to your own root password:
conn = mysql.connector.connect(user='root', password='123', database='python_test')
cursor = conn.cursor()
# Create the user table:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# Insert one row; note that MySQL's placeholder is %s:
cursor.execute('insert into user (id, name) values (%s, %s)', ['1', 'Michael'])
print(cursor.rowcount)
# Commit the transaction:
conn.commit()
cursor.close()
# Run a query:
cursor = conn.cursor()
cursor.execute('select * from user where id = %s', ('1',))
values = cursor.fetchall()
print(values)
# Close the Cursor and the Connection:
cursor.close()
conn.close()

print('-----------------------------------------\n')

# SQLAlchemy //////////////////////////////////////////////////////////
# Imports:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

# Create the declarative base class:
Base = declarative_base()

# Define the User object:
class User(Base):
    # Table name:
    __tablename__ = 'user'

    # Table columns:
    id = Column(String(20), primary_key=True)
    name = Column(String(20))

# Initialise the database connection:
engine = create_engine('mysql+mysqlconnector://root:123@localhost:3306/python_test')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)

# create_engine() initialises the database connection. SQLAlchemy represents the
# connection info as a single string:
# 'dbtype+driver://username:password@host:port/dbname'

# Thanks to the ORM, adding a row to a table can be treated as adding a User object:
# Create a session object:
session = DBSession()
# Create a new User object:
new_user = User(id='5', name='Bob')
# Add it to the session:
session.add(new_user)
# Committing saves it to the database:
session.commit()
# Close the session:
session.close()

# As you can see, the key is to obtain a session, add objects to it, then commit and
# close. A DBSession object can be regarded as the current database connection.

# How do we query data back from the table? With the ORM, query results are no longer
# tuples but User objects. SQLAlchemy's query interface looks like this:
# Create a Session:
session = DBSession()
# Build a Query; filter() is the where clause; one() returns exactly one row,
# while all() would return every matching row:
user = session.query(User).filter(User.id == '5').one()
# Print the type and the object's name attribute:
print('type:', type(user))
print('name:', user.name)
# Close the Session:
session.close()

print('-----------------------------------------')

# Creating tables --------------------------------------------------------------
# Imports:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

# Create the declarative base class:
Base = declarative_base()

class User(Base):
    __tablename__ = 'user'

    id = Column(String(20), primary_key=True)
    name = Column(String(20))

class Book(Base):
    __tablename__ = 'book'

    id = Column(String(20), primary_key=True)
    name = Column(String(20))
    # The book table, the "many" side, references the user table via a foreign key:
    user_id = Column(String(20))

engine = create_engine('mysql+mysqlconnector://root:123@localhost:3306/python_test')
metadata = Base.metadata
metadata.create_all(engine)
apache-2.0
-6,221,566,883,954,505,000
28.697183
90
0.6824
false
1.873778
true
false
false
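The tutorial's closing advice is to always close what you open. A short sketch showing how contextlib.closing and the sqlite3 connection's own context manager make the commit/close bookkeeping automatic (standard library only):

import sqlite3
from contextlib import closing

# closing() guarantees conn.close(); "with conn:" commits on success
# and rolls back on exception, replacing the explicit
# cursor.close()/conn.commit()/conn.close() dance.
with closing(sqlite3.connect(':memory:')) as conn:
    with conn:
        conn.execute('create table user (id varchar(20) primary key, name varchar(20))')
        conn.execute('insert into user (id, name) values (?, ?)', ('1', 'Michael'))
    rows = conn.execute('select * from user where id=?', ('1',)).fetchall()
    print(rows)  # -> [('1', 'Michael')]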
jirutka/ngx-oauth
integration/support/nginx_server.py
1
1260
import os
from os import path
import shlex
from subprocess import Popen
from time import sleep

from .util import write_file

import requests
from requests import ConnectionError
from retry import retry

__all__ = ['NginxServer']


class NginxServer:

    def __init__(self, nginx_conf, check_url, temp_dir='.'):
        conf_path = path.join(temp_dir, 'nginx.conf')
        write_file(conf_path, nginx_conf)

        self._command = "nginx -c %s" % conf_path
        self._ngx_process = None
        self.check_url = check_url

    def start(self):
        self._ngx_process = Popen(shlex.split(self._command))
        try:
            # sanity check
            resp = self._request_check_url()
        except ConnectionError as e:
            self.stop()
            raise e
        if resp.status_code != 200:
            raise IOError("Nginx returned %s for GET %s"
                          % (resp.status_code, self.check_url))

    def stop(self):
        if self._ngx_process is None:
            return
        try:
            self._ngx_process.terminate()
            sleep(0.2)
        finally:
            os.kill(self._ngx_process.pid, 9)

    @retry(ConnectionError, tries=20, delay=0.1)
    def _request_check_url(self):
        return requests.get(self.check_url, verify=False)
mit
-6,519,647,580,464,918,000
25.25
94
0.603175
false
3.662791
false
false
false
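_request_check_url above leans on the third-party retry package's decorator. A minimal hand-rolled equivalent, assuming the same tries/delay semantics (retry on the named exception, re-raise after the final attempt):

import time
from functools import wraps

def retry(exc_type, tries=20, delay=0.1):
    """Re-invoke the wrapped callable until it stops raising exc_type."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == tries - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator

# Hypothetical demo target that fails twice before succeeding:
calls = {'n': 0}

@retry(ValueError, tries=5, delay=0.01)
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError('not up yet')
    return 'ok after %d calls' % calls['n']

print(flaky())  # -> ok after 3 calls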
supertree-toolkit/stk
stk/stk_import_export.py
1
22750
#!/usr/bin/env python # # Supertree Toolkit. Software for managing and manipulating sources # trees ready for supretree construction. # Copyright (C) 2011, Jon Hill, Katie Davis # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Jon Hill. [email protected]. from StringIO import StringIO import os import sys import math import re import numpy from lxml import etree import stk.nameparser.parser as np import re import supertree_toolkit from copy import deepcopy from supertree_toolkit import _parse_xml import stk_exceptions import stk.p4 import unicodedata import string as python_string def export_to_old(xml, output_dir, verbose=False, ignoreWarnings=False): """ Create an old STK dataset from a PHYML file. Hopefuly not useful in the long run as all functionality will be replicated, but may be useful in the short term """ if not ignoreWarnings: xml = supertree_toolkit.clean_data(xml) # Parse the file and away we go: xml_root = _parse_xml(xml) # First get project name and create the directory find = etree.XPath("//project_name") project_name = find(xml_root)[0].xpath("string_value")[0].text project_name.replace(' ','_') project_dir = os.path.join(output_dir,project_name) try: os.mkdir(project_dir) except OSError: msg = "Directory already exists. " msg += "Please check you are trying to output into the correct directory. If so remove "+project_dir raise stk_exceptions.STKImportExportError(msg) except: msg = "Error making project directory: "+os.path.join(output_dir,project_name) raise stk_exceptions.STKImportExportError(msg) # Loop through the sources find = etree.XPath("//source") find_trees = etree.XPath("//source_tree") sources = find(xml_root) for s in sources: # Make directory name = s.attrib['name'] if (verbose): print "----\nWorking on:" +name if (name == '' or name == None): msg = "One of the sources does not have a valid name. Aborting." 
raise stk_exceptions.STKImportExportError(msg) source_dir = os.path.join(project_dir,name) os.mkdir(source_dir) # for this source, grab each tree_source and create the sub-directories tree_no = 1 if (verbose): print "Found "+ str(len(s.xpath("source_tree"))) + " trees in this source" for t in s.xpath("source_tree"): tree_dir = os.path.join(source_dir,"Tree_"+str(tree_no)) os.mkdir(tree_dir) # save the tree data tree = t.xpath("tree/tree_string/string_value")[0].text stk.p4.var.warnReadNoFile = False stk.p4.var.trees = [] stk.p4.read(tree) stk.p4.var.warnReadNoFile = True trees = stk.p4.var.trees stk.p4.var.trees = [] tree = trees[0].writeNewick(fName=None,toString=True).strip() out_tree_file = open(os.path.join(tree_dir,name+"_tree_"+str(tree_no)+".tre"),"w") out_tree_file.write('#NEXUS\nBEGIN TREES;\nTree tree_1 = [&u] ') out_tree_file.write(tree) out_tree_file.write("\nENDBLOCK;") out_tree_file.close() # create and save XML create_xml_metadata(etree.tostring(s), etree.tostring(t), os.path.join(tree_dir,name+"_tree_"+str(tree_no))) tree_no += 1 def import_old_data(input_dir, verbose=False): """ Converts an old STK dataset (based on directories) to the new PHYML file format. Note: we need XML files to get the meta data and also that the data imported may not be complete. It's up to the calling program to save the resulting xml string somewhere sensible. """ # strip trailing path separator if one if (input_dir.endswith(os.path.sep)): t = input_dir[0:-1] input_dir = t # Parse the file and away we go: base_xml = """<?xml version='1.0' encoding='utf-8'?> <phylo_storage> <project_name> <string_value lines="1"/> </project_name> <sources> </sources> <history/> </phylo_storage>""" xml_root = etree.fromstring(base_xml) find = etree.XPath("//sources") sources = find(xml_root)[0] # add the project name from the input directory xml_root.xpath("/phylo_storage/project_name/string_value")[0].text = os.path.basename(input_dir) # for each XML nXML = 0; for xml in locate('*.xml', input_dir): # parse XML if (verbose): print "Parsing: "+xml current_xml = etree.parse(xml) # convert into PHYML new_source = convert_to_phyml_source(current_xml) # This is now the source_tree portion of the XML source_tree = convert_to_phyml_sourcetree(current_xml, xml) # add into PHYML sources element append_to_source, already_in = supertree_toolkit.already_in_data(new_source,sources) if (not already_in): # append tree to current source new_source.append(deepcopy(source_tree)) sources.append(deepcopy(new_source)) # deepcopy otherwise it'll add the same one several times :| else: # we need to find the correct source and append the source_tree to this append_to_source.append(deepcopy(source_tree)) nXML += 1 if (nXML == 0): msg = "Didn't find any XML files in this directory" raise stk_exceptions.STKImportExportError(msg) # create all sourcenames phyml = supertree_toolkit.all_sourcenames(etree.tostring(xml_root)) phyml = supertree_toolkit.set_all_tree_names(phyml) return phyml def locate(pattern, root=os.curdir): """Locate all files matching the pattern with the root dir and all subdirectories """ import fnmatch for path, dirs, files in os.walk(os.path.abspath(root)): for filename in fnmatch.filter(files,pattern): yield os.path.join(path, filename) def convert_to_phyml_source(xml_root): """ Converts old STK XML to a new STK source XML block ready for insertion into a PHYML tree """ # parse XML file and extract necessary info find = etree.XPath("//Source") Source = find(xml_root)[0] input_author = Source.xpath('Author')[0].text 
input_title = Source.xpath('Title')[0].text input_year = Source.xpath('Year')[0].text input_journal = Source.xpath('Journal')[0].text input_volume = Source.xpath('Volume')[0].text input_pages = Source.xpath('Pages')[0].text input_booktitle = Source.xpath('Booktitle')[0].text input_editor = Source.xpath('Editor')[0].text input_publisher = Source.xpath('Publisher')[0].text author_list = [] # split the string using ',', then stich together is needed a = input_author.lower() if isinstance(a, unicode): a = unicodedata.normalize('NFKD', a).encode('ascii','ignore') author_list = a.split(' and ') # authors_t = a.split(',') # authors_temp = [] # if (len(authors_t) > 1): # for a in authors_t: # authors_temp.extend(a.split(' and ')) # # if (len(authors_temp) > 1): # i = 0 # while i<len(authors_temp): # if (i+1 < len(authors_temp)): # m = re.search('\.', authors_temp[i+1]) # if (m != None): # # next token contains a full stop so is probably an initial # author_list.append(str.strip(authors_temp[i+1]) + " " + str.strip(authors_temp[i])) # i += 2 # else: # author_list.append(authors_temp[i]) # i += 1 # else: # author_list.append(authors_temp[i]) # i += 1 # else: # author_list = a.split('and') if (len(author_list) == 0): author_list.append(input_author) phyml_root = etree.Element("source") publication = etree.SubElement(phyml_root,"bibliographic_information") # does it contain a booktitle? contains_booktitle = False if (contains_booktitle): article = etree.SubElement(publication,"book") else: article = etree.SubElement(publication,"article") authors = etree.SubElement(article,"authors") # now parse authors into something sensible # authors - parse into full author names, then use nameparse to extract first and last for a in author_list: # further munging of name a = a.strip() bits = a.split(',') if (len(bits) > 1): a = bits[1].strip()+" "+bits[0].strip() o = np.HumanName(a) ae = etree.SubElement(authors,'author') surname = etree.SubElement(ae,'surname') string = etree.SubElement(surname,'string_value') string.attrib['lines'] = "1" string.text = python_string.capwords(o.last) if (o.last.capitalize() == ''): string.text = a first = etree.SubElement(ae,'other_names') string = etree.SubElement(first,'string_value') string.attrib['lines'] = "1" other = python_string.capwords(o.first) string.text = other # reset to empty if needed if (o.first == None): string.text = '' # title and the publication data title = etree.SubElement(article,"title") string = etree.SubElement(title,"string_value") string.attrib['lines'] = "1" string.text = input_title volume = etree.SubElement(article,"volume") string = etree.SubElement(volume,"string_value") string.attrib['lines'] = "1" string.text = input_volume year = etree.SubElement(article,"year") integer = etree.SubElement(year,"integer_value") integer.attrib['rank'] = "0" integer.text = input_year journal = etree.SubElement(article,"journal") string = etree.SubElement(journal,"string_value") string.attrib['lines'] = "1" string.text = input_journal pages = etree.SubElement(article,"pages") string = etree.SubElement(pages,"string_value") string.attrib['lines'] = "1" string.text = input_pages return phyml_root def convert_to_phyml_sourcetree(input_xml, xml_file): """ Extract the source_tree data from the old-style XML and create an XML tree inthe new style. 
We leave it to the main program to check that we append or add the source """ # get tree filename from current_xml find_treefiles = etree.XPath('//TreeFile') treefile = find_treefiles(input_xml)[0].text # now stick on the root path of the XML to get the full path of the treefile cur_dir = os.path.split(xml_file)[0] try: tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile)) except stk_exceptions.TreeParseError as detail: msg = "***Error: failed to parse a tree in your data set.\n" msg += "File is: "+treefile+"\n"+detail.msg print msg return except IOError: # try just the file if we failed - windows formatted treefile = treefile.rsplit('\\')[-1] try: tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile)) except stk_exceptions.TreeParseError as detail: msg = "***Error: failed to parse a tree in your data set.\n" msg += "File is: "+treefile+"\n"+detail.msg print msg return # all other data find_mol = etree.XPath('//Characters/Molecular/Type') find_morph = etree.XPath('//Characters/Morphological/Type') find_behave = etree.XPath('//Characters/Behavioural/Type') find_other = etree.XPath('//Characters/Other/Type') taxa_type = input_xml.xpath('/SourceTree/Taxa')[0].attrib['fossil'] if (taxa_type == "some"): mixed = True allextant = False allfossil = False elif (taxa_type == "all"): mixed = False allextant = False allfossil = True elif (taxa_type == "none"): mixed = False allextant = True allfossil = False else: print "Unknown taxa types in "+xml_file print "Setting to mixed fossil and extant so you have to correct this later" mixed = True allextant = False allfossil = False # analysis input_comments = input_xml.xpath('/SourceTree/Notes')[0].text input_analysis = input_xml.xpath('/SourceTree/Analysis/Type')[0].text # Theres a translation to be done here if (input_analysis == "MP"): input_analysis = "Maximum Parsimony" if (input_analysis == "ML"): input_analysis = "Maximum Likelihood" # construct new XML source_tree = etree.Element("source_tree") # tree data tree_ele = etree.SubElement(source_tree,"tree") tree_string = etree.SubElement(tree_ele,"tree_string") string = etree.SubElement(tree_string,"string_value") string.attrib["lines"] = "1" string.text = tree # comment if (not input_comments == None): comment = etree.SubElement(tree_string,"comment") comment.text = input_comments # Figure and page number stuff figure_legend = etree.SubElement(tree_ele,"figure_legend") figure_legend.tail="\n " figure_legend_string = etree.SubElement(figure_legend,"string_value") figure_legend_string.tail="\n " figure_legend_string.attrib['lines'] = "1" figure_legend_string.text = "NA" figure_number = etree.SubElement(tree_ele,"figure_number") figure_number.tail="\n " figure_number_string = etree.SubElement(figure_number,"string_value") figure_number_string.tail="\n " figure_number_string.attrib['lines'] = "1" figure_number_string.text = "0" page_number = etree.SubElement(tree_ele,"page_number") page_number.tail="\n " page_number_string = etree.SubElement(page_number,"string_value") page_number_string.tail="\n " page_number_string.attrib['lines'] = "1" tree_inference = etree.SubElement(tree_ele,"tree_inference") optimality_criterion = etree.SubElement(tree_inference,"optimality_criterion") # analysis optimality_criterion.attrib['name'] = input_analysis # taxa data taxa_data = etree.SubElement(source_tree,"taxa_data") if (allfossil): taxa_type = etree.SubElement(taxa_data,"all_fossil") elif (allextant): taxa_type = etree.SubElement(taxa_data,"all_extant") else: taxa_type = 
etree.SubElement(taxa_data,"mixed_fossil_and_extant") # We *should* add a taxon here to make sure this is valid # phyml according to the schema. However, in doin so we will fail the # taxon check as we don't know which taxon (or taxa) is a fossil, as # this in formation is not recorded in the old STK XML files. # We therefore leave this commented out as a reminder to the # next soul to edit this #taxon = etree.SubElement(taxa_type,"taxon") character_data = etree.SubElement(source_tree,"character_data") # loop over characters add correctly chars = find_mol(input_xml) for c in chars: new_char = etree.SubElement(character_data,"character") new_char.attrib['type'] = "molecular" new_char.attrib['name'] = c.text chars = find_morph(input_xml) for c in chars: new_char = etree.SubElement(character_data,"character") new_char.attrib['type'] = "morphological" new_char.attrib['name'] = c.text chars = find_behave(input_xml) for c in chars: new_char = etree.SubElement(character_data,"character") new_char.attrib['type'] = "behavioural" new_char.attrib['name'] = c.text chars = find_other(input_xml) for c in chars: new_char = etree.SubElement(character_data,"character") new_char.attrib['type'] = "other" new_char.attrib['name'] = c.text return source_tree def create_xml_metadata(XML_string, this_source, filename): """ Converts a PHYML source block to the old style XML file""" XML = etree.fromstring(XML_string) source_XML = etree.fromstring(this_source) # from file name we can construct new tree object try: stk.p4.var.warnReadNoFile = False stk.p4.var.trees = [] stk.p4.read(filename+'.tre') stk.p4.var.warnReadNoFile = True except: raise stk_exceptions.TreeParseError("Error parsing " + filename) trees = stk.p4.var.trees stk.p4.var.trees = [] tree = trees[0] taxa_list = tree.getAllLeafNames(0) new_xml = etree.Element("SourceTree") # The source publication info source = etree.SubElement(new_xml,"Source") author = etree.SubElement(source,"Author") find_authors = etree.XPath("//author") authors = find_authors(XML) authors_list = '' for a in authors: s = a.xpath('surname/string_value')[0].text o = '' try: o = a.xpath('other_names/string_value')[0].text except: pass if (authors_list != ''): authors_list = authors_list+" and " authors_list += s if (not o == ''): authors_list += ", "+o+"." 
    author.text = authors_list
    year = etree.SubElement(source,"Year")
    year.text = XML.xpath("//year/integer_value")[0].text
    title = etree.SubElement(source,"Title")
    title.text = XML.xpath("//title/string_value")[0].text
    journal = etree.SubElement(source,"Journal")
    if (len(XML.xpath("//journal/string_value")) > 0):
        journal.text = XML.xpath("//journal/string_value")[0].text
    volume = etree.SubElement(source,"Volume")
    if (len(XML.xpath("//volume/string_value")) > 0):
        volume.text = XML.xpath("//volume/string_value")[0].text
    book = etree.SubElement(source,"Booktitle")
    if (len(XML.xpath("//booktitle/string_value")) > 0):
        book.text = XML.xpath("//booktitle/string_value")[0].text
    page = etree.SubElement(source,"Pages")
    if (len(XML.xpath("//pages/string_value")) > 0):
        tmp_txt = XML.xpath("//pages/string_value")[0].text
        if not tmp_txt == None:
            tmp_txt = tmp_txt.replace("&#8211;","-")
        else:
            tmp_txt = ""
        page.text = tmp_txt
    editor = etree.SubElement(source,"Editor")
    find_editors = etree.XPath("//editor/surname")
    surnames = find_editors(XML)
    authors_list = ''
    for s in surnames:
        if (authors_list != ''):
            authors_list = authors_list+" and "
        authors_list += s.xpath('string_value')[0].text
    editor.text = authors_list
    publisher = etree.SubElement(source, "Publisher")
    if (len(XML.xpath("//publisher/string_value")) > 0):
        publisher.text = XML.xpath("//publisher/string_value")[0].text
    # The taxa info
    taxa = etree.SubElement(new_xml,"Taxa")
    # add a List entry for each taxon
    for t in taxa_list:
        l = etree.SubElement(taxa, "List")
        t = t.replace('_',' ')
        l.text = t
    # if we find any taxa with fossil switched on, then add the fossil attribute
    find_fossil = etree.XPath("//fossil")
    if (len(find_fossil(source_XML)) == 0):
        taxa.attrib['fossil'] = 'none'
    elif (len(find_fossil(source_XML)) == len(taxa_list)):
        taxa.attrib['fossil'] = 'all'
    else:
        taxa.attrib['fossil'] = 'some'
    taxa.attrib['number'] = str(len(taxa_list))
    # character data
    character = etree.SubElement(new_xml,"Characters")
    find_characters = etree.XPath("//character")
    characters_phyml = find_characters(source_XML)
    nMolecular = 0
    nMorpho = 0
    nBehaviour = 0
    nOther = 0
    molecular = etree.SubElement(character,"Molecular")
    morphological = etree.SubElement(character,"Morphological")
    behavioural = etree.SubElement(character,"Behavioural")
    other = etree.SubElement(character,"Other")
    for c in characters_phyml:
        if c.attrib['type'] == 'molecular':
            l = etree.SubElement(molecular,"Type")
            l.text = c.attrib['name']
            nMolecular += 1
        if c.attrib['type'] == 'behavioural':
            l = etree.SubElement(behavioural,"Type")
            l.text = c.attrib['name']
            nBehaviour += 1
        if c.attrib['type'] == 'morphological':
            l = etree.SubElement(morphological,"Type")
            l.text = c.attrib['name']
            nMorpho += 1
        if c.attrib['type'] == 'other':
            l = etree.SubElement(other,"Type")
            l.text = c.attrib['name']
            nOther += 1
    if (nMolecular > 0):
        molecular.attrib['number'] = str(nMolecular)
    if (nBehaviour > 0):
        behavioural.attrib['number'] = str(nBehaviour)
    if (nMorpho > 0):
        morphological.attrib['number'] = str(nMorpho)
    if (nOther > 0):
        other.attrib['number'] = str(nOther)
    # analysis data
    analysis = etree.SubElement(new_xml,"Analysis")
    find_analysis = etree.XPath("//analysis")
    analysis_phyml = find_analysis(source_XML)
    for a in analysis_phyml:
        l = etree.SubElement(analysis,"Type")
        l.text = a.attrib['name']
    # tree file - same directory :)
    tree_f = etree.SubElement(new_xml,"TreeFile")
    tree_file_only = os.path.basename(filename)
    tree_file_only += '.tre'
    tree_f.text = tree_file_only
    # Grab any comments under the tree and add them here
    notes = etree.SubElement(new_xml,'Notes')
    find_comments = etree.XPath("//comment")
    comments_phyml = find_comments(source_XML)
    comments = ""
    for c in comments_phyml:
        if (not c.text == None):
            if (not comments == ""):
                comments += "\n" + c.text
            else:
                comments = c.text
    notes.text = comments
    xml_string = etree.tostring(new_xml, encoding='iso-8859-1', pretty_print=True)
    f = open(filename+'.xml','w')
    f.write(xml_string)
    f.close()

#def _capitalise_source_name(name):
#    """Capitalises a source name, taking into account etal
#    smith_jones_2003 -> Smith_Jones_2003
#    smith_etal_2003 -> Smith_etal_2003
#    etc
#    """
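The conversion above leans on three lxml idioms: building the tree with etree.SubElement, querying with XPath (guarding against empty result lists), and serializing with etree.tostring. A minimal self-contained sketch of the same pattern, using illustrative element names rather than the real STK schema:

from lxml import etree

root = etree.Element("SourceTree")
source = etree.SubElement(root, "Source")
author = etree.SubElement(source, "Author")
author.text = "Smith, J. and Jones, A."
year = etree.SubElement(source, "Year")
year.text = "2003"

# xpath() returns a list, so guard against missing elements
# exactly as the conversion code above does
years = root.xpath("//Year")
if len(years) > 0:
    print(years[0].text)

print(etree.tostring(root, pretty_print=True))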
gpl-3.0
6,061,260,257,360,899,000
35.871961
120
0.617275
false
3.612832
false
false
false
divio/askbot-devel
askbot/views/writers.py
1
39556
# encoding:utf-8
"""
:synopsis: views displaying and processing main content post forms

This module contains views that allow adding, editing,
and deleting main textual content.
"""
import datetime
import logging
import os
import os.path
import random
import sys
import tempfile
import time

from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.http import Http404
from django.utils import simplejson
from django.utils.html import strip_tags, escape
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.urlresolvers import reverse
from django.core import exceptions
from django.conf import settings
from django.views.decorators import csrf

from askbot import exceptions as askbot_exceptions
from askbot import forms
from askbot import models
from askbot import signals
from askbot.conf import settings as askbot_settings
from askbot.utils import decorators
from askbot.utils.forms import format_errors
from askbot.utils.functions import diff_date
from askbot.utils import url_utils
from askbot.utils.file_utils import store_file
from askbot.utils.loading import load_module
from askbot.views import context
from askbot.templatetags import extra_filters_jinja as template_filters
from askbot.importers.stackexchange import management as stackexchange#todo: may change
from askbot.utils.slug import slugify
from recaptcha_works.decorators import fix_recaptcha_remote_ip

# used in index page
INDEX_PAGE_SIZE = 20
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 100
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
QUESTIONS_PAGE_SIZE = 10
# used in answers
ANSWERS_PAGE_SIZE = 10

#todo: make this work with csrf
@csrf.csrf_exempt
def upload(request):#ajax upload file to a question or answer
    """view that handles file upload via Ajax
    """
    # check upload permission
    result = ''
    error = ''
    new_file_name = ''
    try:
        #may raise exceptions.PermissionDenied
        result, error, file_url, orig_file_name = None, '', None, None
        if request.user.is_anonymous():
            msg = _('Sorry, anonymous users cannot upload files')
            raise exceptions.PermissionDenied(msg)
        request.user.assert_can_upload_file()
        #todo: build proper form validation
        file_name_prefix = request.POST.get('file_name_prefix', '')
        if file_name_prefix not in ('', 'group_logo_'):
            raise exceptions.PermissionDenied('invalid upload file name prefix')
        #todo: check file type
        uploaded_file = request.FILES['file-upload']#take first file
        orig_file_name = uploaded_file.name
        #todo: extension checking should be replaced with mimetype checking
        #and this must be part of the form validation
        file_extension = os.path.splitext(orig_file_name)[1].lower()
        if not file_extension in settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES:
            file_types = "', '".join(settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES)
            msg = _("allowed file types are '%(file_types)s'") % \
                    {'file_types': file_types}
            raise exceptions.PermissionDenied(msg)
        # generate new file name and storage object
        file_storage, new_file_name, file_url = store_file(
            uploaded_file, file_name_prefix
        )
        # check file size in bytes
        size = file_storage.size(new_file_name)
        if size >
settings.ASKBOT_MAX_UPLOAD_FILE_SIZE: file_storage.delete(new_file_name) msg = _("maximum upload file size is %(file_size)sK") % \ {'file_size': settings.ASKBOT_MAX_UPLOAD_FILE_SIZE} raise exceptions.PermissionDenied(msg) except exceptions.PermissionDenied, e: error = unicode(e) except Exception, e: logging.critical(unicode(e)) error = _('Error uploading file. Please contact the site administrator. Thank you.') if error == '': result = 'Good' else: result = '' file_url = '' #data = simplejson.dumps({ # 'result': result, # 'error': error, # 'file_url': file_url #}) #return HttpResponse(data, mimetype = 'application/json') xml_template = "<result><msg><![CDATA[%s]]></msg><error><![CDATA[%s]]></error><file_url>%s</file_url><orig_file_name><![CDATA[%s]]></orig_file_name></result>" xml = xml_template % (result, error, file_url, orig_file_name) return HttpResponse(xml, content_type="application/xml") def __import_se_data(dump_file): """non-view function that imports the SE data in the future may import other formats as well In this function stdout is temporarily redirected, so that the underlying importer management command could stream the output to the browser todo: maybe need to add try/except clauses to restore the redirects in the exceptional situations """ fake_stdout = tempfile.NamedTemporaryFile() real_stdout = sys.stdout sys.stdout = fake_stdout importer = stackexchange.ImporterThread(dump_file = dump_file.name) importer.start() #run a loop where we'll be reading output of the #importer tread and yielding it to the caller read_stdout = open(fake_stdout.name, 'r') file_pos = 0 fd = read_stdout.fileno() yield '<html><body><style>* {font-family: sans;} p {font-size: 12px; line-height: 16px; margin: 0; padding: 0;}</style><h1>Importing your data. This may take a few minutes...</h1>' while importer.isAlive(): c_size = os.fstat(fd).st_size if c_size > file_pos: line = read_stdout.readline() yield '<p>' + line + '</p>' file_pos = read_stdout.tell() fake_stdout.close() read_stdout.close() dump_file.close() sys.stdout = real_stdout yield '<p>Done. 
Please, <a href="%s">Visit Your Forum</a></p></body></html>' % reverse('index') @csrf.csrf_protect def import_data(request): """a view allowing the site administrator upload stackexchange data """ #allow to use this view to site admins #or when the forum in completely empty if request.user.is_anonymous() or (not request.user.is_administrator()): if models.Post.objects.get_questions().exists(): raise Http404 if request.method == 'POST': #if not request.is_ajax(): # raise Http404 form = forms.DumpUploadForm(request.POST, request.FILES) if form.is_valid(): dump_file = form.cleaned_data['dump_file'] dump_storage = tempfile.NamedTemporaryFile() #save the temp file for chunk in dump_file.chunks(): dump_storage.write(chunk) dump_storage.flush() return HttpResponse(__import_se_data(dump_storage)) #yield HttpResponse(_('StackExchange import complete.'), content_type='text/plain') #dump_storage.close() else: form = forms.DumpUploadForm() data = { 'dump_upload_form': form, 'need_configuration': (not stackexchange.is_ready()) } return render(request, 'import_data.html', data) @fix_recaptcha_remote_ip @csrf.csrf_protect @decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts')) @decorators.check_spam('text') def ask(request):#view used to ask a new question """a view to ask a new question gives space for q title, body, tags and checkbox for to post as wiki user can start posting a question anonymously but then must login/register in order for the question go be shown """ if request.user.is_authenticated(): if request.user.is_read_only(): referer = request.META.get("HTTP_REFERER", reverse('questions')) request.user.message_set.create(message=_('Sorry, but you have only read access')) return HttpResponseRedirect(referer) if askbot_settings.READ_ONLY_MODE_ENABLED: return HttpResponseRedirect(reverse('index')) if request.method == 'POST': form = forms.AskForm(request.POST, user=request.user) if form.is_valid(): timestamp = datetime.datetime.now() title = form.cleaned_data['title'] wiki = form.cleaned_data['wiki'] tagnames = form.cleaned_data['tags'] text = form.cleaned_data['text'] ask_anonymously = form.cleaned_data['ask_anonymously'] post_privately = form.cleaned_data['post_privately'] group_id = form.cleaned_data.get('group_id', None) language = form.cleaned_data.get('language', None) if request.user.is_authenticated(): drafts = models.DraftQuestion.objects.filter(author=request.user) drafts.delete() user = form.get_post_user(request.user) elif request.user.is_anonymous() and askbot_settings.ALLOW_ASK_UNREGISTERED: user = models.get_or_create_anonymous_user() ask_anonymously = True else: user = None if user: try: question = user.post_question( title=title, body_text=text, tags=tagnames, wiki=wiki, is_anonymous=ask_anonymously, is_private=post_privately, timestamp=timestamp, group_id=group_id, language=language, ip_addr=request.META.get('REMOTE_ADDR') ) signals.new_question_posted.send(None, question=question, user=user, form_data=form.cleaned_data ) return HttpResponseRedirect(question.get_absolute_url()) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(reverse('index')) else: request.session.flush() session_key=request.session.session_key models.AnonymousQuestion.objects.create( session_key=session_key, title=title, tagnames=tagnames, wiki=wiki, is_anonymous=ask_anonymously, text=text, added_at=timestamp, ip_addr=request.META.get('REMOTE_ADDR'), ) return HttpResponseRedirect(url_utils.get_login_url()) if 
request.method == 'GET': form = forms.AskForm(user=request.user) draft_title = '' draft_text = '' draft_tagnames = '' if request.user.is_authenticated(): drafts = models.DraftQuestion.objects.filter(author=request.user) if len(drafts) > 0: draft = drafts[0] draft_title = draft.title draft_text = draft.text draft_tagnames = draft.tagnames form.initial = { 'ask_anonymously': request.REQUEST.get('ask_anonymously', False), 'tags': request.REQUEST.get('tags', draft_tagnames), 'text': request.REQUEST.get('text', draft_text), 'title': request.REQUEST.get('title', draft_title), 'post_privately': request.REQUEST.get('post_privately', False), 'language': get_language(), 'wiki': request.REQUEST.get('wiki', False), } if 'group_id' in request.REQUEST: try: group_id = int(request.GET.get('group_id', None)) form.initial['group_id'] = group_id except Exception: pass editor_is_folded = (askbot_settings.QUESTION_BODY_EDITOR_MODE=='folded' and \ askbot_settings.MIN_QUESTION_BODY_LENGTH==0 and \ form.initial['text'] == '') data = { 'active_tab': 'ask', 'page_class': 'ask-page', 'form' : form, 'editor_is_folded': editor_is_folded, 'mandatory_tags': models.tag.get_mandatory_tags(), 'email_validation_faq_url':reverse('faq') + '#validate', 'category_tree_data': askbot_settings.CATEGORY_TREE, 'tag_names': list()#need to keep context in sync with edit_question for tag editor } data.update(context.get_for_tag_editor()) return render(request, 'ask.html', data) @login_required @csrf.csrf_protect def retag_question(request, id): """retag question view """ question = get_object_or_404(models.Post, id=id) try: request.user.assert_can_retag_question(question) if request.method == 'POST': form = forms.RetagQuestionForm(question, request.POST) if form.is_valid(): if form.has_changed(): request.user.retag_question(question=question, tags=form.cleaned_data['tags']) if request.is_ajax(): response_data = { 'success': True, 'new_tags': question.thread.tagnames } if request.user.message_set.count() > 0: #todo: here we will possibly junk messages message = request.user.get_and_delete_messages()[-1] response_data['message'] = message data = simplejson.dumps(response_data) return HttpResponse(data, content_type="application/json") else: return HttpResponseRedirect(question.get_absolute_url()) elif request.is_ajax(): response_data = { 'message': format_errors(form.errors['tags']), 'success': False } data = simplejson.dumps(response_data) return HttpResponse(data, content_type="application/json") else: form = forms.RetagQuestionForm(question) data = { 'active_tab': 'questions', 'question': question, 'form' : form, } return render(request, 'question_retag.html', data) except exceptions.PermissionDenied, e: if request.is_ajax(): response_data = { 'message': unicode(e), 'success': False } data = simplejson.dumps(response_data) return HttpResponse(data, content_type="application/json") else: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(question.get_absolute_url()) @login_required @csrf.csrf_protect @decorators.check_spam('text') @fix_recaptcha_remote_ip def edit_question(request, id): """edit question view """ question = get_object_or_404(models.Post, id=id) if askbot_settings.READ_ONLY_MODE_ENABLED: return HttpResponseRedirect(question.get_absolute_url()) try: revision = question.revisions.get(revision=0) except models.PostRevision.DoesNotExist: revision = question.get_latest_revision() revision_form = None try: request.user.assert_can_edit_question(question) if request.method == 'POST': if 
request.POST['select_revision'] == 'true': #revert-type edit - user selected previous revision revision_form = forms.RevisionForm( question, revision, request.POST ) if revision_form.is_valid(): # Replace with those from the selected revision rev_id = revision_form.cleaned_data['revision'] revision = question.revisions.get(revision = rev_id) form = forms.EditQuestionForm( question=question, user=request.user, revision=revision ) else: form = forms.EditQuestionForm( request.POST, question=question, user=question.user, revision=revision ) else:#new content edit # Always check modifications against the latest revision form = forms.EditQuestionForm( request.POST, question=question, revision=revision, user=request.user, ) revision_form = forms.RevisionForm(question, revision) if form.is_valid(): if form.has_changed(): if form.can_edit_anonymously() and form.cleaned_data['reveal_identity']: question.thread.remove_author_anonymity() question.is_anonymous = False is_wiki = form.cleaned_data.get('wiki', question.wiki) post_privately = form.cleaned_data['post_privately'] suppress_email = form.cleaned_data['suppress_email'] user = form.get_post_user(request.user) user.edit_question( question=question, title=form.cleaned_data['title'], body_text=form.cleaned_data['text'], revision_comment=form.cleaned_data['summary'], tags=form.cleaned_data['tags'], wiki=is_wiki, edit_anonymously=form.cleaned_data['edit_anonymously'], is_private=post_privately, suppress_email=suppress_email, ip_addr=request.META.get('REMOTE_ADDR') ) if 'language' in form.cleaned_data: question.thread.set_language_code(form.cleaned_data['language']) return HttpResponseRedirect(question.get_absolute_url()) else: #request type was "GET" revision_form = forms.RevisionForm(question, revision) initial = { 'language': question.thread.language_code, 'post_privately': question.is_private(), 'wiki': question.wiki } form = forms.EditQuestionForm( question=question, revision=revision, user=request.user, initial=initial ) data = { 'page_class': 'edit-question-page', 'active_tab': 'questions', 'question': question, 'revision': revision, 'revision_form': revision_form, 'mandatory_tags': models.tag.get_mandatory_tags(), 'form' : form, 'tag_names': question.thread.get_tag_names(), 'category_tree_data': askbot_settings.CATEGORY_TREE } data.update(context.get_for_tag_editor()) return render(request, 'question_edit.html', data) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(question.get_absolute_url()) @login_required @csrf.csrf_protect @decorators.check_spam('text') @fix_recaptcha_remote_ip def edit_answer(request, id): answer = get_object_or_404(models.Post, id=id) if askbot_settings.READ_ONLY_MODE_ENABLED: return HttpResponseRedirect(answer.get_absolute_url()) try: revision = answer.revisions.get(revision=0) except models.PostRevision.DoesNotExist: revision = answer.get_latest_revision() class_path = getattr(settings, 'ASKBOT_EDIT_ANSWER_FORM', None) if class_path: edit_answer_form_class = load_module(class_path) else: edit_answer_form_class = forms.EditAnswerForm try: request.user.assert_can_edit_answer(answer) if request.method == "POST": if request.POST['select_revision'] == 'true': # user has changed revistion number revision_form = forms.RevisionForm( answer, revision, request.POST ) if revision_form.is_valid(): # Replace with those from the selected revision rev = revision_form.cleaned_data['revision'] revision = answer.revisions.get(revision = rev) form = 
edit_answer_form_class( answer, revision, user=request.user ) else: form = edit_answer_form_class( answer, revision, request.POST, user=request.user ) else: form = edit_answer_form_class( answer, revision, request.POST, user=request.user ) revision_form = forms.RevisionForm(answer, revision) if form.is_valid(): if form.has_changed(): user = form.get_post_user(request.user) suppress_email = form.cleaned_data['suppress_email'] is_private = form.cleaned_data.get('post_privately', False) user.edit_answer( answer=answer, body_text=form.cleaned_data['text'], revision_comment=form.cleaned_data['summary'], wiki=form.cleaned_data.get('wiki', answer.wiki), is_private=is_private, suppress_email=suppress_email, ip_addr=request.META.get('REMOTE_ADDR') ) signals.answer_edited.send(None, answer=answer, user=user, form_data=form.cleaned_data ) return HttpResponseRedirect(answer.get_absolute_url()) else: revision_form = forms.RevisionForm(answer, revision) form = edit_answer_form_class(answer, revision, user=request.user) if request.user.can_make_group_private_posts(): form.initial['post_privately'] = answer.is_private() data = { 'page_class': 'edit-answer-page', 'active_tab': 'questions', 'answer': answer, 'revision': revision, 'revision_form': revision_form, 'form': form, } extra_context = context.get_extra( 'ASKBOT_EDIT_ANSWER_PAGE_EXTRA_CONTEXT', request, data ) data.update(extra_context) return render(request, 'answer_edit.html', data) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) return HttpResponseRedirect(answer.get_absolute_url()) #todo: rename this function to post_new_answer @decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts')) @decorators.check_spam('text') @fix_recaptcha_remote_ip def answer(request, id, form_class=forms.AnswerForm):#process a new answer """view that posts new answer anonymous users post into anonymous storage and redirected to login page authenticated users post directly """ question = get_object_or_404(models.Post, post_type='question', id=id) if askbot_settings.READ_ONLY_MODE_ENABLED: return HttpResponseRedirect(question.get_absolute_url()) if request.method == "POST": #this check prevents backward compatilibility if form_class == forms.AnswerForm: custom_class_path = getattr(settings, 'ASKBOT_NEW_ANSWER_FORM', None) if custom_class_path: form_class = load_module(custom_class_path) else: form_class = forms.AnswerForm form = form_class(request.POST, user=request.user) if form.is_valid(): if request.user.is_authenticated(): drafts = models.DraftAnswer.objects.filter( author=request.user, thread=question.thread ) drafts.delete() user = form.get_post_user(request.user) try: answer = form.save( question, user, ip_addr=request.META.get('REMOTE_ADDR') ) signals.new_answer_posted.send(None, answer=answer, user=user, form_data=form.cleaned_data ) return HttpResponseRedirect(answer.get_absolute_url()) except askbot_exceptions.AnswerAlreadyGiven, e: request.user.message_set.create(message = unicode(e)) answer = question.thread.get_answers_by_user(user)[0] return HttpResponseRedirect(answer.get_absolute_url()) except exceptions.PermissionDenied, e: request.user.message_set.create(message = unicode(e)) else: request.session.flush() models.AnonymousAnswer.objects.create( question=question, wiki=form.cleaned_data['wiki'], text=form.cleaned_data['text'], session_key=request.session.session_key, ip_addr=request.META.get('REMOTE_ADDR'), ) return HttpResponseRedirect(url_utils.get_login_url()) return 
HttpResponseRedirect(question.get_absolute_url()) def __generate_comments_json(obj, user, avatar_size): """non-view generates json data for the post comments """ models.Post.objects.precache_comments(for_posts=[obj], visitor=user) comments = obj._cached_comments # {"Id":6,"PostId":38589,"CreationDate":"an hour ago","Text":"hello there!","UserDisplayName":"Jarrod Dixon","UserUrl":"/users/3/jarrod-dixon","DeleteUrl":null} json_comments = [] for comment in comments: if user and user.is_authenticated(): try: user.assert_can_delete_comment(comment) #/posts/392845/comments/219852/delete #todo translate this url is_deletable = True except exceptions.PermissionDenied: is_deletable = False is_editable = template_filters.can_edit_comment(user, comment) else: is_deletable = False is_editable = False comment_owner = comment.author tz = ' ' + template_filters.TIMEZONE_STR comment_data = {'id' : comment.id, 'object_id': obj.id, 'comment_added_at': str(comment.added_at.replace(microsecond = 0)) + tz, 'html': comment.html, 'user_display_name': escape(comment_owner.username), 'user_profile_url': comment_owner.get_profile_url(), 'user_avatar_url': comment_owner.get_avatar_url(avatar_size), 'user_id': comment_owner.id, 'user_is_administrator': comment_owner.is_administrator(), 'user_is_moderator': comment_owner.is_moderator(), 'is_deletable': is_deletable, 'is_editable': is_editable, 'points': comment.points, 'score': comment.points, #to support js 'upvoted_by_user': getattr(comment, 'upvoted_by_user', False) } json_comments.append(comment_data) data = simplejson.dumps(json_comments) return HttpResponse(data, content_type="application/json") @csrf.csrf_protect @decorators.check_spam('comment') def post_comments(request):#generic ajax handler to load comments to an object """todo: fixme: post_comments is ambigous: means either get comments for post or add a new comment to post """ # only support get post comments by ajax now post_type = request.REQUEST.get('post_type', '') if not request.is_ajax() or post_type not in ('question', 'answer'): raise Http404 # TODO: Shouldn't be 404! More like 400, 403 or sth more specific if post_type == 'question' \ and askbot_settings.QUESTION_COMMENTS_ENABLED == False: raise Http404 elif post_type == 'answer' \ and askbot_settings.ANSWER_COMMENTS_ENABLED == False: raise Http404 user = request.user if request.method == 'POST': form = forms.NewCommentForm(request.POST) elif request.method == 'GET': form = forms.GetCommentDataForPostForm(request.GET) if form.is_valid() == False: return HttpResponseBadRequest( _('This content is forbidden'), mimetype='application/json' ) post_id = form.cleaned_data['post_id'] avatar_size = form.cleaned_data['avatar_size'] try: post = models.Post.objects.get(id=post_id) except models.Post.DoesNotExist: return HttpResponseBadRequest( _('Post not found'), mimetype='application/json' ) if request.method == "GET": response = __generate_comments_json(post, user, avatar_size) elif request.method == "POST": try: if user.is_anonymous(): msg = _('Sorry, you appear to be logged out and ' 'cannot post comments. 
Please ' '<a href="%(sign_in_url)s">sign in</a>.') % \ {'sign_in_url': url_utils.get_login_url()} raise exceptions.PermissionDenied(msg) if askbot_settings.READ_ONLY_MODE_ENABLED: raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE) comment = user.post_comment( parent_post=post, body_text=form.cleaned_data['comment'], ip_addr=request.META.get('REMOTE_ADDR') ) signals.new_comment_posted.send(None, comment=comment, user=user, form_data=form.cleaned_data ) response = __generate_comments_json(post, user, avatar_size) except exceptions.PermissionDenied, e: response = HttpResponseForbidden(unicode(e), content_type="application/json") return response @csrf.csrf_protect @decorators.ajax_only #@decorators.check_spam('comment') def edit_comment(request): if request.user.is_anonymous(): raise exceptions.PermissionDenied(_('Sorry, anonymous users cannot edit comments')) if askbot_settings.READ_ONLY_MODE_ENABLED: raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE) form = forms.EditCommentForm(request.POST) if form.is_valid() == False: raise exceptions.PermissionDenied('This content is forbidden') comment_post = models.Post.objects.get( post_type='comment', id=form.cleaned_data['comment_id'] ) revision = request.user.edit_comment( comment_post=comment_post, body_text=form.cleaned_data['comment'], suppress_email=form.cleaned_data['suppress_email'], ip_addr=request.META.get('REMOTE_ADDR'), ) is_deletable = template_filters.can_delete_comment( comment_post.author, comment_post) is_editable = template_filters.can_edit_comment( comment_post.author, comment_post) tz = ' ' + template_filters.TIMEZONE_STR tz = template_filters.TIMEZONE_STR timestamp = str(comment_post.added_at.replace(microsecond=0)) + tz #need this because the post.text is due to the latest approved #revision, but we may need the suggested revision comment_post.text = revision.text comment_post.html = comment_post.parse_post_text()['html'] return { 'id' : comment_post.id, 'object_id': comment_post.parent.id, 'comment_added_at': timestamp, 'html': comment_post.html, 'user_display_name': escape(comment_post.author.username), 'user_url': comment_post.author.get_profile_url(), 'user_id': comment_post.author.id, 'is_deletable': is_deletable, 'is_editable': is_editable, 'score': comment_post.points, #to support unchanged js 'points': comment_post.points, 'voted': comment_post.is_upvoted_by(request.user), } @csrf.csrf_protect def delete_comment(request): """ajax handler to delete comment """ try: if request.user.is_anonymous(): msg = _('Sorry, you appear to be logged out and ' 'cannot delete comments. 
Please ' '<a href="%(sign_in_url)s">sign in</a>.') % \ {'sign_in_url': url_utils.get_login_url()} raise exceptions.PermissionDenied(msg) if request.is_ajax(): form = forms.ProcessCommentForm(request.POST) if form.is_valid() == False: return HttpResponseBadRequest() comment_id = form.cleaned_data['comment_id'] comment = get_object_or_404(models.Post, post_type='comment', id=comment_id) request.user.assert_can_delete_comment(comment) if askbot_settings.READ_ONLY_MODE_ENABLED: raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE) parent = comment.parent comment.delete() #attn: recalc denormalized field parent.comment_count = parent.comments.count() parent.save() parent.thread.reset_cached_data() avatar_size = form.cleaned_data['avatar_size'] return __generate_comments_json(parent, request.user, avatar_size) raise exceptions.PermissionDenied( _('sorry, we seem to have some technical difficulties') ) except exceptions.PermissionDenied, e: return HttpResponseForbidden( unicode(e), mimetype = 'application/json' ) @login_required @decorators.post_only @csrf.csrf_protect def comment_to_answer(request): if request.user.is_anonymous(): msg = _('Sorry, only logged in users can convert comments to answers. ' 'Please <a href="%(sign_in_url)s">sign in</a>.') % \ {'sign_in_url': url_utils.get_login_url()} raise exceptions.PermissionDenied(msg) form = forms.ConvertCommentForm(request.POST) if form.is_valid() == False: raise Http404 comment = get_object_or_404( models.Post, post_type='comment', id=form.cleaned_data['comment_id'] ) if askbot_settings.READ_ONLY_MODE_ENABLED is False: request.user.repost_comment_as_answer(comment) return HttpResponseRedirect(comment.get_absolute_url()) @decorators.post_only @csrf.csrf_protect #todo: change the urls config for this def repost_answer_as_comment(request, destination=None): assert( destination in ( 'comment_under_question', 'comment_under_previous_answer' ) ) if request.user.is_anonymous(): msg = _('Sorry, only logged in users can convert answers to comments. ' 'Please <a href="%(sign_in_url)s">sign in</a>.') % \ {'sign_in_url': url_utils.get_login_url()} raise exceptions.PermissionDenied(msg) answer_id = request.POST.get('answer_id') if answer_id: try: answer_id = int(answer_id) except (ValueError, TypeError): raise Http404 answer = get_object_or_404(models.Post, post_type = 'answer', id=answer_id) if askbot_settings.READ_ONLY_MODE_ENABLED: return HttpResponseRedirect(answer.get_absolute_url()) request.user.assert_can_convert_post(post=answer) if destination == 'comment_under_question': destination_post = answer.thread._question_post() else: #comment_under_previous_answer destination_post = answer.get_previous_answer(user=request.user) #todo: implement for comment under other answer if destination_post is None: message = _('Error - could not find the destination post') request.user.message_set.create(message=message) return HttpResponseRedirect(answer.get_absolute_url()) if len(answer.text) <= askbot_settings.MAX_COMMENT_LENGTH: answer.post_type = 'comment' answer.parent = destination_post new_comment_count = answer.comments.count() + 1 answer.comment_count = 0 answer_comments = models.Post.objects.get_comments().filter(parent=answer) answer_comments.update(parent=destination_post) #why this and not just "save"? 
answer.parse_and_save(author=answer.author) answer.thread.update_answer_count() answer.parent.comment_count += new_comment_count answer.parent.save() answer.thread.reset_cached_data() else: message = _( 'Cannot convert, because text has more characters than ' '%(max_chars)s - maximum allowed for comments' ) % {'max_chars': askbot_settings.MAX_COMMENT_LENGTH} request.user.message_set.create(message=message) return HttpResponseRedirect(answer.get_absolute_url()) else: raise Http404
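The streaming import in __import_se_data above works by pointing sys.stdout at a named temporary file while a worker thread writes to it, and yielding the file's growth back to the caller. A stripped-down sketch of that technique in the same Python 2 style, with a stand-in worker() instead of the real stackexchange.ImporterThread; it shares the original's caveat that output written just before the thread exits can be missed:

import os
import sys
import tempfile
import threading
import time

def worker():
    # stand-in for the real importer thread
    for i in range(3):
        print 'step %d' % i
        sys.stdout.flush()  # make sure output reaches the temp file
        time.sleep(0.1)

def stream_worker_output():
    fake_stdout = tempfile.NamedTemporaryFile()
    real_stdout = sys.stdout
    sys.stdout = fake_stdout  # worker's print statements now land here
    thread = threading.Thread(target=worker)
    thread.start()
    reader = open(fake_stdout.name, 'r')
    fd = reader.fileno()
    file_pos = 0
    while thread.isAlive():
        # yield new lines as the temp file grows
        if os.fstat(fd).st_size > file_pos:
            yield reader.readline()
            file_pos = reader.tell()
    sys.stdout = real_stdout
    reader.close()
    fake_stdout.close()

for line in stream_worker_output():
    sys.__stdout__.write(line)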
gpl-3.0
-7,389,069,209,881,651,000
38.794769
184
0.566008
false
4.436519
false
false
false
kmshi/miroguide
channelguide/channels/migrations/0003_switch_user_ids.py
1
13086
from south.db import db from django.db import models from channelguide.channels.models import * class Migration: no_dry_run = True def forwards(self, orm): "Write your forwards migration here" for channel in orm.Channel.objects.all(): for field in ('owner', 'featured_by', 'moderator_shared_by', 'last_moderated_by'): value = getattr(channel, '%s_id' % field) if value: profile = orm['user_profile.UserProfile'].objects.get( pk=value) setattr(channel, field, profile.user) else: setattr(channel, field, None) channel.save() def backwards(self, orm): "Write your backwards migration here" models = { 'auth.group': { 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)"}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'user_profile.userprofile': { 'Meta': {'db_table': "'user'"}, 'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'channel_owner_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}), 'email_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'filter_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'fname': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'hashed_password': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'im_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'im_username': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5'}), 'lname': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'moderator_board_email': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}), 'role': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}), 'show_explicit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'shown_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Language']"}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'status_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'updated_at': 
('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'to_field': "'username'", 'unique': 'True', 'db_column': "'username'"}), 'zip': ('django.db.models.fields.CharField', [], {'max_length': '15'}) }, 'auth.user': { 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'channels.addedchannel': { 'Meta': {'unique_together': "[('channel', 'user')]", 'db_table': "'cg_channel_added'"}, 'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['channels.Channel']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['auth.User']"}) }, 'channels.channel': { 'Meta': {'db_table': "'cg_channel'"}, 'adult': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Category']"}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'featured_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'featured_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'featured_set'", 'null': 'True', 'to': "orm['auth.User']"}), 'feed_etag': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'feed_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'geoip': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}), 'hi_def': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'language': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'db_column': "'primary_language_id'", 'to': "orm['labels.Language']"}), 'last_moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_moderated_set'", 'null': 'True', 'to': "orm['auth.User']"}), 'license': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}), 'moderator_shared_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'moderator_shared_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderator_shared_set'", 'null': 'True', 'to': "orm['auth.User']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'to': "orm['auth.User']"}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '15'}), 'publisher': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'state': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Tag']"}), 'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'waiting_for_reply_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'was_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'website_url': ('django.db.models.fields.URLField', [], {'max_length': '255'}) }, 'channels.item': { 'Meta': {'db_table': "'cg_channel_item'"}, 'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['channels.Channel']"}), 'date': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.TextField', [], {}), 'guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'size': ('django.db.models.fields.IntegerField', [], {}), 'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}), 'thumbnail_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '255'}) }, 'channels.lastapproved': { 'Meta': {'db_table': "'cg_channel_last_approved'"}, 'timestamp': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'labels.category': { 'Meta': {'db_table': "'cg_category'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '200'}), 'on_frontpage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}) }, 'labels.language': { 'Meta': {'db_table': "'cg_channel_language'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'labels.tag': { 'Meta': {'db_table': "'cg_tag'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) } } complete_apps = ['channels']
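South data migrations like this one must go through the frozen orm mapping (orm.Channel, orm['user_profile.UserProfile']) rather than importing models directly, so the code sees the schema as it existed at this point in history. A hedged variant of the forwards() loop above using that same mapping; it deliberately simplifies the original by iterating lazily and saving only when a field actually changed (.iterator() is standard Django queryset API):

def forwards(self, orm):
    UserProfile = orm['user_profile.UserProfile']
    fields = ('owner', 'featured_by', 'moderator_shared_by',
              'last_moderated_by')
    for channel in orm.Channel.objects.all().iterator():
        changed = False
        for field in fields:
            # the raw column still holds a UserProfile id at this point
            profile_id = getattr(channel, '%s_id' % field)
            if profile_id:
                profile = UserProfile.objects.get(pk=profile_id)
                setattr(channel, field, profile.user)
                changed = True
        if changed:
            channel.save()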
agpl-3.0
668,921,977,871,084,900
76.431953
177
0.539814
false
3.710235
false
false
false
skdaccess/skdaccess
skdaccess/geo/srtm/cache/data_fetcher.py
2
10677
# The MIT License (MIT) # Copyright (c) 2016 Massachusetts Institute of Technology # # Authors: Cody Rude, Guillaume Rongier # This software has been created in projects supported by the US National # Science Foundation and NASA (PI: Pankratius) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Scikit Data Access imports from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper from skdaccess.utilities.support import convertToStr from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges # 3rd party imports import pandas as pd import numpy as np import gdal from pkg_resources import resource_filename # Standard library imports from collections import OrderedDict from calendar import monthrange from zipfile import ZipFile import os class DataFetcher(DataFetcherCache): ''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission ''' def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end, username, password, arcsecond_sampling = 1, mask_water = True, store_geolocation_grids=False): ''' Initialize Data Fetcher @param lat_tile_start: Latitude of the southwest corner of the starting tile @param lat_tile_end: Latitude of the southwset corner of the last tile @param lon_tile_start: Longitude of the southwest corner of the starting tile @param lon_tile_end: Longitude of the southwest corner of the last tile @param username: NASA Earth Data username @param password: NASA Earth Data Password @param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc- second or 3 arc-seconds @param mask_water: True if the water bodies should be masked, false otherwise @param store_geolocation_grids: Store grids of latitude and longitude in the metadata ''' assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds" self.lat_tile_start = lat_tile_start self.lat_tile_end = lat_tile_end self.lon_tile_start = lon_tile_start self.lon_tile_end = lon_tile_end self.username = username self.password = password self.arcsecond_sampling = arcsecond_sampling self.mask_water = mask_water self.store_geolocation_grids = store_geolocation_grids self._missing_data_projection = '\n'.join([ 'GEOGCS["WGS 84",', ' DATUM["WGS_1984",', ' SPHEROID["WGS 84",6378137,298.257223563,', ' AUTHORITY["EPSG","7030"]],', ' AUTHORITY["EPSG","6326"]],', ' PRIMEM["Greenwich",0,', ' AUTHORITY["EPSG","8901"]],', ' UNIT["degree",0.0174532925199433,', ' AUTHORITY["EPSG","9122"]],', ' AUTHORITY["EPSG","4326"]]' ]) super(DataFetcher, 
self).__init__() def output(self): ''' Generate SRTM data wrapper @return SRTM Image Wrapper ''' lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1) lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1) lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array) lat_grid = lat_grid.ravel() lon_grid = lon_grid.ravel() filename_root = '.SRTMGL1.' base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/' folder_root = 'SRTMGL1.003/2000.02.11/' if self.arcsecond_sampling == 3: filename_root = '.SRTMGL3.' folder_root = 'SRTMGL3.003/2000.02.11/' base_url += folder_root filename_list = [] for lat, lon in zip(lat_grid, lon_grid): if lat < 0: lat_label = 'S' lat = np.abs(lat) else: lat_label = 'N' if lon < 0: lon_label = 'W' lon = np.abs(lon) else: lon_label = 'E' filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip') if self.mask_water == True: filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip') # Read in list of available data srtm_list_filename = 'srtm_gl1.txt' if self.arcsecond_sampling == 3: srtm_list_filename = 'srtm_gl3.txt' srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename)) available_file_list = open(srtm_support_filename).readlines() available_file_list = [filename.strip() for filename in available_file_list] requested_files = pd.DataFrame({'Filename' : filename_list}) requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ] valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist() url_list = [base_url + filename for filename in valid_filename_list] downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password, 'https://urs.earthdata.nasa.gov') requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list def getCoordinates(filename): ''' Determine the longitude and latitude of the lowerleft corner of the input filename @param in_filename: Input SRTM filename @return Latitude of southwest corner, Longitude of southwest corner ''' lat_start = int(filename[1:3]) if filename[0] == 'S': lat_start *= -1 lon_start = int(filename[4:7]) if filename[3] == 'W': lon_start *= -1 return lat_start, lon_start data_dict = OrderedDict() metadata_dict = OrderedDict() array_shape = (3601,3601) if self.arcsecond_sampling == 3: array_shape = (1201,1201) file_slice = slice(None) water_value = 0 if self.mask_water == True: file_slice = slice(0, -1, 2) water_value = np.nan for i in requested_files.index[file_slice]: hgt_full_path = requested_files.at[i, 'Full Path'] hgt_filename = requested_files.at[i, 'Filename'] label = hgt_filename[:7] lat_start, lon_start = getCoordinates(hgt_filename) metadata_dict[label] = OrderedDict() x_res = 1.0 / (array_shape[0]-1) y_res = 1.0 / (array_shape[1]-1) extents = [ lon_start - x_res / 2, lon_start + 1 + x_res / 2, lat_start - y_res / 2, lat_start + 1 + y_res / 2 ] if requested_files.at[i, 'Valid']: masked_dem_data = np.ones(array_shape) if self.mask_water == True and requested_files.at[i + 1, 'Valid']: num_full_path = requested_files.at[i + 1, 'Full Path'] num_filename = requested_files.at[i + 1, 'Full Path'] zipped_num_data = ZipFile(num_full_path) zipped_num_full_path = zipped_num_data.infolist()[0].filename num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(), np.dtype('uint8')).reshape(array_shape) 
masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value i += 1 zipped_hgt_data = ZipFile(hgt_full_path) dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly) dem_data = dem_dataset.ReadAsArray() masked_dem_data *= dem_data metadata_dict[label]['WKT'] = dem_dataset.GetProjection() metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform() else: geo_transform = [] geo_transform.append(extents[0]) geo_transform.append(x_res) geo_transform.append(0) geo_transform.append(extents[-1]) geo_transform.append(0) geo_transform.append(-y_res) metadata_dict[label]['WKT'] = self._missing_data_projection metadata_dict[label]['GeoTransform'] = geo_transform masked_dem_data = np.full(shape=array_shape, fill_value=water_value) i += 1 data_dict[label] = masked_dem_data metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True) metadata_dict[label]['extents'] = extents if self.store_geolocation_grids: lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]), np.linspace(lon_start, lon_start+1, array_shape[1]), indexing = 'ij') metadata_dict[label]['Latitude'] = lat_coords metadata_dict[label]['Longitude'] = lon_coords return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict)
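The filename construction in output() above encodes each 1 degree by 1 degree SRTM tile by the coordinates of its southwest corner. A standalone sketch of that convention; srtm_tile_name is a hypothetical helper (the real code builds the string inline with convertToStr), but the resulting names match NASA's published tile naming:

def srtm_tile_name(lat, lon, filename_root='.SRTMGL1.'):
    # e.g. (34, -119) -> 'N34W119.SRTMGL1.hgt.zip'
    lat_label = 'S' if lat < 0 else 'N'
    lon_label = 'W' if lon < 0 else 'E'
    return '%s%02d%s%03d%shgt.zip' % (
        lat_label, abs(lat), lon_label, abs(lon), filename_root)

assert srtm_tile_name(34, -119) == 'N34W119.SRTMGL1.hgt.zip'
assert srtm_tile_name(-1, 36, '.SRTMGL3.') == 'S01E036.SRTMGL3.hgt.zip'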
mit
6,060,677,398,640,232,000
39.290566
133
0.583966
false
3.926811
false
false
false
labkode/rtlv
handlers.py
1
6340
from google.appengine.ext.db import BadValueError from google.appengine.api import channel from google.appengine.api import users from google.appengine.ext import ndb import webapp2 import jinja2 import os import json from datetime import datetime import time from models import Log from models import System JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), extensions=['jinja2.ext.autoescape'], autoescape=True) class MainHandler(webapp2.RequestHandler): def get(self): user = users.get_current_user() systems = System.query().fetch() template_values = {"systems": systems, "user": user, "users": users} template = JINJA_ENVIRONMENT.get_template("templates/index.html") self.response.write(template.render(template_values)) class SystemHandler(webapp2.RequestHandler): def get(self): user = users.get_current_user() system_param = self.request.get('system') if not system_param: template = JINJA_ENVIRONMENT.get_template("templates/not_found.html") template_values = {"user": user, "users": users, "not_found_msg": "Please select a system"} self.response.write(template.render(template_values)) return system = System.get_by_id(system_param) if system is None: template = JINJA_ENVIRONMENT.get_template("templates/not_found.html") template_values = {"user": user, "users": users, "not_found_msg": "The system #{0} not exists".format(system_param)} self.response.write(template.render(template_values)) return #logs = Log.query(ancestor = system.key).fetch() logs = [] template_values = {"system":system, "logs": logs, "token": channel.create_channel(system.key.id()), "user": user, "users": users} template = JINJA_ENVIRONMENT.get_template("templates/logs.html") self.response.write(template.render(template_values)) return class AdminSystemListHandler(webapp2.RequestHandler): def get(self): user = users.get_current_user() if not user: self.redirect(users.create_login_url()) systems = System.query().fetch() template_values = {"systems": systems, "message":{"type":"success", "payload":""},"user": user, "users": users} template = JINJA_ENVIRONMENT.get_template("templates/list_system.html") self.response.write(template.render(template_values)) return class AdminSystemCreateHandler(webapp2.RequestHandler): def get(self): user = users.get_current_user() if not user: self.redirect(users.create_login_url()) template = JINJA_ENVIRONMENT.get_template("templates/create_system.html") self.response.write(template.render({"user": user, "users": users})) return def post(self): user = users.get_current_user() if not user: self.redirect(users.create_login_url()) system_name = self.request.get("name") system_description = self.request.get("description") system = System(id = system_name, description = system_description) key = system.put() # This is correct but is a hack, other solution is to use a sleep() must_stop = False systems = [] while not must_stop: systems = System.query().fetch() for system in systems: if system.key.id() == system_name: must_stop = True systems = System.query().fetch() template_values = {"systems": systems,"message":{"type":"success", "payload":"Created system #{0}".format(key.id())}, "user": user, "users": users} template = JINJA_ENVIRONMENT.get_template("templates/list_system.html") self.response.write(template.render(template_values)) return class AdminSystemDeleteHandler(webapp2.RequestHandler): def post(self): user = users.get_current_user() if not user: self.redirect(users.create_login_url()) system_id = self.request.get("system") if not system_id: 
            template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
            template_values = {"user": user, "users": users, "not_found_msg": "Please select a system"}
            self.response.write(template.render(template_values))
            return
        sys = System.get_by_id(system_id)
        if sys is None:
            template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
            template_values = {"user": user, "users": users, "not_found_msg": "The system #{0} does not exist".format(system_id)}
            self.response.write(template.render(template_values))
            return
        sys.key.delete()
        # Hack to avoid a sleep(): poll until the eventually consistent
        # query no longer returns the deleted system
        found = True
        systems = []
        while found:
            found = False
            systems = System.query().fetch()
            for system in systems:
                if system.key.id() == sys.key.id():
                    found = True
                    break
        systems = System.query().fetch()
        template_values = {"systems": systems, "message": {"type": "success", "payload": "Deleted system #{0}".format(system_id)}, "user": user, "users": users}
        template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
        self.response.write(template.render(template_values))
        return

class AdminLogHandler(webapp2.RequestHandler):
    def post(self):
        try:
            log_param = json.loads(self.request.body)
        except ValueError as e:
            self.response.out.write(e)
            self.response.set_status(400)
            return
        except:
            self.response.set_status(500)
            return
        if not isinstance(log_param, list):
            log_param = [log_param]
        for log_item in log_param:
            log_system = log_item.get("system")
            if not log_system:
                self.response.out.write("System not found")
                self.response.set_status(404)
                return
            system = System.get_by_id(log_system)
            if not system:
                self.response.out.write("System not found")
                self.response.set_status(404)
                return
            try:
                log_key = ndb.Key("Log", log_item.get("id"), parent=system.key)
                log_msg = log_item.get("msg")
                log_level = log_item.get("level")
                log_ts = log_item.get("ts")
                log = Log(key=log_key, msg=log_msg, level=log_level, ts=log_ts)
                # CHANNEL API
                channel.send_message(system.key.id(), json.dumps(log.to_dict()))
            except BadValueError as e:
                self.response.out.write(e)
                self.response.set_status(400)
                return
        return

class HelpHandler(webapp2.RequestHandler):
    def get(self):
        user = users.get_current_user()
        template_values = {"user": user, "users": users}
        template = JINJA_ENVIRONMENT.get_template("templates/help.html")
        self.response.write(template.render(template_values))
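The polling loops in the create and delete handlers exist because System.query() is eventually consistent on App Engine: a freshly written or deleted entity may not appear in the next fetch. A hedged alternative sketch, not what the app does: since the handler already knows exactly which entity changed, it can patch the query result locally instead of spinning until the index catches up (both function names here are illustrative):

def systems_after_create(created_system):
    # query() may lag behind the put(); splice the known entity in
    systems = System.query().fetch()
    if created_system.key.id() not in [s.key.id() for s in systems]:
        systems.append(created_system)
    return systems

def systems_after_delete(deleted_key):
    # query() may still return the deleted entity; filter it out
    systems = System.query().fetch()
    return [s for s in systems if s.key.id() != deleted_key.id()]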
agpl-3.0
5,098,910,542,018,449,000
29.926829
151
0.699211
false
3.192346
false
false
false
incuna/authentic
authentic2/auth2_auth/auth2_openid/views.py
1
17205
import urllib

from django_authopenid.forms import OpenidDissociateForm, AssociateOpenID
from django_authopenid.forms import OpenidSigninForm
from django_authopenid import DjangoOpenIDStore
from django_authopenid.models import UserAssociation
from django_authopenid.utils import *
from django_authopenid.views import associate_failure, complete
from django_authopenid.views import _build_context, signin_failure, not_authenticated

from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.encoding import smart_unicode
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.generic.simple import redirect_to
from django.contrib import messages

from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.yadis import xri

from authentic2.auth2_auth.auth2_openid import *

# from_openid_response, get_url_host, discover_extensions, sreg, ax and the
# auth_oidlogin signal appear to come in through the wildcard imports above.

# Provider url templates a user may not enter verbatim as an OpenID url. The
# original list was missing two commas, which silently fused neighbouring
# entries through implicit string concatenation.
OPENID_PROVIDER = ['https://me.yahoo.com//', 'http://openid.aol.com/', 'http://.myopenid.com/',
                   'http://.livejournal.com/', 'http://www.flickr.com/photos//', 'http://.wordpress.com/',
                   'http://.blogspot.com/', 'http://.pip.verisignlabs.com/', 'http://.myvidoop.com/',
                   'http://.pip.verisignlabs.com/', 'http://claimid.com/']


def signin_success(request, identity_url, openid_response,
                   redirect_field_name=REDIRECT_FIELD_NAME, **kwargs):
    """
    openid signin success.

    If the openid is already registered, the user is redirected to the url
    set by next or in settings with the OPENID_REDIRECT_NEXT variable. If
    none of these urls are set the user is redirected to /. If the openid
    isn't registered the user is redirected to the register page.
    """
    openid_ = from_openid_response(openid_response)
    openids = request.session.get('openids', [])
    openids.append(openid_)
    request.session['openids'] = openids
    request.session['openid'] = openid_
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    try:
        rel = UserAssociation.objects.get(openid_url__exact=str(openid_))
    except:
        # try to register this new user
        if not redirect_to:  # or '//' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        params = urllib.urlencode({redirect_field_name: redirect_to})
        redirect_to = "%s?%s" % (reverse('user_register'), params)
        return HttpResponseRedirect(redirect_to)
    user_ = rel.user
    if user_.is_active:
        user_.backend = "django.contrib.auth.backends.ModelBackend"
        login(request, user_)
    if not redirect_to:  # or '//' in redirect_to or ' ' in redirect_to:
        redirect_to = settings.LOGIN_REDIRECT_URL
    return HttpResponseRedirect(redirect_to)


def mycomplete(request, on_success=None, on_failure=None, return_to=None,
               **kwargs):
    on_success = on_success or default_on_success
    on_failure = on_failure or default_on_failure
    consumer = Consumer(request.session, DjangoOpenIDStore())
    # make sure params are encoded in utf8
    params = dict((k, smart_unicode(v)) for k, v in request.GET.items())
    openid_response = consumer.complete(params, return_to)
    # 'openid.identity' is a query parameter, not an attribute, so test
    # membership instead of the original hasattr() (which always failed)
    if 'openid.identity' not in request.GET:
        _openid_url = 'None'
    else:
        _openid_url = request.GET['openid.identity']
    if openid_response.status == SUCCESS:
        auth_oidlogin.send(sender=None, openid_url=_openid_url, state='success')
        return on_success(request, openid_response.identity_url,
                          openid_response, **kwargs)
    elif openid_response.status == CANCEL:
        auth_oidlogin.send(sender=None, openid_url=_openid_url, state='cancel')
        return on_failure(request, 'The request was canceled', **kwargs)
    elif openid_response.status == FAILURE:
        auth_oidlogin.send(sender=None, openid_url=_openid_url, state='failure')
        return on_failure(request, openid_response.message, **kwargs)
    elif openid_response.status == SETUP_NEEDED:
        auth_oidlogin.send(sender=None, openid_url=_openid_url, state='setup_needed')
        return on_failure(request, 'Setup needed', **kwargs)
    else:
        assert False, "Bad openid status: %s" % openid_response.status


@csrf_exempt
def complete_signin(request, redirect_field_name=REDIRECT_FIELD_NAME,
                    openid_form=OpenidSigninForm, auth_form=AuthenticationForm,
                    on_success=signin_success, on_failure=signin_failure,
                    extra_context=None):
    _openid_form = openid_form
    _auth_form = auth_form
    _extra_context = extra_context
    return mycomplete(request, on_success, on_failure,
                      get_url_host(request) + reverse('user_complete_signin'),
                      redirect_field_name=redirect_field_name,
                      openid_form=_openid_form, auth_form=_auth_form,
                      extra_context=_extra_context)


def ask_openid(request, openid_url, redirect_to, on_failure=None):
    on_failure = on_failure or signin_failure
    sreg_req = None
    ax_req = None
    _openid_url = openid_url
    trust_root = getattr(settings, 'OPENID_TRUST_ROOT',
                         get_url_host(request) + '/')
    if xri.identifierScheme(openid_url) == 'XRI' and getattr(
            settings, 'OPENID_DISALLOW_INAMES', False):
        msg = ("i-names are not supported")
        auth_oidlogin.send(sender=None, openid_url=_openid_url,
                           state='not_supported')
        return on_failure(request, msg)
    consumer = Consumer(request.session, DjangoOpenIDStore())
    try:
        auth_request = consumer.begin(openid_url)
    except DiscoveryFailure:
        msg = ("The OpenID %s was invalid") % openid_url
        auth_oidlogin.send(sender=None, openid_url=_openid_url,
                           state='invalid')
        return on_failure(request, msg)
    # get capabilities
    use_ax, use_sreg = discover_extensions(openid_url)
    if use_sreg:
        # set sreg extension
        # we always ask for nickname and email
        sreg_attrs = getattr(settings, 'OPENID_SREG', {})
        sreg_attrs.update({"optional": ['nickname', 'email']})
        sreg_req = sreg.SRegRequest(**sreg_attrs)
    if use_ax:
        # set ax extension
        # we always ask for nickname and email
        ax_req = ax.FetchRequest()
        ax_req.add(ax.AttrInfo('http://schema.openid.net/contact/email',
                               alias='email', required=True))
        ax_req.add(ax.AttrInfo('http://schema.openid.net/namePerson/friendly',
                               alias='nickname', required=True))
        # add custom ax attrs
        ax_attrs = getattr(settings, 'OPENID_AX', [])
        for attr in ax_attrs:
            if len(attr) == 2:
                # the original read `required=alias[1]`, a NameError: the
                # (uri, required) pair comes from the current `attr` entry
                ax_req.add(ax.AttrInfo(attr[0], required=attr[1]))
            else:
                ax_req.add(ax.AttrInfo(attr[0]))
    if sreg_req is not None:
        auth_request.addExtension(sreg_req)
    if ax_req is not None:
        auth_request.addExtension(ax_req)
    redirect_url = auth_request.redirectURL(trust_root, redirect_to)
    return HttpResponseRedirect(redirect_url)


@csrf_exempt
@not_authenticated
def signin(request, template_name='authopenid/signin.html',
           redirect_field_name=REDIRECT_FIELD_NAME, openid_form=OpenidSigninForm,
           auth_form=AuthenticationForm, on_failure=None, extra_context=None):
    if on_failure is None:
        on_failure = signin_failure
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    form1 = openid_form()
    form2 = auth_form()
    if request.POST:
        if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
            redirect_to = settings.LOGIN_REDIRECT_URL
        if 'openid_url' in request.POST.keys():
            form1 = openid_form(data=request.POST)
            if form1.is_valid():
                redirect_url = "%s%s?%s" % (
                    get_url_host(request),
                    reverse('user_complete_signin'),
                    urllib.urlencode({redirect_field_name: redirect_to})
                )
                return ask_openid(request, form1.cleaned_data['openid_url'],
                                  redirect_url, on_failure=on_failure)
        else:
            # perform normal django authentication
            form2 = auth_form(data=request.POST)
            if form2.is_valid():
                login(request, form2.get_user())
                if request.session.test_cookie_worked():
                    request.session.delete_test_cookie()
                return HttpResponseRedirect(redirect_to)
    return render_to_response(template_name, {
        'form1': form1,
        'form2': form2,
        redirect_field_name: redirect_to,
        'msg': request.GET.get('msg', '')
    }, context_instance=_build_context(request, extra_context=extra_context))


@csrf_exempt
@login_required
def dissociate(request, template_name="authopenid/dissociate.html",
               dissociate_form=OpenidDissociateForm,
               redirect_field_name=REDIRECT_FIELD_NAME,
               default_redirect=settings.LOGIN_REDIRECT_URL, extra_context=None):
    """ view used to dissociate an openid from an account """
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    if nb_associated_openids == 1 and not request.user.has_usable_password() \
            and request.method != 'GET':
        msg = ["You can't remove this openid, you should set a password first."]
        return render_to_response("authopenid/associate.html", {
            'associated_openids': associated_openids,
            'nb_associated_openids': nb_associated_openids,
            'msg': msg}, context_instance=RequestContext(request))
    if request.POST:
        if request.POST.get('bdissociate_cancel', '') == 'Cancel':
            msg = ['Operation cancelled.']
            return redirect_to(request, '/accounts/openid/associate/')
        openid_urls = request.POST.getlist('a_openids_remove')
        if len(openid_urls) >= 1:
            for openid_url in openid_urls:
                UserAssociation.objects.get(openid_url__exact=openid_url).delete()
                if openid_url == request.session.get('openid_url'):
                    del request.session['openid_url']
            msg = "Openid removed."
            request.user.message_set.create(message=msg)
        return redirect_to(request, '/accounts/openid/associate')
    else:
        return redirect_to(request, '/accounts/openid/associate')


@login_required
def associate(request, template_name='authopenid/associate.html',
              openid_form=AssociateOpenID, redirect_field_name='/',
              on_failure=associate_failure, extra_context=None):
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    redirect_to = request.REQUEST.get(redirect_field_name, '')
    if request.POST:
        if 'a_openids' in request.POST.keys():
            a_openids = []
            if request.POST.get('a_openids', '') != '':
                a_openids = request.POST.getlist('a_openids')
            if len(a_openids) == nb_associated_openids \
                    and not request.user.has_usable_password():
                if len(a_openids) > 1:
                    msg = ["You can't remove these openids, You should set a password first."]
                else:
                    msg = ["You can't remove this openid, You should set a password first."]
                return render_to_response('authopenid/associate.html', {
                    redirect_field_name: redirect_to,
                    'associated_openids': associated_openids,
                    'nb_associated_openids': nb_associated_openids,
                    'msg': msg,
                }, context_instance=_build_context(request, extra_context=extra_context))
            return render_to_response("authopenid/dissociate.html", {
                'a_openids': a_openids
            }, context_instance=RequestContext(request))
        else:
            form = openid_form(request.user, data=request.POST)
            if form.is_valid():
                if ' ' in form.cleaned_data['openid_url'] \
                        or form.cleaned_data['openid_url'] in OPENID_PROVIDER:
                    msg = ['You must enter a valid OpenID url']
                    return render_to_response('authopenid/associate.html', {
                        redirect_field_name: redirect_to,
                        'associated_openids': associated_openids,
                        'nb_associated_openids': nb_associated_openids,
                        'msg': msg,
                    }, context_instance=_build_context(request, extra_context=extra_context))
                if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
                    redirect_to = settings.LOGIN_REDIRECT_URL
                redirect_url = "%s%s?%s" % (
                    get_url_host(request),
                    reverse('user_complete_myassociate'),
                    urllib.urlencode({redirect_field_name: redirect_to})
                )
                return ask_openid(request, form.cleaned_data['openid_url'],
                                  redirect_url, on_failure=on_failure)
            else:
                msg = ['You must enter a valid OpenID url']
                return render_to_response('authopenid/associate.html', {
                    redirect_field_name: redirect_to,
                    'associated_openids': associated_openids,
                    'nb_associated_openids': nb_associated_openids,
                    'msg': msg,
                }, context_instance=_build_context(request, extra_context=extra_context))
    else:
        form = openid_form(request.user)
        msg = messages.get_messages(request)
        return render_to_response('authopenid/associate.html', {
            'form': form,
            redirect_field_name: redirect_to,
            'associated_openids': associated_openids,
            'nb_associated_openids': nb_associated_openids,
            'msg': msg,
        }, context_instance=_build_context(request, extra_context=extra_context))


@login_required
def associate_success(request, identity_url, openid_response,
                      redirect_field_name=REDIRECT_FIELD_NAME, send_email=True,
                      **kwargs):
    openid_ = from_openid_response(openid_response)
    openids = request.session.get('openids', [])
    openids.append(openid_)
    request.session['openids'] = openids
    uassoc = UserAssociation(openid_url=str(openid_), user_id=request.user.id)
    uassoc.save(send_email=send_email)
    redirect_to = '/accounts/openid/associate'
    nb_associated_openids, associated_openids = get_associate_openid(request.user)
    msg = ["Your Openid has been added"]
    return render_to_response("authopenid/associate.html", {
        'associated_openids': associated_openids,
        'nb_associated_openids': nb_associated_openids,
        'msg': msg}, context_instance=RequestContext(request))


@csrf_exempt
@login_required
def complete_associate(request, redirect_field_name=REDIRECT_FIELD_NAME,
                       template_failure='authopenid/associate.html',
                       openid_form=AssociateOpenID, redirect_name=None,
                       on_success=associate_success, on_failure=associate_failure,
                       send_email=True, extra_context=None):
    if request.method == 'GET':
        return mycomplete(request, on_success, on_failure,
                          get_url_host(request) + reverse('user_complete_myassociate'),
                          redirect_field_name=redirect_field_name,
                          openid_form=openid_form,
                          template_failure=template_failure,
                          redirect_name=redirect_name, send_email=send_email,
                          extra_context=extra_context)
    else:
        return associate(request, template_name='authopenid/associate.html',
                         openid_form=AssociateOpenID, redirect_field_name='/',
                         on_failure=associate_failure, extra_context=None)


def get_associate_openid(user):
    """ get the list of associated openids """
    rels = UserAssociation.objects.filter(user=user)
    associated_openids = [rel.openid_url for rel in rels]
    nb_associated_openids = len(associated_openids)
    return nb_associated_openids, associated_openids


def openid_profile(request, next, template_name='auth/openid_profile.html'):
    nb, associated_openids = get_associate_openid(request.user)
    return render_to_string(template_name, {
        'idp_openid': getattr(settings, 'IDP_OPENID', False),
        'associated_openids': associated_openids},
        RequestContext(request))
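
# ---------------------------------------------------------------------------
# Hedged configuration sketch (illustration only, not part of the original
# module): the Django settings that ask_openid() reads above. The setting
# names are the ones the code looks up (OPENID_TRUST_ROOT, OPENID_SREG,
# OPENID_AX, OPENID_DISALLOW_INAMES); all values are made up.
#
#   # settings.py
#   OPENID_TRUST_ROOT = 'https://sso.example.com/'   # assumed host
#   OPENID_DISALLOW_INAMES = True                    # reject XRI i-names
#   OPENID_SREG = {'required': ['email']}            # note: any 'optional'
#                                                    # key is overwritten by
#                                                    # ask_openid()'s defaults
#   OPENID_AX = [
#       ('http://schema.openid.net/contact/country/home', True),  # (uri, required)
#       ('http://schema.openid.net/birthDate',),                  # uri only
#   ]
# ---------------------------------------------------------------------------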
agpl-3.0
-8,868,549,658,912,582,000
45.25
112
0.626271
false
3.950631
false
false
false
espressomd/espresso
maintainer/benchmarks/lb.py
1
6553
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
Benchmark Lattice-Boltzmann fluid + Lennard-Jones particles
"""
import os
import sys
import numpy as np
from time import time
import argparse

parser = argparse.ArgumentParser(description="Benchmark LB simulations. "
                                 "Save the results to a CSV file.")
parser.add_argument("--particles_per_core", metavar="N", action="store",
                    type=int, default=125, required=False,
                    help="Number of particles per core")
parser.add_argument("--lb_sites_per_particle", metavar="N_LB", action="store",
                    type=float, default=28, required=False,
                    help="Number of LB sites per particle")
parser.add_argument("--volume_fraction", metavar="FRAC", action="store",
                    type=float, default=0.03, required=False,
                    help="Fraction of the simulation box volume occupied by "
                         "particles (range: [0.01-0.74], default: 0.03)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--output", metavar="FILEPATH", action="store",
                   type=str, required=False, default="benchmarks.csv",
                   help="Output file (default: benchmarks.csv)")
args = parser.parse_args()

# process and check arguments
n_iterations = 30
assert args.volume_fraction > 0, "volume_fraction must be a positive number"
assert args.volume_fraction < np.pi / (3 * np.sqrt(2)), \
    "volume_fraction exceeds the physical limit of sphere packing (~0.74)"

import espressomd

required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)

# System
#############################################################
system = espressomd.System(box_l=[1, 1, 1])

# Interaction parameters (Lennard-Jones)
#############################################################
lj_eps = 1.0  # LJ epsilon
lj_sig = 1.0  # particle diameter
lj_cut = lj_sig * 2**(1. / 6.)  # cutoff distance

# System parameters
#############################################################
n_proc = system.cell_system.get_state()['n_nodes']
n_part = n_proc * args.particles_per_core
# volume of N spheres with radius r: N * (4/3*pi*r^3)
box_l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3
         / args.volume_fraction)**(1. / 3.)
lb_grid = int(round(n_part * args.lb_sites_per_particle)**(1. / 3))
agrid = box_l / lb_grid
measurement_steps = int(max(120**3 / lb_grid**3, 50))

# System
#############################################################
system.box_l = 3 * (box_l,)

# PRNG seeds
#############################################################
# np.random.seed(1)

# Integration parameters
#############################################################
system.time_step = 0.01
system.cell_system.skin = 0.5
system.thermostat.turn_off()


#############################################################
#  Setup System                                             #
#############################################################

# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")

# Particle setup
#############################################################

# Warmup Integration
#############################################################
system.integrator.set_steepest_descent(
    f_max=0, gamma=0.001, max_displacement=0.01)

# warmup
while system.analysis.energy()["total"] > 0.1 * n_part:
    print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
    system.integrator.run(20)
print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()

system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)

# tuning and equilibration
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)
print("Tune skin: {}".format(system.cell_system.tune_skin(
    min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)

system.thermostat.turn_off()
print("lb sites", lb_grid, "agrid", agrid)
if "LBFluid" in dir(espressomd.lb):
    LBClass = espressomd.lb.LBFluid
elif "LBFluidWalberla" in dir(espressomd.lb):
    LBClass = espressomd.lb.LBFluidWalberla
else:
    raise Exception("LB not built in")

lbf = LBClass(agrid=agrid, dens=1, visc=1, tau=system.time_step, kT=1, seed=1)
system.actors.add(lbf)
print("lb shape", lbf.shape)
system.thermostat.set_lb(gamma=10, LB_fluid=lbf, seed=2)

# time integration loop
print("Timing every {} steps".format(measurement_steps))
main_tick = time()
all_t = []
for i in range(n_iterations):
    tick = time()
    system.integrator.run(measurement_steps)
    tock = time()
    t = (tock - tick) / measurement_steps
    print("step {}, time = {:.2e}, verlet: {:.2f}, energy: {:.2e}"
          .format(i, t, system.cell_system.get_state()["verlet_reuse"],
                  system.analysis.energy()["total"]))
    all_t.append(t)
main_tock = time()

# average time
all_t = np.array(all_t)
avg = np.average(all_t)
ci = 1.96 * np.std(all_t) / np.sqrt(len(all_t) - 1)
print("average: {:.3e} +/- {:.3e} (95% C.I.)".format(avg, ci))

cmd = " ".join(x for x in sys.argv[1:] if not x.startswith("--output"))
report = ('"{script}","{arguments}",{cores},{mean:.3e},'
          '{ci:.3e},{n},{dur:.1f}\n'.format(
              script=os.path.basename(sys.argv[0]), arguments=cmd,
              cores=n_proc, dur=main_tock - main_tick, n=measurement_steps,
              mean=avg, ci=ci))
if not os.path.isfile(args.output):
    report = ('"script","arguments","cores","mean","ci",'
              '"nsteps","duration"\n' + report)
with open(args.output, "a") as f:
    f.write(report)
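
# ---------------------------------------------------------------------------
# Hedged post-processing sketch (appended for illustration; not part of the
# original benchmark). It re-reads the CSV written above and prints one line
# per stored run. Only the header row written by this script is assumed.
# ---------------------------------------------------------------------------
import csv

with open(args.output) as f:
    for row in csv.DictReader(f):
        print("{script} ({arguments}) on {cores} core(s): "
              "{mean} s/step +/- {ci} s".format(**row))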
gpl-3.0
-8,576,075,647,164,855,000
34.808743
78
0.581566
false
3.377835
false
false
false
frontg8/frontg8lib
doc/ext/breathe/breathe/renderer/filter.py
1
37770
""" Filters ------- Filters are an interesting and somewhat challenging part of the code base. They are used for two different purposes: - To figure out which nodes in the xml hierarchy to start rendering from. These are called 'finder filters' or 'content filters'. This is done before rendering starts. - To figure out which nodes under a selected nodes in the xml hierarchy should be rendered. These are called 'render filters'. This is done during the render process with a test in the DoxygenToRstRendererFactory. General Implementation ~~~~~~~~~~~~~~~~~~~~~~ Filters are essential just tests to see if a node matches certain parameters that are needed to decide whether or not to include it in some output. As these filters are declared once and then used on multiple nodes, we model them as object hierarchies that encapsulate the required test and take a node (with its context) and return True or False. If you wanted a test which figures out if a node has the node_type 'memberdef' you might create the following object hierarchy: node_is_memberdef = InFilter(AttributeAccessor(Node(), 'node_type'), ['memberdef']) This reads from the inside out, as get the node, then get the node_type attribute from it, and see if the value of the attribute is in the list ['memberdef']. The Node() is called a 'Selector'. Parent() is also a selector. It means given the current context, work with the parent of the current node rather than the node itself. This allows you to frame tests in terms of a node's parent as well as the node which helps when we want nodes with particular parents and not others. The AttributeAccessor() is called an 'Accessor'. It wraps up an attempt to access a particular attribute on the selected node. There are quite a few different specific accessors but they can mostly be generalised with the AttributeAccessor. This code has evolved over time and initially the implementation involved specific accessor classes (which are still used in large parts of it.) The InFilter() is unsurprisingly called a 'Filter'. There are lots of different filters. Filters either act on the results of Accessors or on the results of other Filters and they always return True or False. The AndFilter and the OrFilter can be used to combine the outputs of other Filters with logical 'and' and 'or' operations. You can build up some pretty complex expressions with this level of freedom as you might imagine. The complexity is unfortunate but necessary as the nature of filtering the xml is quite complex. Finder Filters ~~~~~~~~~~~~~~ The implementation of the filters can change a little depending on how they are called. Finder filters are called from the breathe.finder.doxygen.index and breathe.finder.doxygen.compound files. They are called like this: # Descend down the hierarchy # ... if filter_.allow(node_stack): matches.append(self.data_object) # Keep on descending # ... This means that the result of the filter does not stop us descending down the hierarchy and testing more nodes. This simplifies the filters as they only have to return true for the exact nodes they are interested in and they don't have to worry about allowing the iteration down the hierarchy to continue for nodes which don't match. An example of a finder filter is: AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["group"]), InFilter(NameAccessor(Node()), ["mygroup"]) ) This says, return True for all the nodes of node_type 'compound' with 'kind' set to 'group' which have the name 'mygroup'. 
It returns false for everything else, but when a node matching this is found then it is added to the matches list by the code above. It is therefore relatively easy to write finder filters. If you have two separate node filters like the one above and you want to match on both of them then you can do: OrFilter( node_filter_1, node_filter_2 ) To combine them. Content Filters ~~~~~~~~~~~~~~~ Content filters are harder than the finder filters as they are responsible for halting the iteration down the hierarchy if they return false. This means that if you're interested in memberdef nodes with a particular attribute then you have to check for that but also include a clause which allows all other non-memberdef nodes to pass through as you don't want to interrupt them. This means you end up with filters like this: OrFilter( AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["group"]), InFilter(NameAccessor(Node()), ["mygroup"]) ), NotFilter( AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["group"]), ) ) ) Which is to say that we want to let through a compound, with kind group, with name 'mygroup' but we're also happy if the node is **not** a compund with kind group. Really we just don't want to let through any compounds with kind group with name other than 'mygroup'. As such, we can rephrase this as: NotFilter( AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["group"]), NotFilter(InFilter(NameAccessor(Node()), ["mygroup"])) ) ) Using logical manipulation we can rewrite this as: OrFilter( NotFilter(InFilter(NodeTypeAccessor(Node()), ["compound"])), NotFilter(InFilter(KindAccessor(Node()), ["group"])), InFilter(NameAccessor(Node()), ["mygroup"]) ) We reads: allow if it isn't a compound, or if it is a compound but doesn't have a 'kind' of 'group', but if it is a compound and has a 'kind' of 'group then only allow it if it is named 'mygroup'. Helper Syntax ~~~~~~~~~~~~~ Some of these filter declarations get a little awkward to read and write. They are not laid out in manner which reads smoothly. Additional helper methods and operator overloads have been introduced to help with this. AttributeAccessor objects are created in property methods on the Selector classes so: node.kind Where node has been declared as a Node() instance. Results in: AttributeAccessor(Node(), 'kind') The '==' and '!=' operators on the Accessors have been overloaded to return the appropriate filters so that: node.kind == 'group' Results in: InFilter(AttributeAccessor(Node(), 'kind'), ['kind']) We also override the binary 'and' (&), 'or' (|) and 'not' (~) operators in Python to apply AndFilters, OrFilters and NotFilters respectively. We have to override the binary operators as they actual 'and', 'or' and 'not' operators cannot be overridden. So: (node.node_type == 'compound') & (node.name == 'mygroup') Translates to: AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"])), InFilter(NameAccessor(Node()), ["mygroup"]) ) Where the former is hopefully more readable without sacrificing too much to the abstract magic of operator overloads. Operator Precedences & Extra Parenthesis '''''''''''''''''''''''''''''''''''''''' As the binary operators have a lower operator precedence than '==' and '!=' and some other operators we have to include additional parenthesis in the expressions to group them as we want. 
So instead of writing: node.node_type == 'compound' & node.name == 'mygroup' We have to write: (node.node_type == 'compound') & (node.name == 'mygroup') """ import six class UnrecognisedKindError(Exception): pass class Selector(object): @property def node_type(self): return NodeTypeAccessor(self) @property def kind(self): return AttributeAccessor(self, 'kind') @property def node_name(self): return AttributeAccessor(self, 'node_name') @property def name(self): return AttributeAccessor(self, 'name') @property def briefdescription(self): return AttributeAccessor(self, 'briefdescription') @property def detaileddescription(self): return AttributeAccessor(self, 'detaileddescription') @property def prot(self): return AttributeAccessor(self, 'prot') @property def valueOf(self): return AttributeAccessor(self, 'valueOf_') @property def id(self): return AttributeAccessor(self, 'id') class Ancestor(Selector): def __init__(self, generations): self.generations = generations def __call__(self, node_stack): return node_stack[self.generations] class Parent(Selector): def __call__(self, node_stack): return node_stack[1] class Node(Selector): def __call__(self, node_stack): return node_stack[0] class Accessor(object): def __init__(self, selector): self.selector = selector def __eq__(self, value): return InFilter(self, [value]) def __ne__(self, value): return NotFilter(InFilter(self, [value])) def is_one_of(self, collection): return InFilter(self, collection) def has_content(self): return HasContentFilter(self) def endswith(self, options): return EndsWithFilter(self, options) class NameAccessor(Accessor): def __call__(self, node_stack): return self.selector(node_stack).name class NodeNameAccessor(Accessor): """Check the .node_name member which is declared on refTypeSub nodes It distinguishes between innerclass, innernamespace, etc. """ def __call__(self, node_stack): return self.selector(node_stack).node_name class NodeTypeAccessor(Accessor): def __call__(self, node_stack): data_object = self.selector(node_stack) try: return data_object.node_type except AttributeError as e: # Horrible hack to silence errors on filtering unicode objects # until we fix the parsing if type(data_object) == six.text_type: return "unicode" else: raise e class KindAccessor(Accessor): def __call__(self, node_stack): return self.selector(node_stack).kind class AttributeAccessor(Accessor): """Returns the value of a particular attribute on the selected node. AttributeAccessor(Node(), 'name') returns the value of ``node.name``. 
""" def __init__(self, selector, attribute_name): Accessor.__init__(self, selector) self.attribute_name = attribute_name def __call__(self, node_stack): return getattr(self.selector(node_stack), self.attribute_name) class LambdaAccessor(Accessor): def __init__(self, selector, func): Accessor.__init__(self, selector) self.func = func def __call__(self, node_stack): return self.func(self.selector(node_stack)) class NamespaceAccessor(Accessor): def __call__(self, node_stack): return self.selector(node_stack).namespaces class Filter(object): def __and__(self, other): return AndFilter(self, other) def __or__(self, other): return OrFilter(self, other) def __invert__(self): return NotFilter(self) class HasAncestorFilter(Filter): def __init__(self, generations): self.generations = generations def allow(self, node_stack): return len(node_stack) > self.generations class HasContentFilter(Filter): def __init__(self, accessor): self.accessor = accessor def allow(self, node_stack): """Detects if the node in questions has an empty .content_ property. """ return bool(self.accessor(node_stack).content_) class EndsWithFilter(Filter): """Detects if the string result of the accessor ends with any of the strings in the ``options`` iterable parameter. """ def __init__(self, accessor, options): self.accessor = accessor self.options = options def allow(self, node_stack): string = self.accessor(node_stack) for entry in self.options: if string.endswith(entry): return True return False class InFilter(Filter): """Checks if what is returned from the accessor is 'in' in the members""" def __init__(self, accessor, members): self.accessor = accessor self.members = members def allow(self, node_stack): name = self.accessor(node_stack) return name in self.members class GlobFilter(Filter): def __init__(self, accessor, glob): self.accessor = accessor self.glob = glob def allow(self, node_stack): text = self.accessor(node_stack) return self.glob.match(text) class FilePathFilter(Filter): def __init__(self, accessor, target_file, path_handler): self.accessor = accessor self.target_file = target_file self.path_handler = path_handler def allow(self, node_stack): location = self.accessor(node_stack).file if self.path_handler.includes_directory(self.target_file): # If the target_file contains directory separators then # match against the same length at the end of the location # location_match = location[-len(self.target_file):] return location_match == self.target_file else: # If there are no separators, match against the whole filename # at the end of the location # # This is to prevent "Util.cpp" matching "PathUtil.cpp" # location_basename = self.path_handler.basename(location) return location_basename == self.target_file class NamespaceFilter(Filter): def __init__(self, namespace_accessor, name_accessor): self.namespace_accessor = namespace_accessor self.name_accessor = name_accessor def allow(self, node_stack): namespaces = self.namespace_accessor(node_stack) name = self.name_accessor(node_stack) try: namespace, name = name.rsplit("::", 1) except ValueError: namespace, name = "", name return namespace in namespaces class OpenFilter(Filter): def allow(self, node_stack): return True class ClosedFilter(Filter): def allow(self, node_stack): return False class NotFilter(Filter): def __init__(self, child_filter): self.child_filter = child_filter def allow(self, node_stack): return not self.child_filter.allow(node_stack) class AndFilter(Filter): def __init__(self, *filters): self.filters = filters def allow(self, node_stack): # If any 
filter returns False then return False for filter_ in self.filters: if not filter_.allow(node_stack): return False return True class OrFilter(Filter): """Provides a short-cutted 'or' operation between two filters""" def __init__(self, *filters): self.filters = filters def allow(self, node_stack): # If any filter returns True then return True for filter_ in self.filters: if filter_.allow(node_stack): return True return False class IfFilter(Filter): def __init__(self, condition, if_true, if_false): self.condition = condition self.if_true = if_true self.if_false = if_false def allow(self, node_stack): if self.condition.allow(node_stack): return self.if_true.allow(node_stack) else: return self.if_false.allow(node_stack) class Glob(object): def __init__(self, method, pattern): self.method = method self.pattern = pattern def match(self, name): return self.method(name, self.pattern) class Gather(object): def __init__(self, accessor, names): self.accessor = accessor self.names = names def allow(self, node_stack): self.names.extend(self.accessor(node_stack)) return False class FilterFactory(object): # C++ style public entries public_kinds = set([ "public-type", "public-func", "public-attrib", "public-slot", "public-static-func", "public-static-attrib", ]) def __init__(self, path_handler): self.path_handler = path_handler self.default_members = () self.implementation_filename_extensions = () def create_render_filter(self, kind, options): """Render filter for group & namespace blocks""" if kind not in ['group', 'namespace']: raise UnrecognisedKindError(kind) # Generate new dictionary from defaults filter_options = dict((entry, u'') for entry in self.default_members) # Update from the actual options filter_options.update(options) # Convert the doxygengroup members flag (which just stores None as the value) to an empty # string to allow the create_class_member_filter to process it properly if 'members' in filter_options: filter_options['members'] = u'' node = Node() grandparent = Ancestor(2) has_grandparent = HasAncestorFilter(2) non_class_memberdef = \ has_grandparent \ & (grandparent.node_type == 'compounddef') \ & (grandparent.kind != 'class') \ & (grandparent.kind != 'struct') \ & (node.node_type == 'memberdef') return (self.create_class_member_filter(filter_options) | non_class_memberdef) \ & self.create_innerclass_filter(filter_options) \ & self.create_outline_filter(filter_options) def create_class_filter(self, target, options): """Content filter for classes based on various directive options""" # Generate new dictionary from defaults filter_options = dict((entry, u'') for entry in self.default_members) # Update from the actual options filter_options.update(options) return AndFilter( self.create_class_member_filter(filter_options), self.create_innerclass_filter(filter_options, outerclass=target), self.create_outline_filter(filter_options), self.create_show_filter(filter_options), ) def create_innerclass_filter(self, options, outerclass=''): """ :param outerclass: Should be the class/struct being target by the directive calling this code. If it is a group or namespace directive then it should be left blank. It is used when looking for names listed in the :members: option. The name should include any additional namespaces that the target class is in. 
""" node = Node() node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass") parent = Parent() parent_is_compounddef = parent.node_type == 'compounddef' parent_is_class = parent.kind.is_one_of(['class', 'struct']) allowed = set() all_options = { 'protected-members': 'protected', 'private-members': 'private', } for option, scope in all_options.iteritems(): if option in options: allowed.add(scope) node_is_innerclass_in_class = parent_is_compounddef & parent_is_class & node_is_innerclass public_innerclass_filter = ClosedFilter() if 'members' in options: if options['members'].strip(): text = options["members"] prefix = ('%s::' % outerclass) if outerclass else '' # Matches sphinx-autodoc behaviour of comma separated values members = set(['%s%s' % (prefix, x.strip()) for x in text.split(",")]) node_valueOf_is_in_members = node.valueOf.is_one_of(members) # Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only # accept them if their names are in the members list public_innerclass_filter = ~node_is_innerclass_in_class | node_valueOf_is_in_members else: allowed.add('public') node_is_in_allowed_scope = node.prot.is_one_of(allowed) innerclass = ~ node_is_innerclass_in_class | node_is_in_allowed_scope description = self._create_description_filter(True, 'compounddef', options) # Put parent check last as we only want to check parents of innerclass's otherwise we have # to check the parent's type as well return innerclass | public_innerclass_filter | description def create_show_filter(self, options): """Currently only handles the header-file entry""" try: text = options["show"] except KeyError: # Allow through everything except the header-file includes nodes return OrFilter( NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])), NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"])) ) if text == "header-file": # Allow through everything, including header-file includes return OpenFilter() # Allow through everything except the header-file includes nodes return OrFilter( NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])), NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"])) ) def _create_description_filter(self, allow, level, options): """Whether or not we allow descriptions is determined by the calling function and we just do whatever the 'allow' function parameter tells us. 
""" node = Node() node_is_description = node.node_type == 'description' parent = Parent() parent_is_level = parent.node_type == level # Nothing with a parent that's a sectiondef description_filter = ~ parent_is_level # Let through any description children of sectiondefs if we output any kind members if allow: description_filter = \ (parent_is_level & node_is_description) | ~ parent_is_level return description_filter def _create_public_members_filter(self, options): node = Node() node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == "public" parent = Parent() parent_is_sectiondef = parent.node_type == "sectiondef" # Nothing with a parent that's a sectiondef is_memberdef = parent_is_sectiondef & node_is_memberdef public_members_filter = ~ is_memberdef # If the user has specified the 'members' option with arguments then we only pay attention # to that and not to any other member settings if "members" in options: if options['members'].strip(): text = options["members"] # Matches sphinx-autodoc behaviour of comma separated values members = set([x.strip() for x in text.split(",")]) node_name_is_in_members = node.name.is_one_of(members) # Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only # accept them if their names are in the members list public_members_filter = \ (parent_is_sectiondef & node_name_is_in_members) | ~ parent_is_sectiondef else: # Select anything that doesn't have a parent which is a sectiondef, or, if it does, # only select the public ones public_members_filter = \ (is_memberdef & node_is_public) | ~ is_memberdef return public_members_filter def _create_non_public_members_filter(self, prot, option_name, options): """'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories.""" node = Node() node_is_memberdef = node.node_type == "memberdef" node_is_public = node.prot == prot parent = Parent() parent_is_sectiondef = parent.node_type == "sectiondef" # Nothing with a parent that's a sectiondef is_memberdef = parent_is_sectiondef & node_is_memberdef filter_ = ~ is_memberdef if option_name in options: # Allow anything that isn't a memberdef, or if it is only allow the public ones filter_ = ~ is_memberdef | node_is_public return filter_ def _create_undoc_members_filter(self, options): node = Node() node_is_memberdef = node.node_type == 'memberdef' node_has_description = node.briefdescription.has_content() \ | node.detaileddescription.has_content() # Allow anything that isn't a memberdef, or if it is only allow the ones with a description undoc_members_filter = ~ node_is_memberdef | node_has_description if 'undoc-members' in options: undoc_members_filter = OpenFilter() return undoc_members_filter def create_class_member_filter(self, options): """Content filter based on :members: and :private-members: classes""" # I can't fully explain the filtering of descriptions here. More testing needed to figure # out when it is needed. This approach reflects the old code that was here but it wasn't # commented (my fault.) I wonder if maybe the public and private declarations themselves can # be documented and we need to let them through. Not sure. 
allow = 'members' in options \ or 'protected-members' in options \ or 'private-members' in options description = self._create_description_filter(allow, 'sectiondef', options) # Create all necessary filters and combine them public_members = self._create_public_members_filter(options) protected_members = self._create_non_public_members_filter( 'protected', 'protected-members', options ) private_members = self._create_non_public_members_filter( 'private', 'private-members', options ) undoc_members = self._create_undoc_members_filter(options) # Allow any public/private members which also fit the undoc filter and all the descriptions allowed_members = (public_members | protected_members | private_members) & undoc_members return allowed_members | description def create_outline_filter(self, options): if 'outline' in options: node = Node() return ~ node.node_type.is_one_of(["description", "inc"]) else: return OpenFilter() def create_file_filter(self, filename, options): valid_names = [] filter_ = AndFilter( NotFilter( # Gather the "namespaces" attribute from the # compounddef for the file we're rendering and # store the information in the "valid_names" list # # Gather always returns false, so, combined with # the NotFilter this chunk always returns true and # so does not affect the result of the filtering AndFilter( InFilter(NodeTypeAccessor(Node()), ["compounddef"]), InFilter(KindAccessor(Node()), ["file"]), FilePathFilter( LambdaAccessor(Node(), lambda x: x.location), filename, self.path_handler ), Gather(LambdaAccessor(Node(), lambda x: x.namespaces), valid_names) ) ), NotFilter( # Take the valid_names and everytime we handle an # innerclass or innernamespace, check that its name # was one of those initial valid names so that we # never end up rendering a namespace or class that # wasn't in the initial file. Notably this is # required as the location attribute for the # namespace in the xml is unreliable. AndFilter( InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), InFilter(NodeTypeAccessor(Node()), ["ref"]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), NotFilter( InFilter( LambdaAccessor(Node(), lambda x: x.content_[0].getValue()), valid_names ) ) ) ), NotFilter( # Ignore innerclasses and innernamespaces that are inside a # namespace that is going to be rendered as they will be # rendered with that namespace and we don't want them twice AndFilter( InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), InFilter(NodeTypeAccessor(Node()), ["ref"]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]), NamespaceFilter( NamespaceAccessor(Parent()), LambdaAccessor(Node(), lambda x: x.content_[0].getValue()) ) ) ), NotFilter( # Ignore memberdefs from files which are different to # the one we're rendering. 
This happens when we have to # cross into a namespace xml file which has entries # from multiple files in it AndFilter( InFilter(NodeTypeAccessor(Node()), ["memberdef"]), NotFilter( FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename, self.path_handler) ) ) ), NotFilter( # Ignore compounddefs which are from another file # (normally means classes and structs which are in a # namespace that we have other interests in) but only # check it if the compounddef is not a namespace # itself, as for some reason compounddefs for # namespaces are registered with just a single file # location even if they namespace is spread over # multiple files AndFilter( InFilter(NodeTypeAccessor(Node()), ["compounddef"]), NotFilter(InFilter(KindAccessor(Node()), ["namespace"])), NotFilter( FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename, self.path_handler) ) ) ) ) return AndFilter( self.create_outline_filter(options), filter_ ) def create_content_filter(self, kind, options): """Returns a filter which matches the contents of the or namespace but not the group or namepace name or description. This allows the groups to be used to structure sections of the documentation rather than to structure and further document groups of documentation As a finder/content filter we only need to match exactly what we're interested in. """ if kind not in ['group', 'namespace']: raise UnrecognisedKindError(kind) node = Node() # Filter for public memberdefs node_is_memberdef = node.node_type == 'memberdef' node_is_public = node.prot == 'public' public_members = node_is_memberdef & node_is_public # Filter for public innerclasses parent = Parent() parent_is_compounddef = parent.node_type == 'compounddef' parent_is_class = parent.kind == kind node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass") node_is_public = node.prot == 'public' public_innerclass = parent_is_compounddef & parent_is_class \ & node_is_innerclass & node_is_public return public_members | public_innerclass def create_index_filter(self, options): filter_ = AndFilter( NotFilter( AndFilter( InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), InFilter(NodeTypeAccessor(Node()), ["ref"]), InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]) ) ), NotFilter( AndFilter( InFilter(NodeTypeAccessor(Parent()), ["compounddef"]), InFilter(KindAccessor(Parent()), ["group"]), InFilter(NodeTypeAccessor(Node()), ["sectiondef"]), InFilter(KindAccessor(Node()), ["func"]) ) ) ) return AndFilter( self.create_outline_filter(options), filter_ ) def create_open_filter(self): """Returns a completely open filter which matches everything""" return OpenFilter() def create_id_filter(self, node_type, refid): node = Node() return (node.node_type == node_type) & (node.id == refid) def create_file_finder_filter(self, filename): filter_ = AndFilter( InFilter(NodeTypeAccessor(Node()), ["compounddef"]), InFilter(KindAccessor(Node()), ["file"]), FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename, self.path_handler) ) return filter_ def create_member_finder_filter(self, namespace, name, kind): """Returns a filter which looks for a member with the specified name and kind.""" node = Node() parent = Parent() node_matches = (node.node_type == 'member') \ & (node.kind == kind) \ & (node.name == name) if namespace: parent_matches = (parent.node_type == 'compound') \ & ((parent.kind == 'namespace') | (parent.kind == 'class')) \ & (parent.name == namespace) return parent_matches & node_matches else: 
is_implementation_file = parent.name.endswith(self.implementation_filename_extensions) parent_is_compound = parent.node_type == 'compound' parent_is_file = (parent.kind == 'file') & (~ is_implementation_file) parent_is_not_file = parent.kind != 'file' return (parent_is_compound & parent_is_file & node_matches) \ | (parent_is_compound & parent_is_not_file & node_matches) def create_function_finder_filter(self, namespace, name): parent = Parent() parent_is_compound = parent.node_type == 'compound' parent_is_group = parent.kind == 'group' function_filter = self.create_member_finder_filter(namespace, name, 'function') # Get matching functions but only ones where the parent is not a group. We want to skip # function entries in groups as we'll find the same functions in a file's xml output # elsewhere and having more than one match is confusing for our logic later on. return function_filter & ~(parent_is_compound & parent_is_group) def create_enumvalue_finder_filter(self, name): """Returns a filter which looks for an enumvalue with the specified name.""" node = Node() return (node.node_type == 'enumvalue') & (node.name == name) def create_compound_finder_filter(self, name, kind): """Returns a filter which looks for a compound with the specified name and kind.""" node = Node() return (node.node_type == 'compound') & (node.kind == kind) & (node.name == name) def create_finder_filter(self, kind, name): """Returns a filter which looks for the compound node from the index which is a group node (kind=group) and has the appropriate name The compound node should reference the group file which we can parse for the group contents. """ if kind == 'group': filter_ = AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["group"]), InFilter(NameAccessor(Node()), [name]) ) else: # Assume kind == 'namespace' filter_ = AndFilter( InFilter(NodeTypeAccessor(Node()), ["compound"]), InFilter(KindAccessor(Node()), ["namespace"]), InFilter(NameAccessor(Node()), [name]) ) return filter_ def get_config_values(self, app): """Extract the breathe_default_members config value and store it. This method is called on the 'builder-init' event in Sphinx""" self.default_members = app.config.breathe_default_members self.implementation_filename_extensions = \ app.config.breathe_implementation_filename_extensions
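
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module):
# composing filters with the operator overloads described in the module
# docstring. The MockNode class is invented here purely to give the accessors
# something to read; real node objects come from breathe's doxygen parsers.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class MockNode(object):
        def __init__(self, node_type, kind, name):
            self.node_type = node_type
            self.kind = kind
            self.name = name

    node = Node()
    # "a compound of kind group named mygroup", as in the docstring example
    filter_ = (node.node_type == 'compound') \
        & (node.kind == 'group') \
        & (node.name == 'mygroup')

    stack = [MockNode('compound', 'group', 'mygroup')]
    print(filter_.allow(stack))   # True
    stack = [MockNode('compound', 'group', 'other')]
    print(filter_.allow(stack))   # False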
bsd-3-clause
-4,144,635,030,783,446,500
32.336275
100
0.610988
false
4.454535
false
false
false
jjgoings/McMurchie-Davidson
mmd/utils/spectrum.py
1
3029
"""Contains some routines to do the (Pade approximant) Fourier transform as
well as some peak-finding routines, useful for post processing a real-time
calculation
"""

from __future__ import division

import numpy as np


def genSpectra(time, dipole, signal):
    fw, frequency = pade(time, dipole)
    fw_sig, frequency = pade(time, signal)
    numerator = np.imag(fw)
    denominator = np.abs(fw_sig)
    spectra = ((4.0 * 27.21138602 * 2 * frequency * np.pi * (numerator))
               / (3.0 * 137.036 * denominator))
    return frequency, spectra


def pade(time, dipole):
    damp_const = 50.0
    dipole = np.asarray(dipole) - dipole[0]
    stepsize = time[1] - time[0]
    damp = np.exp(-(stepsize * np.arange(len(dipole))) / float(damp_const))
    dipole *= damp
    M = len(dipole)
    N = int(np.floor(M / 2))
    num_pts = 20000
    if N > num_pts:
        N = num_pts

    # G and d are (N-1) x (N-1)
    # d[k] = -dipole[N+k] for k in range(1,N)
    d = -dipole[N + 1:2 * N]

    try:
        from scipy.linalg import toeplitz, solve_toeplitz
    except ImportError:
        print("You'll need SciPy version >= 0.17.0")

    try:
        # Instead, form G = (c,r) as toeplitz
        # c = dipole[N:2*N-1]
        # r = np.hstack((dipole[1],dipole[N-1:1:-1]))
        b = solve_toeplitz((dipole[N:2 * N - 1],
                            np.hstack((dipole[1], dipole[N - 1:1:-1]))),
                           d, check_finite=False)
    except np.linalg.linalg.LinAlgError:
        # OLD CODE: sometimes more stable
        # G[k,m] = dipole[N - m + k] for m,k in range(1,N)
        G = dipole[N + np.arange(1, N)[:, None] - np.arange(1, N)]
        b = np.linalg.solve(G, d)

    # Now make b Nx1 where b0 = 1
    b = np.hstack((1, b))

    # b[m]*dipole[k-m] for k in range(0,N), for m in range(k)
    a = np.dot(np.tril(toeplitz(dipole[0:N])), b)
    p = np.poly1d(a)
    q = np.poly1d(b)

    # If you want energies greater than 2*27.2114 eV, you'll need to change
    # the default frequency range to something greater.
    frequency = np.arange(0.00, 2.0, 0.0001)
    W = np.exp(-1j * frequency * stepsize)
    fw = p(W) / q(W)
    return fw, frequency


def peaks(spectra, frequency, number=3, thresh=0.01):
    """ Return the peaks from the Fourier transform

    Variables:
    number:     integer. number of peaks to print.
    thresh:     float. Threshold intensity for printing.

    Returns: Energy (eV), Intensity (depends on type of spectra)
    """
    from scipy.signal import argrelextrema as pks
    # find all peak indices [idx], and remove those below thresh [jdx]
    idx = pks(np.abs(spectra), np.greater, order=3)
    jdx = np.where((np.abs(spectra[idx]) >= thresh))
    kdx = idx[0][jdx[0]]  # indices of peaks matching criteria
    if number > len(kdx):
        number = len(kdx)
    print("First " + str(number) + " peaks (eV) found: ")
    for i in xrange(number):
        print("{0:.4f}".format(frequency[kdx][i] * 27.2114),
              "{0:.4f}".format(spectra[kdx][i]))
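
# ---------------------------------------------------------------------------
# Hedged usage sketch (appended for illustration; not part of the original
# module). A synthetic dipole with two known oscillation frequencies (in
# atomic units) is fed through pade() and peaks(); the recovered peaks should
# sit near 0.25 and 0.60 a.u. (~6.8 and ~16.3 eV).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    t = np.arange(0.0, 250.0, 0.05)
    d = 0.8 * np.sin(0.25 * t) + 0.2 * np.sin(0.60 * t)
    fw, freq = pade(t, d)
    peaks(np.imag(fw), freq, number=2)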
bsd-3-clause
-264,727,656,188,925,760
29.908163
89
0.584021
false
2.987179
false
false
false
danielfrg/remote-pip
rpip/tests/test_output.py
1
1204
from rpip.output import Output

exit0 = {'exit_code': 0, 'stdout': 'yes', 'stderr': ''}
exit1 = {'exit_code': 1, 'stdout': '', 'stderr': 'ERROR'}

o0 = {'host1': exit0, 'host2': exit0, 'host3': exit0}
o1 = {'host1': exit0, 'host2': exit1, 'host3': exit0}
o2 = {'host1': exit0, 'host2': exit1, 'host3': exit1}


def test_groupby():
    o = Output(o0)
    groups = o.groupby()
    assert len(groups) == 1
    nodes, output = groups[0]
    assert len(nodes) == 3
    assert nodes == ['host3', 'host2', 'host1']
    assert output == exit0


def test_groupby2():
    o = Output(o1)
    groups = o.groupby()
    assert len(groups) == 2

    nodes, output = groups[0]
    assert len(nodes) == 2
    assert nodes == ['host3', 'host1']
    assert output == exit0

    nodes, output = groups[1]
    assert len(nodes) == 1
    assert nodes == ['host2']
    assert output == exit1


def test_groupby3():
    o = Output(o2)
    groups = o.groupby()
    assert len(groups) == 2

    nodes, output = groups[0]
    assert len(nodes) == 2
    assert nodes == ['host3', 'host2']
    assert output == exit1

    nodes, output = groups[1]
    assert len(nodes) == 1
    assert nodes == ['host1']
    assert output == exit0
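
# ---------------------------------------------------------------------------
# Hedged reference sketch (illustration only): a minimal groupby() that
# satisfies the expectations encoded in the tests above. The real
# rpip.output.Output may be implemented differently; this just documents the
# contract the tests imply: hosts with identical result dicts are grouped
# together, groups are ordered largest first, and host names inside a group
# sort in descending order.
# ---------------------------------------------------------------------------
class OutputSketch(object):

    def __init__(self, results):
        # results: {hostname: {'exit_code': .., 'stdout': .., 'stderr': ..}}
        self.results = results

    def groupby(self):
        groups = {}
        for host, output in self.results.items():
            key = tuple(sorted(output.items()))  # hashable stand-in for the dict
            groups.setdefault(key, []).append(host)
        return [(sorted(hosts, reverse=True), dict(key))
                for key, hosts in sorted(groups.items(),
                                         key=lambda kv: len(kv[1]),
                                         reverse=True)]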
apache-2.0
3,832,086,671,021,986,300
22.607843
57
0.575581
false
3.087179
false
false
false
namlook/mongokit
mongokit/schema_document.py
1
42677
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the University of California, Berkeley nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import bson
import datetime
import logging

from copy import deepcopy

log = logging.getLogger(__name__)

from mongokit.operators import SchemaOperator, IS
from mongokit.helpers import DotCollapsedDict
from mongokit.helpers import DotExpandedDict
from mongokit.helpers import i18nDotedDict
from mongokit.helpers import DotedDict

__all__ = [
    'AuthorizedTypeError',
    'BadKeyError',
    'CustomType',
    'DefaultFieldTypeError',
    'DotCollapsedDict',
    'DotedDict',
    'DotExpandedDict',
    'DuplicateDefaultValueError',
    'DuplicateRequiredError',
    'i18n',
    'i18nError',
    'ModifierOperatorError',
    'RequireFieldError',
    'SchemaDocument',
    'SchemaDocumentError',
    'SchemaProperties',
    'SchemaTypeError',
    'Set',
    'StructureError',
    'ValidationError',
]


class CustomType(object):
    init_type = None
    mongo_type = None
    python_type = None

    def __init__(self):
        if self.mongo_type is None:
            raise TypeError("`mongo_type` property must be specified in %s" %
                            self.__class__.__name__)
        if self.python_type is None:
            raise TypeError("`python_type` property must be specified in %s" %
                            self.__class__.__name__)

    def to_bson(self, value):
        """convert the python type to a mongodb type"""
        raise NotImplementedError

    def to_python(self, value):
        """convert a mongodb type back to a python type"""
        raise NotImplementedError

    def validate(self, value, path):
        """
        This method is optional. It adds a validation layer.
This method is been called in Document.validate() value: the value of the field path: the field name (ie, 'foo' or 'foo.bar' if nested) """ pass # field wich does not need to be declared into the structure STRUCTURE_KEYWORDS = [] class SchemaDocumentError(Exception): pass class RequireFieldError(SchemaDocumentError): pass class StructureError(SchemaDocumentError): pass class BadKeyError(SchemaDocumentError): pass class AuthorizedTypeError(SchemaDocumentError): pass class ValidationError(SchemaDocumentError): pass class DuplicateRequiredError(SchemaDocumentError): pass class DuplicateDefaultValueError(SchemaDocumentError): pass class ModifierOperatorError(SchemaDocumentError): pass class SchemaTypeError(SchemaDocumentError): pass class DefaultFieldTypeError(SchemaDocumentError): pass class i18nError(SchemaDocumentError): pass class DeprecationError(Exception): pass class DuplicateI18nError(Exception): pass class SchemaProperties(type): def __new__(mcs, name, bases, attrs): attrs['_protected_field_names'] = set( ['_protected_field_names', '_namespaces', '_required_namespace']) for base in bases: parent = base.__mro__[0] if not hasattr(parent, 'structure'): continue if parent.structure is not None: #parent = parent() if parent.structure: if 'structure' not in attrs and parent.structure: attrs['structure'] = parent.structure.copy() else: obj_structure = attrs.get('structure', {}).copy() attrs['structure'] = parent.structure.copy() attrs['structure'].update(obj_structure) if parent.required_fields: attrs['required_fields'] = list(set( attrs.get('required_fields', [])+parent.required_fields)) if parent.default_values: obj_default_values = attrs.get('default_values', {}).copy() attrs['default_values'] = parent.default_values.copy() attrs['default_values'].update(obj_default_values) if parent.validators: obj_validators = attrs.get('validators', {}).copy() attrs['validators'] = parent.validators.copy() attrs['validators'].update(obj_validators) if parent.i18n: attrs['i18n'] = list(set( attrs.get('i18n', [])+parent.i18n)) if attrs.get('authorized_types'): attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types']))) for mro in bases[0].__mro__: attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__)) attrs['_protected_field_names'] = list(attrs['_protected_field_names']) if attrs.get('structure') and name not in \ ["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]: base = bases[0] if not attrs.get('authorized_types'): attrs['authorized_types'] = base.authorized_types base._validate_structure(attrs['structure'], name, attrs.get('authorized_types')) attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure'])) if [1 for i in attrs['_namespaces'] if type(i) is type]: raise DeprecationError("%s: types are not allowed as structure key anymore" % name) mcs._validate_descriptors(attrs) ## building required fields namespace attrs['_required_namespace'] = set([]) for rf in attrs.get('required_fields', []): splited_rf = rf.split('.') for index in range(len(splited_rf)): attrs['_required_namespace'].add(".".join(splited_rf[:index+1])) attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True) elif attrs.get('structure') is not None and name not in \ ["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]: attrs['_collapsed_struct'] = {} attrs['_i18n_namespace'] = [] if attrs.get('i18n'): attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i 
in attrs['i18n']]) return type.__new__(mcs, name, bases, attrs) @classmethod def _validate_descriptors(mcs, attrs): # TODO i18n validator for dv in attrs.get('default_values', {}): if not dv in attrs['_namespaces']: raise ValueError("Error in default_values: can't find %s in structure" % dv) for required in attrs.get('required_fields', []): if required not in attrs['_namespaces']: raise ValueError("Error in required_fields: can't find %s in structure" % required) for validator in attrs.get('validators', {}): if validator not in attrs['_namespaces']: raise ValueError("Error in validators: can't find %s in structure" % validator) # required_field if attrs.get('required_fields'): if len(attrs['required_fields']) != len(set(attrs['required_fields'])): raise DuplicateRequiredError("duplicate required_fields: %s" % attrs['required_fields']) # i18n if attrs.get('i18n'): if len(attrs['i18n']) != len(set(attrs['i18n'])): raise DuplicateI18nError("duplicated i18n: %s" % attrs['i18n']) for _i18n in attrs['i18n']: if _i18n not in attrs['_namespaces']: raise ValueError("Error in i18n: can't find {} in structure".format(_i18n)) class SchemaDocument(dict): """ A SchemaDocument is a dictionary with a declared structured schema. The validate method will check that the document matches the underlying structure. A structure must be specified in each SchemaDocument. >>> class TestDoc(SchemaDocument): ... structure = { ... "foo":unicode, ... "bar":int, ... "nested":{ ... "bla":float}} `unicode`, `int`, `float` are python types listed in `mongokit.authorized_types`. >>> doc = TestDoc() >>> doc {'foo': None, 'bar': None, 'nested': {'bla': None}} A SchemaDocument works just like dict: >>> doc['bar'] = 3 >>> doc['foo'] = "test" We can describe fields as required with the required_fields attribute: >>> TestDoc.required_fields = ['bar', 'nested.bla'] >>> doc = TestDoc() >>> doc['bar'] = 2 Validation is done with the `validate()` method: >>> doc.validate() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Traceback (most recent call last): ... RequireFieldError: nested.bla is required Default values can be set by using the default_values attribute: >>> TestDoc.default_values = {"bar":3, "nested.bla":2.0} >>> doc = TestDoc() >>> doc {'foo': None, 'bar': 3, 'nested': {'bla': 2.0}} >>> doc.validate() Validators can be added in order to validate some values: >>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0} >>> doc = TestDoc() >>> doc['bar'] = 3 >>> doc['nested']['bla'] = 2.0 >>> doc.validate() Traceback (most recent call last): ... ValidationError: nested.bla does not pass the validator <lambda> If you want to use the dot notation (a la JSON), you must set the `use_dot_notation` attribute to True: >>> class TestDotNotation(SchemaDocument): ... structure = { ... "foo":{ "bar":unicode} ... } ... use_dot_notation=True >>> doc = TestDotNotation() >>> doc.foo.bar = u"bla" >>> doc {'foo': {'bar': u'bla'}} """ __metaclass__ = SchemaProperties structure = None required_fields = [] default_values = {} validators = {} i18n = [] raise_validation_errors = True skip_validation = False # set to True if you want all the schemaless benefits (default is False, but this may change) # warning: if use_schemaless is True, Migration features can not be used.
use_schemaless = False # If you want to use the dot notation, set this to True: use_dot_notation = False dot_notation_warning = False authorized_types = [ type(None), bool, int, long, float, unicode, basestring, list, dict, datetime.datetime, bson.binary.Binary, CustomType, ] def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'): """ doc : a dictionary gen_skel : if True, generate automatically the skeleton of the doc filled with NoneType each time validate() is called. Note that if doc is not {}, gen_skel is always False. If gen_skel is False, default_values cannot be filled. gen_auth_types: if True, generate automatically the self.authorized_types attribute from self.authorized_types """ super(SchemaDocument, self).__init__() if self.structure is None: self.structure = {} self._current_lang = lang self._fallback_lang = fallback_lang self.validation_errors = {} # init if doc: for k, v in doc.iteritems(): self[k] = v gen_skel = False if gen_skel: self.generate_skeleton() if self.default_values: self._set_default_fields(self, self.structure) else: self._process_custom_type('python', self, self.structure) if self.use_dot_notation: self.__generate_doted_dict(self, self.structure) if self.i18n: self._make_i18n() def generate_skeleton(self): """ validate and generate the skeleton of the document from the structure (unknown values are set to None) """ self.__generate_skeleton(self, self.structure) def validate(self): """ validate the document. This method will verify if : * the doc follow the structure, * all required fields are filled Additionally, this method will process all validators. """ if self.validators: self._process_validators(self, self.structure) self._process_custom_type('bson', self, self.structure) self._validate_doc(self, self.structure) self._process_custom_type('python', self, self.structure) if self.required_fields: self._validate_required(self, self.structure) def __setattr__(self, key, value): if key not in self._protected_field_names and self.use_dot_notation and key in self: if isinstance(self.structure[key], i18n): self[key][self._current_lang] = value else: self[key] = value else: if self.dot_notation_warning and not key.startswith('_') and key not in \ ['db', 'collection', 'versioning_collection', 'connection', 'fs']: log.warning("dot notation: {} was not found in structure. 
Add it as attribute instead".format(key)) dict.__setattr__(self, key, value) def __getattr__(self, key): if key not in self._protected_field_names and self.use_dot_notation and key in self: if isinstance(self[key], i18n): if self._current_lang not in self[key]: return self[key].get(self._fallback_lang) return self[key][self._current_lang] return self[key] else: return dict.__getattribute__(self, key) # # Public API end # @classmethod def __walk_dict(cls, dic): # thanks jean_b for the patch for key, value in dic.items(): if isinstance(value, dict) and len(value): if type(key) is type: yield '$%s' % key.__name__ else: yield key for child_key in cls.__walk_dict(value): if type(key) is type: new_key = "$%s" % key.__name__ else: new_key = key #if type(child_key) is type: # new_child_key = "$%s" % child_key.__name__ #else: if type(child_key) is not type: new_child_key = child_key yield '%s.%s' % (new_key, new_child_key) elif type(key) is type: yield '$%s' % key.__name__ # elif isinstance(value, list) and len(value): # if isinstance(value[0], dict): # for child_key in cls.__walk_dict(value[0]): # #if type(key) is type: # # new_key = "$%s" % key.__name__ # #else: # if type(key) is not type: # new_key = key # #if type(child_key) is type: # # new_child_key = "$%s" % child_key.__name__ # #else: # if type(child_key) is not type: # new_child_key = child_key # yield '%s.%s' % (new_key, new_child_key) # else: # if type(key) is not type: # yield key # #else: # # yield "" else: if type(key) is not type: yield key #else: # yield "" @classmethod def _validate_structure(cls, structure, name, authorized_types): """ validate if all fields in self.structure are in authorized types. """ ############## def __validate_structure(struct, name, _authorized): if type(struct) is type: if struct not in authorized_types: if struct not in authorized_types: raise StructureError("%s: %s is not an authorized type" % (name, struct)) elif isinstance(struct, dict): for key in struct: if isinstance(key, basestring): if "." 
in key: raise BadKeyError("%s: %s must not contain '.'" % (name, key)) if key.startswith('$'): raise BadKeyError("%s: %s must not start with '$'" % (name, key)) elif type(key) is type: if not key in authorized_types: raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key)) else: raise StructureError("%s: %s must be a basestring or a type" % (name, key)) if struct[key] is None: pass elif isinstance(struct[key], dict): __validate_structure(struct[key], name, authorized_types) elif isinstance(struct[key], list): __validate_structure(struct[key], name, authorized_types) elif isinstance(struct[key], tuple): __validate_structure(struct[key], name, authorized_types) elif isinstance(struct[key], CustomType): __validate_structure(struct[key].mongo_type, name, authorized_types) elif isinstance(struct[key], SchemaProperties): pass elif isinstance(struct[key], SchemaOperator): __validate_structure(struct[key], name, authorized_types) elif hasattr(struct[key], 'structure'): __validate_structure(struct[key], name, authorized_types) elif struct[key] not in authorized_types: ok = False for auth_type in authorized_types: if struct[key] is None: ok = True else: try: if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type): ok = True except TypeError: raise TypeError("%s: %s is not a type" % (name, struct[key])) if not ok: raise StructureError( "%s: %s is not an authorized type" % (name, struct[key])) elif isinstance(struct, list) or isinstance(struct, tuple): for item in struct: __validate_structure(item, name, authorized_types) elif isinstance(struct, SchemaOperator): if isinstance(struct, IS): for operand in struct: if type(operand) not in authorized_types: raise StructureError("%s: %s in %s is not an authorized type (%s found)" % ( name, operand, struct, type(operand).__name__)) else: for operand in struct: if operand not in authorized_types: raise StructureError("%s: %s in %s is not an authorized type (%s found)" % ( name, operand, struct, type(operand).__name__)) elif isinstance(struct, SchemaProperties): pass else: ok = False for auth_type in authorized_types: if isinstance(struct, auth_type): ok = True if not ok: raise StructureError("%s: %s is not an authorized_types" % (name, struct)) ################# if structure is None: raise StructureError("%s.structure must not be None" % name) if not isinstance(structure, dict): raise StructureError("%s.structure must be a dict instance" % name) __validate_structure(structure, name, authorized_types) def _raise_exception(self, exception, field, message): if self.raise_validation_errors: raise exception(message) else: if not field in self.validation_errors: self.validation_errors[field] = [] self.validation_errors[field].append(exception(message)) def _validate_doc(self, doc, struct, path=""): """ check if doc field types match the doc field structure """ if type(struct) is type or struct is None: if struct is None: if type(doc) not in self.authorized_types: self._raise_exception(AuthorizedTypeError, type(doc).__name__, "%s is not an authorized types" % type(doc).__name__) elif not isinstance(doc, struct) and doc is not None: self._raise_exception(SchemaTypeError, path, "%s must be an instance of %s not %s" % ( path, struct.__name__, type(doc).__name__)) elif isinstance(struct, CustomType): if not isinstance(doc, struct.mongo_type) and doc is not None: self._raise_exception(SchemaTypeError, path, "%s must be an instance of %s not %s" % ( path, struct.mongo_type.__name__, type(doc).__name__)) struct.validate(doc, 
path=path) elif isinstance(struct, SchemaOperator): if not struct.validate(doc) and doc is not None: if isinstance(struct, IS): self._raise_exception(SchemaTypeError, path, "%s must be in %s not %s" % (path, struct._operands, doc)) else: self._raise_exception(SchemaTypeError, path, "%s must be an instance of %s not %s" % (path, struct, type(doc).__name__)) elif isinstance(struct, dict): if not isinstance(doc, type(struct)): self._raise_exception(SchemaTypeError, path, "%s must be an instance of %s not %s" % ( path, type(struct).__name__, type(doc).__name__)) struct_length = len(struct) if not '_id' in struct else len(struct) - 1 if len(doc) != struct_length: struct_doc_diff = list(set(struct).difference(set(doc))) if struct_doc_diff: for field in struct_doc_diff: if (type(field) is not type) and (not self.use_schemaless): self._raise_exception(StructureError, None, "missed fields %s in %s" % (struct_doc_diff, type(doc).__name__)) else: struct_struct_diff = list(set(doc).difference(set(struct))) bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS] if bad_fields and not self.use_schemaless: self._raise_exception(StructureError, None, "unknown fields %s in %s" % (bad_fields, type(doc).__name__)) for key in struct: if type(key) is type: new_key = "$%s" % key.__name__ else: new_key = key new_path = ".".join([path, new_key]).strip('.') if new_key.split('.')[-1].startswith("$"): for doc_key in doc: if not isinstance(doc_key, key): self._raise_exception(SchemaTypeError, path, "key of %s must be an instance of %s not %s" % ( path, key.__name__, type(doc_key).__name__)) self._validate_doc(doc[doc_key], struct[key], new_path) else: if key in doc: self._validate_doc(doc[key], struct[key], new_path) elif isinstance(struct, list): if not isinstance(doc, list) and not isinstance(doc, tuple): self._raise_exception(SchemaTypeError, path, "%s must be an instance of list not %s" % (path, type(doc).__name__)) if not len(struct): struct = None else: struct = struct[0] for obj in doc: self._validate_doc(obj, struct, path) elif isinstance(struct, tuple): if not isinstance(doc, list) and not isinstance(doc, tuple): self._raise_exception(SchemaTypeError, path, "%s must be an instance of list not %s" % ( path, type(doc).__name__)) if len(doc) != len(struct): self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % ( path, len(struct), len(doc))) for i in range(len(struct)): self._validate_doc(doc[i], struct[i], path) def _process_validators(self, doc, _struct, _path=""): doted_doc = DotCollapsedDict(doc) for key, validators in self.validators.iteritems(): if key in doted_doc and doted_doc[key] is not None: if not hasattr(validators, "__iter__"): validators = [validators] for validator in validators: try: if not validator(doted_doc[key]): raise ValidationError("%s does not pass the validator " + validator.__name__) except Exception, e: self._raise_exception(ValidationError, key, unicode(e) % key) def _process_custom_type(self, target, doc, struct, path="", root_path=""): for key in struct: if type(key) is type: new_key = "$%s" % key.__name__ else: new_key = key new_path = ".".join([path, new_key]).strip('.') # # if the value is a dict, we have a another structure to validate # # # It is not a dict nor a list but a simple key:value # if isinstance(struct[key], CustomType): if target == 'bson': if key in doc: if struct[key].python_type is not None: if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None: self._raise_exception(SchemaTypeError, new_path, 
"%s must be an instance of %s not %s" % ( new_path, struct[key].python_type.__name__, type(doc[key]).__name__)) doc[key] = struct[key].to_bson(doc[key]) else: if key in doc: doc[key] = struct[key].to_python(doc[key]) elif isinstance(struct[key], dict): if doc: # we don't need to process an empty doc if type(key) is type: for doc_key in doc: # process type's key such {unicode:int}... self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path) else: if key in doc: # we don't care about missing fields self._process_custom_type(target, doc[key], struct[key], new_path, root_path) # # If the struct is a list, we have to validate all values into it # elif type(struct[key]) is list: # # check if the list must not be null # if struct[key]: l_objs = [] if isinstance(struct[key][0], CustomType): for obj in doc[key]: if target == 'bson': if struct[key][0].python_type is not None: if not isinstance(obj, struct[key][0].python_type) and obj is not None: self._raise_exception(SchemaTypeError, new_path, "%s must be an instance of %s not %s" % ( new_path, struct[key][0].python_type.__name__, type(obj).__name__)) obj = struct[key][0].to_bson(obj) else: obj = struct[key][0].to_python(obj) l_objs.append(obj) doc[key] = l_objs elif isinstance(struct[key][0], dict): if doc.get(key): for obj in doc[key]: self._process_custom_type(target, obj, struct[key][0], new_path, root_path) def _set_default_fields(self, doc, struct, path=""): # TODO check this out, this method must be restructured for key in struct: new_key = key new_path = ".".join([path, new_key]).strip('.') # # default_values : # if the value is None, check if a default value exist. # if exists, and it is a function then call it otherwise, # juste feed it # if type(key) is not type: if doc[key] is None and new_path in self.default_values: new_value = self.default_values[new_path] if callable(new_value): new_value = new_value() elif isinstance(new_value, dict): new_value = deepcopy(new_value) elif isinstance(new_value, list): new_value = new_value[:] if isinstance(struct[key], CustomType): if not isinstance(new_value, struct[key].python_type): self._raise_exception(DefaultFieldTypeError, new_path, "%s must be an instance of %s not %s" % ( new_path, struct[key].python_type.__name__, type(new_value).__name__)) doc[key] = new_value # # if the value is a dict, we have a another structure to validate # if isinstance(struct[key], dict) and new_path not in self.i18n: # # if the dict is still empty into the document we build # it with None values # if len(struct[key]) and not [i for i in struct[key].keys() if type(i) is type]: self._set_default_fields(doc[key], struct[key], new_path) else: if new_path in self.default_values: new_value = self.default_values[new_path] if callable(new_value): new_value = new_value() elif isinstance(new_value, dict): new_value = deepcopy(new_value) elif isinstance(new_value, list): new_value = new_value[:] doc[key] = new_value elif isinstance(struct[key], list): if new_path in self.default_values: for new_value in self.default_values[new_path]: if callable(new_value): new_value = new_value() elif isinstance(new_value, dict): new_value = deepcopy(new_value) elif isinstance(new_value, list): new_value = new_value[:] if isinstance(struct[key][0], CustomType): if not isinstance(new_value, struct[key][0].python_type): self._raise_exception(DefaultFieldTypeError, new_path, "%s must be an instance of %s not %s" % ( new_path, struct[key][0].python_type.__name__, type(new_value).__name__)) doc[key].append(new_value) else: 
# what else if new_path in self.default_values: new_value = self.default_values[new_path] if callable(new_value): new_value = new_value() elif isinstance(new_value, dict): new_value = deepcopy(new_value) elif isinstance(new_value, list): new_value = new_value[:] if new_path in self.i18n: doc[key] = i18n( field_type=struct[key], field_name=key ) doc[key].update(new_value) else: doc[key] = new_value def _validate_required(self, doc, _struct, _path="", _root_path=""): doted_struct = DotCollapsedDict(self.structure) doted_doc = DotCollapsedDict(doc, reference=doted_struct) for req in self.required_fields: if doted_doc.get(req) is None and doted_struct.get(req) is not dict: if not isinstance(doted_struct.get(req), CustomType): self._raise_exception(RequireFieldError, req, "%s is required" % req) elif isinstance(doted_struct.get(req), CustomType) and doted_struct[req].mongo_type is not dict: self._raise_exception(RequireFieldError, req, "%s is required" % req) elif doted_doc.get(req) == []: self._raise_exception(RequireFieldError, req, "%s is required" % req) elif doted_doc.get(req) == {}: self._raise_exception(RequireFieldError, req, "%s is required" % req) def __generate_skeleton(self, doc, struct, path=""): for key in struct: if type(key) is type: new_key = "$%s" % key.__name__ else: new_key = key new_path = ".".join([path, new_key]).strip('.') # # Automatique generate the skeleton with NoneType # if type(key) is not type and key not in doc: if isinstance(struct[key], dict): if type(struct[key]) is dict and self.use_dot_notation: if new_path in self._i18n_namespace: doc[key] = i18nDotedDict(doc.get(key, {}), self) else: doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning) else: if callable(struct[key]): doc[key] = struct[key]() else: doc[key] = type(struct[key])() elif struct[key] is dict: doc[key] = {} elif isinstance(struct[key], list): doc[key] = type(struct[key])() elif isinstance(struct[key], CustomType): if struct[key].init_type is not None: doc[key] = struct[key].init_type() else: doc[key] = None elif struct[key] is list: doc[key] = [] elif isinstance(struct[key], tuple): doc[key] = [None for _ in range(len(struct[key]))] else: doc[key] = None # # if the value is a dict, we have a another structure to validate # if isinstance(struct[key], dict) and type(key) is not type: self.__generate_skeleton(doc[key], struct[key], new_path) def __generate_doted_dict(self, doc, struct, path=""): for key in struct: # # Automatique generate the skeleton with NoneType # if type(key) is type: new_key = "$%s" % key.__name__ else: new_key = key new_path = ".".join([path, new_key]).strip('.') if type(key) is not type: # and key not in doc: if isinstance(struct[key], dict): if type(struct[key]) is dict: if new_path in self._i18n_namespace: doc[key] = i18nDotedDict(doc.get(key, {}), self) else: doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning) # # if the value is a dict, we have a another structure to validate # if isinstance(struct[key], dict) and type(key) is not type: self.__generate_doted_dict(doc[key], struct[key], new_path) def _make_i18n(self): doted_dict = DotCollapsedDict(self.structure) for field in self.i18n: if field not in doted_dict: self._raise_exception(ValidationError, field, "%s not found in structure" % field) if not isinstance(doted_dict[field], i18n): doted_dict[field] = i18n( field_type=doted_dict[field], field_name=field ) self.structure.update(DotExpandedDict(doted_dict)) def set_lang(self, lang): self._current_lang = lang def 
get_lang(self): return self._current_lang class i18n(dict, CustomType): """ CustomType to deal with i18n """ mongo_type = list def __init__(self, field_type=None, field_name=None): super(i18n, self).__init__() self.python_type = self.__class__ self._field_type = field_type self._field_name = field_name def __call__(self): return i18n(self._field_type, self._field_name) def to_bson(self, value): if value is not None: for l, v in value.iteritems(): if isinstance(v, list) and isinstance(self._field_type, list): for i in v: if not isinstance(i, self._field_type[0]): raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % ( self._field_name, l, self._field_type[0], type(i).__name__)) else: if not isinstance(v, self._field_type): raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % ( self._field_name, l, self._field_type, type(v).__name__)) return [{'lang': l, 'value': v} for l, v in value.iteritems()] def to_python(self, value): if value is not None: i18n_dict = self.__class__(self._field_type) for i in value: i18n_dict[i['lang']] = i['value'] return i18n_dict class Set(CustomType): """ SET custom type to handle python set() type """ init_type = set mongo_type = list python_type = set def __init__(self, structure_type=None): super(Set, self).__init__() self._structure_type = structure_type def to_bson(self, value): if value is not None: return list(value) def to_python(self, value): if value is not None: return set(value) def validate(self, value, path): if value is not None and self._structure_type is not None: for val in value: if not isinstance(val, self._structure_type): raise ValueError('%s must be an instance of %s not %s' % (path, self._structure_type.__name__, type(val).__name__))
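A minimal usage sketch of the SchemaDocument machinery defined above (assumes Python 2 and this module's imports; the BlogPost class and its fields are illustrative, not part of mongokit):

class BlogPost(SchemaDocument):
    structure = {
        "title": unicode,
        "rank": int,
        "meta": {"score": float},
    }
    required_fields = ["title"]                      # dot notation works for nested fields too
    default_values = {"rank": 0, "meta.score": 1.0}  # filled in when the skeleton is generated
    validators = {"rank": lambda x: x >= 0}

post = BlogPost()        # skeleton: {'title': None, 'rank': 0, 'meta': {'score': 1.0}}
post["title"] = u"hello"
post.validate()          # raises RequireFieldError / ValidationError on invalid documents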
bsd-3-clause
3,304,417,042,122,618,000
42.996907
117
0.506455
false
4.552699
false
false
false
JohnUrban/fast5tools
bin/samGenomicWindows.py
1
5426
#!/usr/bin/env python2.7 import argparse from collections import defaultdict from fast5tools.samclass import * from fast5tools.samops import * parser = argparse.ArgumentParser(description=""" DESCRIPTION Given a SAM file (with F5:Z: info attached) that is sorted by read name: - get the alignment or set of splitread alignments for each read - determine most likely genomic region read came from (assuming no structural variation) - if one alignment, assume it comes from there - if multiple alignments, check for overlap of their individual genomic windows (alignment adjusted for clipping on each side + flank/buffer) if no merges, use majority or longest alignment (majority is longest alignment that also meets a majority threshold) if there is a single merge -- i.e. they all come from same genomic region (and perhaps required to be ordered and stranded - see options) - use merged result from merged genomic windows if there is 1 or more merges (but still more than 1 genomic region) see if longest merge has a 'spanning alignment' longer than longest/majority alignment if so use that, if not use the longest/majority alignment - report on alignments and merges in all cases - get coordinates for a window that surrounds that chosen genomic region - this is the chosen genomic window for that read - coordinates for genomic window should be proportional to read length + some extra buffering/flanking sequence - print out gw coordinates, notes on choice, F5 info, and perhaps genomic sequence chosen flank=0.25, merge_dist=0, majority=0.5, require_order=False, require_strand=False, reference=False flank = Add buffer/flank lengths to each side of a genomic window in two ways: (1) int > 1 adds/subtracts that int. (2) float [0,1] adds/subtracts that proportion of read length NOTE: 1 defaults to 100% of read length, not 1 bp merge_dist: allows a gap up to d between intervals to still be an overlap - default 0 majority threshold to exceed to be considered a majority. require_order when True, alignments must be ordered as they appear in the read to be considered a valid merge. Defaults to False as noisy alignments could easily break this. Status is reported in output anyway. require_strand when True, alignments must ALL be on the same strand to be considered a valid merge. Defaults to False as noisy alignments could easily break this. Status is reported in output anyway. """, formatter_class= argparse.RawTextHelpFormatter) parser_input = parser.add_mutually_exclusive_group(required=True) parser_input.add_argument('--sam', '-s', type= str, default=False, help='''Input file in SAM format.''') ## FOR NOW, MUST BE SAM -- NOT BAM -- but can be STDIN SAM ##parser_input.add_argument('--bam', '-b', ## type= str, default=False, ## help='''Input file in BAM format.''') parser.add_argument('--flank', '-f', type=float, default=0.25, help=''' ''') parser.add_argument('--merge_dist', '-m', type=int, default=0, help=''' ''') parser.add_argument('--majority', '-M', type=float, default=0.5, help=''' ''') parser.add_argument('--require_order', '-ro', action='store_true', default=False, help=''' ''') parser.add_argument('--require_strand', '-rs', action='store_true', default=False, help=''' ''') parser.add_argument('--reference', '-r', type=str, default=False, help=''' Path to reference genome file to be used to extract sequences corresponding to genomic windows identified. Optional. 
Sequences will be tagged on to an additional end column if provided.''') parser.add_argument('--getF5info', '-f5', action='store_true', default=False, help='''Return F5:Z: field from fast5tools in output. This is from extracting fasta/fastq using fast5tofastx.py with --comments and --samflag''') parser.add_argument('--getBCinfo', '-BC', action='store_true', default=False, help=''' Return BC:Z: field from fast5tools in output. This is from creating fasta/fastq from output of fast5_sw_bardecoder.py specified with --sequence/--quals, and merging all desired barcode info into string following BC:Z:''') parser.add_argument('--do_not_adjust_window_for_clipping', '-noadjust', action='store_true', default=False, help=''' By default, the genomic window is pushed out at least as far as it would need to be to include soft/hard clipped regions at 5'/3' ends. This turns it off.''') args = parser.parse_args() get_genomic_windows(samfilepath=args.sam, flank=args.flank, merge_dist=args.merge_dist, majority=args.majority, require_order=args.require_order, require_strand=args.require_strand, reference=args.reference, getF5field=args.getF5info, getBCfield=args.getBCinfo, adjust_for_clipping_in_output=(not args.do_not_adjust_window_for_clipping))
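The same routine can also be driven programmatically; a hedged sketch (assumes fast5tools is importable and that 'reads.sam' -- a hypothetical, read-name-sorted SAM file -- exists; the keyword names mirror the call at the bottom of the script):

from fast5tools.samops import get_genomic_windows

get_genomic_windows(samfilepath="reads.sam",   # hypothetical input path
                    flank=0.25, merge_dist=0, majority=0.5,
                    require_order=False, require_strand=False,
                    reference=False, getF5field=False, getBCfield=False,
                    adjust_for_clipping_in_output=True)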
mit
6,325,595,526,689,239,000
51.679612
337
0.651861
false
4.170638
false
false
false
DigitalCampus/django-oppia
api/resources/course.py
1
9945
import json import os import re import shutil import xmltodict import zipfile from django.conf import settings from django.conf.urls import url from django.core.exceptions import MultipleObjectsReturned from django.db.models import Q from django.http import HttpResponse, Http404 from django.utils.translation import ugettext_lazy as _ from tastypie import fields from tastypie.authentication import ApiKeyAuthentication, Authentication from tastypie.authorization import ReadOnlyAuthorization, Authorization from tastypie.resources import ModelResource from tastypie.utils import trailing_slash from api.serializers import CourseJSONSerializer from oppia.models import Tracker, Course, CourseCategory from oppia.signals import course_downloaded STR_COURSE_NOT_FOUND = _(u"Course not found") def get_course_from_shortname(resource, bundle, lookup): object_list = resource.apply_filters(bundle.request, {'shortname': lookup}) if len(object_list) <= 0: raise resource._meta.object_class.DoesNotExist( "Couldn't find a course with shortname '%s'." % (lookup)) elif len(object_list) > 1: raise MultipleObjectsReturned( "More than one course with shortname '%s'." % (lookup)) return object_list class CourseResource(ModelResource): class Meta: queryset = Course.objects.all() resource_name = 'course' allowed_methods = ['get'] fields = ['id', 'title', 'version', 'shortname', 'priority', 'is_draft', 'description', 'author', 'username', 'organisation'] authentication = ApiKeyAuthentication() authorization = ReadOnlyAuthorization() serializer = CourseJSONSerializer() always_return_data = True include_resource_uri = True def obj_get(self, bundle, **kwargs): """ Overridden get method to perform a direct lookup if we are searching by shortname instead of pk """ lookup = kwargs[self._meta.detail_uri_name] if re.search('[a-zA-Z]', lookup): object_list = get_course_from_shortname(self, bundle, lookup) bundle.obj = object_list[0] self.authorized_read_detail(object_list, bundle) return bundle.obj else: return super().obj_get(bundle, **kwargs) def get_object_list(self, request): if request.user.is_staff: return Course.objects.filter(is_archived=False) \ .order_by('-priority', 'title') else: return Course.objects.filter(is_archived=False) \ .filter( Q(is_draft=False) | (Q(is_draft=True) & Q(user=request.user))) \ .order_by('-priority', 'title') def prepend_urls(self): return [ url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('download_course'), name="api_download_course"), url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/activity%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('download_activity'), name="api_download_activity"), ] def get_course(self, request, **kwargs): self.is_authenticated(request) self.throttle_check(request) pk = kwargs.pop('pk', None) try: if request.user.is_staff: course = self._meta.queryset.get(pk=pk, is_archived=False) else: course = self._meta.queryset \ .filter( Q(is_draft=False) | (Q(is_draft=True) & Q(user=request.user)) | (Q(is_draft=True) & Q(coursepermissions__user=request.user))) \ .distinct().get(pk=pk, is_archived=False) except Course.DoesNotExist: raise Http404(STR_COURSE_NOT_FOUND) except ValueError: try: if request.user.is_staff: course = self._meta.queryset.get(shortname=pk, is_archived=False) else: course = self._meta.queryset \ .filter( Q(is_draft=False) | (Q(is_draft=True) & Q(user=request.user)) | (Q(is_draft=True) & Q(coursepermissions__user=request.user))) \ .distinct().get(shortname=pk, 
is_archived=False) except Course.DoesNotExist: raise Http404(STR_COURSE_NOT_FOUND) return course def download_course(self, request, **kwargs): course = self.get_course(request, **kwargs) file_to_download = course.getAbsPath() has_completed_trackers = Tracker.has_completed_trackers(course, request.user) try: if has_completed_trackers: file_to_download = os.path.join( settings.COURSE_UPLOAD_DIR, "temp", str(request.user.id) + "-" + course.filename) shutil.copy2(course.getAbsPath(), file_to_download) course_zip = zipfile.ZipFile(file_to_download, 'a') if has_completed_trackers: course_zip.writestr(course.shortname + "/tracker.xml", Tracker.to_xml_string(course, request.user)) course_zip.close() binary_file = open(file_to_download, 'rb') response = HttpResponse(binary_file.read(), content_type='application/zip') binary_file.close() response['Content-Length'] = os.path.getsize(file_to_download) response['Content-Disposition'] = \ 'attachment; filename="%s"' % (course.filename) except IOError: raise Http404(STR_COURSE_NOT_FOUND) course_downloaded.send(sender=self, course=course, request=request) return response def download_activity(self, request, **kwargs): course = self.get_course(request, **kwargs) return HttpResponse(Tracker.to_xml_string(course, request.user), content_type='text/xml') def dehydrate(self, bundle): bundle.data['url'] = bundle.request.build_absolute_uri( bundle.data['resource_uri'] + 'download/') # make sure title is shown as a json object (not a string representation of one) bundle.data['title'] = json.loads(bundle.data['title']) try: bundle.data['description'] = json.loads(bundle.data['description']) except json.JSONDecodeError: pass course = Course.objects.get(pk=bundle.obj.pk) if course and course.user: bundle.data['author'] = course.user.first_name \ + " " \ + course.user.last_name bundle.data['username'] = course.user.username bundle.data['organisation'] = course.user.userprofile.organisation return bundle class CourseCategoryResource(ModelResource): course = fields.ToOneField('api.resource.course.CourseResource', 'course', full=True) class Meta: queryset = CourseCategory.objects.all() allowed_methods = ['get'] resource_name = 'coursetag' fields = ['id', 'course', 'category'] include_resource_uri = False authentication = ApiKeyAuthentication() authorization = ReadOnlyAuthorization() always_return_data = True class CourseStructureResource(ModelResource): class Meta: queryset = Course.objects.filter(is_draft=False, is_archived=False) resource_name = 'coursestructure' allowed_methods = ['get'] fields = ['shortname', 'id', 'structure'] authentication = Authentication() authorization = Authorization() serializer = CourseJSONSerializer() always_return_data = True include_resource_uri = True def obj_get(self, bundle, **kwargs): """ Overridden get method to perform a direct lookup if we are searching by shortname instead of pk """ lookup = kwargs[self._meta.detail_uri_name] if re.search('[a-zA-Z]', lookup): object_list = get_course_from_shortname(self, bundle, lookup) return_obj = object_list[0] else: return_obj = super().obj_get(bundle, **kwargs) # check the module.xml is on disk path = os.path.join(settings.MEDIA_ROOT, 'courses', return_obj.shortname, 'module.xml') if not os.path.isfile(path): raise self._meta.object_class.DoesNotExist() return return_obj def dehydrate(self, bundle): path = os.path.join(settings.MEDIA_ROOT, 'courses', bundle.obj.shortname, 'module.xml') with open(path) as fd: doc = xmltodict.parse(fd.read()) bundle.data['structure'] = json.dumps(doc) return 
bundle
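A minimal sketch of exposing these resources through a tastypie Api (a hypothetical urls.py; the import path is assumed from this module's location):

from django.conf.urls import include, url
from tastypie.api import Api
from api.resources.course import (CourseResource, CourseCategoryResource,
                                  CourseStructureResource)

v1_api = Api(api_name='v1')
v1_api.register(CourseResource())
v1_api.register(CourseCategoryResource())
v1_api.register(CourseStructureResource())

urlpatterns = [
    url(r'^api/', include(v1_api.urls)),   # e.g. /api/v1/course/<pk>/download/
]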
gpl-3.0
3,532,451,220,382,889,000
37.103448
79
0.539769
false
4.502037
false
false
false
yilei0620/3D_Conditional_Gan
GenSample_obj.py
1
4544
import sys sys.path.append('..') import os import json from time import time import numpy as np from sklearn.externals import joblib import scipy from scipy import io # from matplotlib import pyplot as plt # from sklearn.externals import joblib import theano import theano.tensor as T from lib import activations from lib import updates from lib import inits from lib.rng import py_rng, np_rng from lib.ops import batchnorm, conv_cond_concat, conv, dropout from lib.theano_utils import floatX, sharedX from lib.data_utils import OneHot, shuffle, iter_data from lib.metrics import nnc_score, nnd_score from load import load_shapenet_train, load_shapenet_test relu = activations.Rectify() sigmoid = activations.Sigmoid() lrelu = activations.LeakyRectify() bce = T.nnet.binary_crossentropy parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150} for p in parameters: tmp = p + " = parameters[p]" exec(tmp) # print conditional,type(batchsize),Channel[-1],kernal gifn = inits.Normal(scale=0.02) difn = inits.Normal(scale=0.02) ## filter_shape: (output channels, input channels, filter height, filter width, filter depth) ## load the parameters # gen_params = [gw1, gw2, gw3, gw4, gw5, gwx] # discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy] temp = joblib.load('models%d/50_gen_params.jl'%objectNumber) gw1 = sharedX(temp[0]) gg1 = sharedX(temp[1]) gb1 = sharedX(temp[2]) gw2 = sharedX(temp[3]) gg2 = sharedX(temp[4]) gb2 = sharedX(temp[5]) gw3 = sharedX(temp[6]) gg3 = sharedX(temp[7]) gb3 = sharedX(temp[8]) gw4 = sharedX(temp[9]) gg4 = sharedX(temp[10]) gb4 = sharedX(temp[11]) gwx = sharedX(temp[12]) gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4 ,gg4, gb4, gwx] ## def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx): Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1)) Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])) input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]) filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1]) Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2)) input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2]) filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2]) Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3)) input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3]) filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3]) Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4)) input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4]) filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4]) GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv')) return GlX X = T.tensor5() Z = T.matrix() gX = gen(Z, *gen_params) print 'COMPILING' t = time() # _train_g = theano.function([X, Z, Y], cost, updates=g_updates) # _train_d = theano.function([X, Z, Y], cost, updates=d_updates) _gen = theano.function([Z], gX) print '%.2f seconds to compile theano functions'%(time()-t) # trX, 
trY, ntrain = load_shapenet_train() n = 10 nbatch = 10 rng = np.random.RandomState(int(time())) # sample_ymb = floatX(np.asarray(np.eye(3))) z_dist = scipy.io.loadmat('Z_dist_class2.mat') z_mean = z_dist['mean'] z_mean = np.reshape(z_mean,(Nz,1)) z_std = z_dist['std'] z_std = np.reshape(z_std,(Nz,1)) def gen_z(z_dist,nbatch): ret = np.zeros((nbatch,Nz)) for j in xrange(Nz): z_tmp = np_rng.normal(z_mean[j],z_std[j],nbatch) ret[:,j] = z_tmp # print ret return ret try: os.mkdir('Gen_models%d'%objectNumber) except: pass for j in xrange(n/nbatch): sample_zmb = floatX(gen_z(z_dist,nbatch)) samples = np.asarray(_gen(sample_zmb)) for i in xrange(nbatch): io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber,nbatch*j+i),{'instance':samples[i,:,:,:],'Z':sample_zmb[i,:]}) # niter = 1 # niter_decay = 1
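A hedged one-off sampling sketch using the compiled generator above (assumes the script's _gen, gen_z and z_dist are already in scope; the stated output shape follows from Channel/Convlayersize but is worth verifying):

zmb = floatX(gen_z(z_dist, 1))    # one latent vector, each dim drawn from N(mean_j, std_j)
vox = np.asarray(_gen(zmb))       # expected shape: (1, 1, 64, 64, 64) voxel grid
io.savemat('single_sample.mat', {'instance': vox[0], 'Z': zmb[0]})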
mit
4,118,196,402,505,532,400
27.942675
261
0.659991
false
2.552809
false
false
false
Keisuke69/libcloud
libcloud/loadbalancer/drivers/cloudstack.py
1
4800
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.common.cloudstack import CloudStackConnection, \ CloudStackDriverMixIn from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm from libcloud.loadbalancer.base import DEFAULT_ALGORITHM from libcloud.loadbalancer.types import State, LibcloudLBImmutableError from libcloud.utils import reverse_dict class CloudStackLBDriver(CloudStackDriverMixIn, Driver): """Driver for CloudStack load balancers.""" api_name = 'cloudstack_lb' _VALUE_TO_ALGORITHM_MAP = { 'roundrobin': Algorithm.ROUND_ROBIN, 'leastconn': Algorithm.LEAST_CONNECTIONS } _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) LB_STATE_MAP = { 'Active': State.RUNNING, } def list_protocols(self): """We don't actually have any protocol awareness beyond TCP.""" return [ 'tcp' ] def list_balancers(self): balancers = self._sync_request('listLoadBalancerRules') balancers = balancers.get('loadbalancerrule', []) return [self._to_balancer(balancer) for balancer in balancers] def get_balancer(self, balancer_id): balancer = self._sync_request('listLoadBalancerRules', id=balancer_id) balancer = balancer.get('loadbalancerrule', []) if not balancer: raise Exception("no such load balancer: " + str(balancer_id)) return self._to_balancer(balancer[0]) def create_balancer(self, name, members, protocol='http', port=80, algorithm=DEFAULT_ALGORITHM, location=None, private_port=None): if location is None: locations = self._sync_request('listZones') location = locations['zone'][0]['id'] else: location = location.id if private_port is None: private_port = port result = self._async_request('associateIpAddress', zoneid=location) public_ip = result['ipaddress'] result = self._sync_request('createLoadBalancerRule', algorithm=self._ALGORITHM_TO_VALUE_MAP[algorithm], name=name, privateport=private_port, publicport=port, publicipid=public_ip['id'], ) balancer = self._to_balancer(result['loadbalancer']) for member in members: balancer.attach_member(member) return balancer def destroy_balancer(self, balancer): self._async_request('deleteLoadBalancerRule', id=balancer.id) self._async_request('disassociateIpAddress', id=balancer.ex_public_ip_id) def balancer_attach_member(self, balancer, member): member.port = balancer.ex_private_port self._async_request('assignToLoadBalancerRule', id=balancer.id, virtualmachineids=member.id) return True def balancer_detach_member(self, balancer, member): self._async_request('removeFromLoadBalancerRule', id=balancer.id, virtualmachineids=member.id) return True def balancer_list_members(self, balancer): members = self._sync_request('listLoadBalancerRuleInstances', id=balancer.id) members = members['loadbalancerruleinstance'] return [self._to_member(m, balancer.ex_private_port) for m in members] def _to_balancer(self, obj): balancer = 
LoadBalancer( id=obj['id'], name=obj['name'], state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN), ip=obj['publicip'], port=obj['publicport'], driver=self.connection.driver ) balancer.ex_private_port = obj['privateport'] balancer.ex_public_ip_id = obj['publicipid'] return balancer def _to_member(self, obj, port): return Member( id=obj['id'], ip=obj['nic'][0]['ipaddress'], port=port )
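A hedged usage sketch for the driver above (endpoint and credentials are placeholders; obtaining the class via get_driver follows libcloud's usual pattern, assuming a CLOUDSTACK provider constant is registered):

from libcloud.loadbalancer.providers import get_driver
from libcloud.loadbalancer.types import Provider

cls = get_driver(Provider.CLOUDSTACK)
driver = cls('api_key', 'secret_key',              # placeholder credentials
             host='cloudstack.example.com', path='/client/api')
for balancer in driver.list_balancers():
    print("%s %s %s" % (balancer.id, balancer.name, balancer.state))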
apache-2.0
400,207,334,912,929,400
38.02439
78
0.63875
false
4.270463
false
false
false
BBN-Q/Quince
quince/param.py
1
16560
# coding: utf-8 # Raytheon BBN Technologies 2016 # Contributiors: Graham Rowlands # # This file contains the parameter descriptions from qtpy.QtGui import * from qtpy.QtCore import * from qtpy.QtWidgets import * import os class Parameter(QGraphicsEllipseItem): """docstring for Parameter""" def __init__(self, name, parent=None): self.name = name self.parent = parent rad = 5 super(Parameter, self).__init__(-rad, -rad, 2*rad, 2*rad, parent=parent) self.has_input = True # Do we draw the connector? self.interactive = True # Can we modify the value? self.setBrush(QBrush(QColor(200,200,240))) self.setPen(Qt.black) self.setZValue(1) self.height = 36 self.height_collapsed = 15 self.temp_wire = None self.wires_in = [] self.wires_out = [] # Text label and area self.label = QGraphicsTextItem(self.name, parent=self) self.label.setDefaultTextColor(Qt.black) self.label.setPos(5,-10) # Value Box self.value_box = None def set_changed_flag(self): # Would prefer to use signals/slots, but that's apparently too heavy for QGraphics # Instead we add the name of the changed parameter to the list if self.parent is not None and not self.parent.changing: self.parent.changing = True self.parent.value_changed( self.name ) def set_interactive(self, value): self.interactive = value self.value_box.interactive = value def set_collapsed(self, collapsed): self.collapsed = collapsed self.value_box.setVisible(not self.collapsed) def width(self): return self.label.boundingRect().topRight().x() def set_box_width(self, width): self.value_box.set_box_width(width) def value(self): return self.value_box.value() def set_value(self, value): self.value_box.set_value(value) self.set_changed_flag() def paint(self, painter, options, widget): if self.has_input: super(Parameter, self).paint(painter, options, widget) class NumericalParameter(Parameter): """docstring for Parameter""" def __init__(self, name, datatype, min_value, max_value, increment, snap, parent=None): super(NumericalParameter, self).__init__(name, parent=parent) self.datatype = datatype self.value_box = SliderBox( datatype, min_value, max_value, increment, snap, parent=self) def set_value(self, value): self.value_box.set_value(self.datatype(value)) self.set_changed_flag() class StringParameter(Parameter): """docstring for Parameter""" def __init__(self, name, parent=None): super(StringParameter, self).__init__(name, parent=parent) self.value_box = StringBox(parent=self) self.parent = parent def set_value(self, value): self.value_box.set_value(value) class ComboParameter(StringParameter): """docstring for Parameter""" def __init__(self, name, values, parent=None): super(ComboParameter, self).__init__(name, parent=parent) self.value_box.setParentItem(None) self.value_box = ComboBox(values, parent=self) def set_collapsed(self, collapsed): self.collapsed = collapsed self.value_box.setVisible(not self.collapsed) class BooleanParameter(Parameter): """docstring for Parameter""" def __init__(self, name, parent=None): super(BooleanParameter, self).__init__(name, parent=parent) self.value_box = CheckBox(parent=self) self.height = 15 self.height_collapsed = 15 def width(self): return self.label.boundingRect().topRight().x() + 18 class FilenameParameter(StringParameter): """docstring for Parameter""" def __init__(self, name, parent=None): super(FilenameParameter, self).__init__(name, parent=parent) self.value_box.setParentItem(None) self.value_box = FilenameBox(parent=self) def width(self): return self.label.boundingRect().topRight().x() + 20 class SliderBox(QGraphicsRectItem): 
"""docstring for SliderBox""" def __init__(self, datatype, min_value, max_value, increment, snap, parent=None): super(SliderBox, self).__init__(parent=parent) self.parent = parent self.dragging = False self.value_changed = False self.interactive = True self.datatype = datatype self.min_value = min_value self.max_value = max_value self.increment = increment self.snap = snap self._value = min_value self.height = 14 self.rect_radius = 7.0 self.control_distance = 0.55228*self.rect_radius self.setRect(3,15,94,self.height) self.label = ValueBoxText(self.textFromValue(self._value), parent=self) label_width = self.label.boundingRect().topRight().x() self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) def paint(self, painter, options, widget): # Background object is a rounded rectangle linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft()) linear_gradient.setColorAt(0, QColor(150,150,150)) linear_gradient.setColorAt(1, QColor(200,200,200)) painter.RenderHint(QPainter.Antialiasing) painter.setBrush(QBrush(linear_gradient)) painter.setPen(QPen(QColor(200,200,200), 0.75)) painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius) # Draw the bar using a round capped line linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().topRight()) linear_gradient.setColorAt(0, QColor(180,180,220)) linear_gradient.setColorAt(1, QColor(80,80,100)) painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap)) path = QPainterPath() path.moveTo(3+self.rect_radius, 15 + 0.5*self.height) fill_size = (self.rect().width()-2*self.rect_radius)*(self._value-self.min_value)/(self.max_value-self.min_value) path.lineTo(3+self.rect_radius+fill_size, 7.5 + 0.5+self.height) painter.drawPath(path) # Draw the highlight line similarly linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft()) linear_gradient.setColorAt(0, QColor(240,240,240,150)) linear_gradient.setColorAt(0.3, QColor(240,240,240,00)) painter.setPen(QPen(QBrush(linear_gradient), 0.9*self.height, Qt.SolidLine, Qt.RoundCap)) path = QPainterPath() path.moveTo(3+self.rect_radius, 15.0 + 0.5*self.height) path.lineTo(3+self.rect_radius+fill_size, 15.0 + 0.5*self.height) painter.drawPath(path) def valueFromText(self, text): try: if self.datatype is int: val = int(str(text)) else: val = float(str(text)) return val except: self.scene().window.set_status("Got unreasonable input...") return self._value def textFromValue(self, value): if self.datatype is int: return ("{:d}".format(value)) else: return ("{:.4g}".format(value)) def set_value(self, val): changed = False val = self.valueFromText(val) if val >= self.min_value and val <= self.max_value: if self.snap: val = (val/self.snap)*self.snap self._value = self.datatype(val) changed = True elif val < self.min_value: self._value = self.datatype(self.min_value) changed = True else: self._value = self.datatype(self.max_value) changed = True self.label.full_text = self.textFromValue(self._value) self.label.setPlainText(self.textFromValue(self._value)) self.refresh_label() self.update() if changed: self.parent.set_changed_flag() def refresh_label(self): label_width = self.label.boundingRect().topRight().x() self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) self.update() def value(self): return self._value def set_box_width(self, width): self.setRect(3,15, width-6, self.height) label_width = self.label.boundingRect().topRight().x() self.label.clip_text() 
self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) def mousePressEvent(self, event): if self.interactive: self.dragging = True self.original_value = self._value self.drag_start = event.scenePos() else: super(SliderBox, self).mouseMoveEvent(event) def mouseMoveEvent(self, event): if self.interactive: if self.dragging: delta = event.scenePos() - self.drag_start value_change = self.increment*int(delta.x()/10.0) if value_change != 0.0: self.value_changed = True self.set_value(self.original_value + value_change) else: super(SliderBox, self).mouseMoveEvent(event) def mouseReleaseEvent(self, event): if self.interactive: self.dragging = False if not self.value_changed: self.label.setPos(3+5,15-5) self.label.set_text_interaction(True) self.value_changed = False else: super(SliderBox, self).mouseMoveEvent(event) class StringBox(QGraphicsRectItem): """docstring for StringBox""" def __init__(self, parent=None): super(StringBox, self).__init__(parent=parent) self.clicked = False self._value = "" self.height = 14 self.rect_radius = 7.0 self.control_distance = 0.55228*self.rect_radius self.setRect(3,15,94,self.height) self.label = ValueBoxText(self._value, parent=self) label_width = self.label.boundingRect().topRight().x() self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) def paint(self, painter, options, widget): # Background object is a rounded rectangle linear_gradient = QLinearGradient(self.rect().topLeft(), self.rect().bottomLeft()) linear_gradient.setColorAt(0, QColor(150,150,150)) linear_gradient.setColorAt(1, QColor(200,200,200)) painter.setRenderHint(QPainter.Antialiasing) painter.setBrush(QBrush(linear_gradient)) painter.setPen(QPen(QColor(200,200,200), 0.75)) painter.drawRoundedRect(self.rect(), self.rect_radius, self.rect_radius) def set_value(self, value): self._value = value self.label.full_text = value self.label.setPlainText(value) self.label.clip_text() self.refresh_label() self.update() if hasattr(self, 'parent'): self.parent.set_changed_flag() def refresh_label(self): label_width = self.label.boundingRect().topRight().x() self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) self.update() def value(self): return self._value def set_box_width(self, width): self.setRect(3,15, width-6, self.height) self.label.clip_text() self.refresh_label() def mousePressEvent(self, event): self.clicked = True def mouseReleaseEvent(self, event): if self.clicked: self.label.setPos(3+5,15-5) self.label.set_text_interaction(True) self.clicked = False class FilenameBox(StringBox): """docstring for FilenameBox""" def __init__(self, parent=None): super(FilenameBox, self).__init__(parent=parent) self.browse_button = QGraphicsRectItem(self.rect().width()-16, -3, 15, 12, parent=self) self.browse_button.setBrush(QBrush(QColor(220,220,220))) self.browse_button.mousePressEvent = lambda e: self.save_file() # self.browse_button.mouseReleaseEvent = lambda e: self.save_file() def save_file(self): path = os.path.dirname(os.path.realpath(__file__)) fn = QFileDialog.getSaveFileName(None, 'Save Results As', path) self.set_value(fn[0]) self.label.clip_text() self.refresh_label() def refresh_label(self): label_width = self.label.boundingRect().topRight().x() self.label.setPos(3+0.5*self.rect().width()-0.5*label_width,15-5) self.browse_button.setRect(self.rect().width()-16, -3, 15, 12) self.update() class ComboBox(StringBox): """docstring for ComboBox""" def __init__(self, values, parent=None): super(ComboBox, self).__init__(parent=parent) self.values = values def menu_changed(self, 
action): self.set_value(action.data()) def mousePressEvent(self, event): self.clicked = True def mouseReleaseEvent(self, event): if self.clicked: menu = QMenu() for v in self.values: act = QAction(v, self.scene()) act.setData(v) menu.addAction(act) menu.triggered.connect(self.menu_changed) menu.exec_(event.screenPos()) self.clicked = False class CheckBox(QGraphicsRectItem): """docstring for CheckBox""" def __init__(self, parent=None): super(CheckBox, self).__init__(parent=parent) self.parent = parent self.setRect(self.rect().width()-17, -3, 13, 13) self.unchecked_brush = QBrush(QColor(220,220,220)) self.checked_brush = QBrush(QColor(40,40,40)) self.setBrush(self.unchecked_brush) self._value = False self.clicked = False def set_box_width(self, width): self.setRect(width-17, -3, 13, 13) def value(self): return self._value def set_value(self, value): self._value = value if self._value: self.setBrush(self.checked_brush) else: self.setBrush(self.unchecked_brush) def mousePressEvent(self, event): self.clicked = True def mouseReleaseEvent(self, event): if self.clicked: self.set_value(not self._value) self.clicked = False class ValueBoxText(QGraphicsTextItem): """docstring for ValueBoxText""" def __init__(self, string, parent=None): super(ValueBoxText, self).__init__(string, parent=parent) self.setTextInteractionFlags(Qt.NoTextInteraction) self.ItemIsFocusable = True self.parent = parent self.full_text = string self.clip_text() def set_text_interaction(self, value): if value and (self.textInteractionFlags() == Qt.NoTextInteraction): self.setTextInteractionFlags(Qt.TextEditorInteraction) self.setPlainText(self.full_text) self.setFocus(Qt.MouseFocusReason) self.setSelected(True) c = self.textCursor() c.select(QTextCursor.Document) self.setTextCursor(c) elif not value and (self.textInteractionFlags() == Qt.TextEditorInteraction): self.setTextInteractionFlags(Qt.NoTextInteraction) c = self.textCursor() c.clearSelection() self.setTextCursor(c) self.clearFocus() def clip_text(self): if self.parent.rect().width() < self.boundingRect().topRight().x(): clipped = self.full_text[:int(self.parent.rect().width()/7)-3] if int(self.parent.rect().width()/6)-3 == len(self.full_text)-1: self.setPlainText(clipped) else: self.setPlainText(clipped+"...") def focusOutEvent(self, event): self.full_text = self.toPlainText() self.set_text_interaction(False) self.parent.set_value(self.full_text) self.clip_text() self.parent.refresh_label() return super(ValueBoxText, self).focusOutEvent(event) def keyPressEvent(self, event): if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter: self.full_text = self.toPlainText() self.set_text_interaction(False) self.parent.set_value(self.full_text) self.clip_text() self.parent.refresh_label() else: return super(ValueBoxText, self).keyPressEvent(event)
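A minimal sketch of dropping one of these parameter items into a scene (assumes a QApplication can run; the parameter name and range are illustrative):

app = QApplication([])
scene = QGraphicsScene()
amp = NumericalParameter("amplitude", float, 0.0, 1.0, 0.05, None)
amp.set_value(0.25)            # clamped to [min, max] by SliderBox.set_value
scene.addItem(amp)
view = QGraphicsView(scene)
view.show()
app.exec_()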
apache-2.0
6,470,298,466,741,095,000
35.315789
121
0.61087
false
3.684913
false
false
false
charleso/git-cc
git_cc/gitcc.py
1
2009
#!/usr/bin/env python

import inspect
import sys
from optparse import OptionParser

from . import checkin
from . import init
from . import rebase
from . import reset
from . import sync
from . import tag
from . import update
from . import version

commands = [init, rebase, checkin, sync, reset, tag, update, version]


def main():
    args = sys.argv[1:]
    for cmd in commands:
        if args and get_module_name(cmd) == args[0]:
            return invoke(cmd, args)
    usage()


def invoke(cmd, args):
    _args, _, _, defaults = inspect.getargspec(cmd.main)
    defaults = defaults if defaults else []
    diff = len(_args) - len(defaults)
    _args = _args[diff:]
    parser = OptionParser(description=cmd.__doc__)
    for (name, default) in zip(_args, defaults):
        option = {
            'default': default,
            'help': cmd.ARGS[name],
            'dest': name,
        }
        if default is False:
            option['action'] = "store_true"
        elif default is None:
            option['action'] = "store"
        name = name.replace('_', '-')
        parser.add_option('--' + name, **option)
    (options, args) = parser.parse_args(args[1:])
    if len(args) < diff:
        parser.error("incorrect number of arguments")
    for name in _args:
        args.append(getattr(options, name))
    cmd.main(*args)


def usage():
    print('usage: gitcc COMMAND [ARGS]\n')
    width = 11
    for cmd in commands:
        print(' %s %s' % (get_module_name(cmd).ljust(width),
                          cmd.__doc__.split('\n')[0]))
    sys.exit(2)


def get_module_name(module):
    """Return the name of the given module, without the package name.

    For example, if the given module is checkin, the module name is
    "git_cc.checkin" and without the package name is "checkin". Note that
    the given module should already have been imported.
    """
    _, _, module_name = module.__name__.rpartition('.')
    return module_name


if __name__ == '__main__':
    main()
gpl-2.0
6,877,740,957,302,435,000
25.090909
69
0.590841
false
3.769231
false
false
false
G8bao7/camelbell-server
check_oracle.py
1
10171
#!//bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import cx_Oracle
import logging
import logging.config

logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("oracle")

path='./include'
sys.path.insert(0,path)
import functions as func
import camelbell_oracle as oracle
from multiprocessing import Process


def check_oracle(host,port,dsn,username,password,server_id,tags):
    url = "%s:%s/%s" % (host, port, dsn)
    logger_msg = "[BBQ]begin check oracle %s " %(url)
    logger.info(logger_msg)

    retry = 4
    conn = None
    for i in range(1,retry):
        try:
            logger_msg="[BBQ] oracle connect %s retry [%s]" %(url, i)
            logger.info(logger_msg)
            conn=cx_Oracle.connect(username,password,url)  # get the connection object
            break
        except Exception, e:
            logger_msg="[BBQ] oracle connect %s, %s" %(url,str(e).strip('\n'))
            logger.warning(logger_msg)
            conn = None
            continue

    func.check_db_status(server_id,host,port,tags,'oracle')

    if conn == None:
        try:
            connect=0
            sql="replace into oracle_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
            param=(server_id,host,port,tags,connect)
            func.mysql_exec(sql,param)
        except Exception, e:
            logger.error(str(e).strip('\n'))
            sys.exit(1)
        finally:
            sys.exit(1)

    try:
        #get info by v$instance
        connect = 1
        instance_name = oracle.get_instance(conn,'instance_name')
        instance_role = oracle.get_instance(conn,'instance_role')
        database_role = oracle.get_database(conn,'database_role')
        open_mode = oracle.get_database(conn,'open_mode')
        protection_mode = oracle.get_database(conn,'protection_mode')
        if database_role == 'PRIMARY':
            database_role_new = 'm'
            dg_stats = '-1'
            dg_delay = '-1'
        else:
            database_role_new = 's'
            dg_stats = oracle.get_dg_stats(conn)
            dg_delay = oracle.get_dg_delay(conn)
        instance_status = oracle.get_instance(conn,'status')
        startup_time = oracle.get_instance(conn,'startup_time')
        #print startup_time
        #startup_time = time.strftime('%Y-%m-%d %H:%M:%S',startup_time)
        #localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        #uptime = (localtime - startup_time).seconds
        #print uptime
        uptime = oracle.get_instance(conn,'startup_time')
        version = oracle.get_instance(conn,'version')
        instance_status = oracle.get_instance(conn,'status')
        database_status = oracle.get_instance(conn,'database_status')
        host_name = oracle.get_instance(conn,'host_name')
        archiver = oracle.get_instance(conn,'archiver')

        #get info by sql count
        session_total = oracle.get_sessions(conn)
        session_actives = oracle.get_actives(conn)
        session_waits = oracle.get_waits(conn)

        #get info by v$parameters
        parameters = oracle.get_parameters(conn)
        processes = parameters['processes']

        ##get info by v$parameters
        sysstat_0 = oracle.get_sysstat(conn)
        time.sleep(1)
        sysstat_1 = oracle.get_sysstat(conn)
        session_logical_reads_persecond = sysstat_1['session logical reads']-sysstat_0['session logical reads']
        physical_reads_persecond = sysstat_1['physical reads']-sysstat_0['physical reads']
        physical_writes_persecond = sysstat_1['physical writes']-sysstat_0['physical writes']
        physical_read_io_requests_persecond = sysstat_1['physical write total IO requests']-sysstat_0['physical write total IO requests']
        physical_write_io_requests_persecond = sysstat_1['physical read IO requests']-sysstat_0['physical read IO requests']
        db_block_changes_persecond = sysstat_1['db block changes']-sysstat_0['db block changes']
        os_cpu_wait_time = sysstat_0['OS CPU Qt wait time']
        logons_persecond = sysstat_1['logons cumulative']-sysstat_0['logons cumulative']
        logons_current = sysstat_0['logons current']
        opened_cursors_persecond = sysstat_1['opened cursors cumulative']-sysstat_0['opened cursors cumulative']
        opened_cursors_current = sysstat_0['opened cursors current']
        user_commits_persecond = sysstat_1['user commits']-sysstat_0['user commits']
        user_rollbacks_persecond = sysstat_1['user rollbacks']-sysstat_0['user rollbacks']
        user_calls_persecond = sysstat_1['user calls']-sysstat_0['user calls']
        db_block_gets_persecond = sysstat_1['db block gets']-sysstat_0['db block gets']
        #print session_logical_reads_persecond

        ##################### insert data to mysql server#############################
        func.mysql_exec("replace into oracle_status_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_status where host='%s' and port=%s;" % (host, port),'')
        func.mysql_exec("delete from oracle_status where host='%s' and port=%s;" % (host, port),'')
        sql = "insert into oracle_status(server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
        param = (server_id,host,port,tags,connect,instance_name,instance_role,instance_status,database_role,open_mode,protection_mode,host_name,database_status,startup_time,uptime,version,archiver,session_total,session_actives,session_waits,dg_stats,dg_delay,processes,session_logical_reads_persecond,physical_reads_persecond,physical_writes_persecond,physical_read_io_requests_persecond,physical_write_io_requests_persecond,db_block_changes_persecond,os_cpu_wait_time,logons_persecond,logons_current,opened_cursors_persecond,opened_cursors_current,user_commits_persecond,user_rollbacks_persecond,user_calls_persecond,db_block_gets_persecond)
        func.mysql_exec(sql,param)
        logger.info("Finish INSERT DATA ")
        func.update_db_status_init(server_id,database_role_new,version,host,port,tags)
        logger.info("Finish update_db_status_init")

        #check tablespace
        qSql = "select 1 from oracle_tablespace where host='%s' and port=%s and create_time>=curdate() limit 1" % (host,port)
        a = func.mysql_query(qSql)
        if a == 0:
            func.mysql_exec("insert ignore into oracle_tablespace_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
            func.mysql_exec("delete from oracle_tablespace where host='%s' and port=%s;" % (host, port),'')
            tablespace = oracle.get_tablespace(conn)
            if tablespace:
                for line in tablespace:
                    ts_name=line[0]
                    if igTsNames.count(ts_name) > 0:
                        continue
                    sql="insert into oracle_tablespace(server_id,host,port,tags,tablespace_name,total_size,used_size,avail_size,used_rate) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
                    param=(server_id,host,port,tags,line[0],line[1],line[2],line[3],int(line[4].rstrip("%")))
                    logger.info(param)
                    func.mysql_exec(sql,param)
        else:
            logger.info("%s:%s today has stat oracle_tablespace. will not do" % (host,port))
        logger.info("Finish oracle_tablespace")

    except Exception, e:
        logger.error(e)
        sys.exit(1)
    finally:
        conn.close()


def main():
    #get oracle servers list
    #servers=func.mysql_query("select id,host,port,dsn,username,password,tags from db_servers_oracle where is_delete=0 and monitor=1;")
    servers=func.mysql_query("select id,host,port,dsn,tags from db_servers_oracle where is_delete=0 and monitor=1;")
    #++ guoqi
    cnfKey = "monitor_oracle"
    username = func.get_config(cnfKey,'user')
    password = func.get_config(cnfKey,'passwd')

    min_interval = func.get_option('min_interval')
    logger.info("check oracle controller start.")
    if servers:
        plist = []
        for row in servers:
            (server_id, host, port, dsn, tags) = row
            p = Process(target = check_oracle, args = (host,port,dsn,username,password,server_id,tags))
            plist.append(p)
            p.start()
        #time.sleep(10)
        #for p in plist:
        #    p.terminate()
        for p in plist:
            p.join()
    else:
        logger.warning("check oracle: not found any servers")

    func.mysql_exec('update oracle_status set connect=0,create_time=now() where create_time<date_sub(now(), interval %s second)' % (min_interval))
    func.mysql_exec('DELETE ot FROM oracle_tablespace AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
    func.mysql_exec('DELETE ot FROM oracle_status AS ot, db_servers_oracle AS d where (d.is_delete=1 or d.monitor=0) AND ot.host=d.host AND ot.port=d.port')
    #func.mysql_exec('DELETE ds FROM oracle_status AS ds, (SELECT s.id,d.host FROM oracle_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
    func.mysql_exec('DELETE ds FROM db_status AS ds, (SELECT s.id,d.host FROM db_status AS s LEFT JOIN db_servers_oracle AS d ON d.is_delete=0 AND d.monitor=1 AND s.host=d.host AND s.port=d.port WHERE db_type="oracle" HAVING d.`host` IS NULL) AS t WHERE ds.id=t.id')
    logger.info("check oracle controller finished.")


if __name__=='__main__':
    igTsNames = ["SYSAUX", "SYSTEM"]
    main()
gpl-3.0
-3,721,397,332,160,236,000
51.386598
790
0.662698
false
3.315824
false
false
false
johnmgregoire/NanoCalorimetry
plot_pprvsTsubtract20110818.py
1
3566
import numpy, h5py, os
from PnSC_main import *
from PnSC_h5io import *
from PnSC_math import *

p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/20110816_Zr-Hf-B.h5'
h5f=h5py.File(p, mode='r')

ehl=[\
('quadlinheating2_0817', 'cell9_25mAlinquad2_first_1_of_1', 'Zr-B, 1st'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_second_1_of_1', 'Zr-B, 2nd'),\
('quadlinheating2_0817', 'cell9_25mAlinquad2_third_1_of_1', 'Zr-B, 3rd'), \
#('quadlinheating2', 'pre_25mApluslinquad2_cell16_1_of_1', 'Hf-B, nth'), \
#('quadlinheating2', 'cell11_25malinquad2_1_of_1', 'empty'), \
]

tarrs=[]
pprarrs=[]
for i, (e, h, l) in enumerate(ehl):
    hpsdl=CreateHeatProgSegDictList(p, e, h)
    T=hpsdl[2]['sampletemperature'][0, :]
    ppr=hpsdl[2]['samplepowerperrate'][0, :]
    if 0:
        pylab.plot(T, ppr*1.e6, label=l)
        pylab.xlabel('Temperature (C)')
        pylab.ylabel('power per rate ($\mu$J/K)')
        pylab.legend(loc=0)
    tarrs+=[T]
    pprarrs+=[ppr]

def extremesmooth(x, binn=70, SGpts=170, SGorder=3):
    xb=numpy.array([x[i*binn:(i+1)*binn].mean() for i in range(len(x)//binn)])
    xbf=savgolsmooth(xb, nptsoneside=SGpts, order =SGorder)
    ia=numpy.arange(binn, dtype='float32')/binn
    xr=numpy.concatenate([ia*(b-a)+b for a, b in zip(xbf[:-1], xbf[1:])])
    xr=numpy.concatenate([(xbf[1]-xbf[0])*ia[:binn//2]+xbf[0], xr, (xbf[-1]-xbf[-2])*ia[:binn//2]+xbf[-1]])
    xr=numpy.concatenate([xr, (xbf[-1]-xbf[-2])*ia[:len(x)-len(xr)]+xbf[-1]])
    return xr

if 1:
    x=extremesmooth(pprarrs[0])
    y=extremesmooth(pprarrs[1])
    z=extremesmooth(pprarrs[2])
    xt=tarrs[0]
    yt=tarrs[1]
    zt=tarrs[2]

    tmin=max([t.min() for t in [xt, yt, zt]])
    tmax=min([t.max() for t in [xt, yt, zt]])
    tinterp=numpy.linspace(tmin, tmax, 2000)
    xinterp=numpy.interp(tinterp, xt, x)
    yinterp=numpy.interp(tinterp, yt, y)
    zinterp=numpy.interp(tinterp, zt, z)

    pylab.figure()
    for i, (t, a, ai) in enumerate([(xt, x, xinterp), (yt, y, yinterp), (zt, z, zinterp)]):
        pylab.subplot(3, 1, i+1)
        pylab.plot(tinterp, ai)
        pylab.plot(t, a)

    pylab.figure()
    xsub=xinterp-(zinterp+yinterp)/2.
    for i, (a, l) in enumerate([(xinterp, '1st'), ((zinterp+yinterp)/2., 'subsequent')]):
        pylab.plot(tinterp, a*1.e6, label=l, lw=2)
    #pylab.legend(loc=2)
    pylab.xlabel('Temperature (C)', fontsize=14)
    pylab.ylabel('Calorimetric Signal ($\mu$J/K)', fontsize=14)
    # pylab.text(700, 14, '1st',color='b', ha='left', fontsize=14)
    # pylab.text(450, 14, 'subsequent',color='g', ha='right', fontsize=14)
    pylab.annotate('1st',(540, 14),xytext=(630, 14),fontsize=14,color='b',arrowprops={'arrowstyle':'->','color':'b'})
    pylab.annotate('subsequent',(490, 14),xytext=(380, 14),fontsize=14,color='g',arrowprops={'arrowstyle':'->','color':'g'}, ha='right')
    pylab.xlim(0, 1200)

    pylab.figure()
    pylab.plot([0, 1200], [0, 0], 'k', lw=1)
    pylab.plot(tinterp, xsub*1.e6, 'r-', lw=2)
    # pylab.annotate(' ',(510, -2),xytext=(510, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
    # pylab.annotate(' ',(1010, -14),xytext=(1010, 0),color='k',arrowprops={'arrowstyle':'simple','color':'k'})
    #pylab.legend()
    pylab.xlabel('Temperature (C)', fontsize=14)
    pylab.ylabel('Differential signal ($\mu$J/K)', fontsize=14)
    pylab.xlim(0, 1200)
    pylab.subplots_adjust(right=.55, top=.5)

    print xsub[(tinterp>260)*(tinterp<670)].sum()*(tinterp[1]-tinterp[0])*1.e6
    print xsub[tinterp>670].sum()*(tinterp[1]-tinterp[0])*1.e6
    pylab.show()
bsd-3-clause
3,636,669,032,018,904,600
39.988506
136
0.616938
false
2.461008
false
false
false
rmed/wat-bridge
wat_bridge/signals.py
1
3542
# -*- coding: utf-8 -*-
#
# wat-bridge
# https://github.com/rmed/wat-bridge
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Signal handlers."""

import sys

from wat_bridge.static import SETTINGS, get_logger
from wat_bridge.helper import get_contact, get_phone, db_get_group
from wat_bridge.tg import tgbot
from wat_bridge.wa import wabot

from telebot import util as tgutil

logger = get_logger('signals')


def sigint_handler(signal, frame):
    """Function used as handler for SIGINT to terminate program."""
    sys.exit(0)


def to_tg_handler(sender, **kwargs):
    """Handle signals sent to Telegram.

    This will involve sending messages through the Telegram bot.

    Args:
        phone (str): Phone number that sent the message.
        message (str): The message received
    """
    phone = kwargs.get('phone')
    message = kwargs.get('message', '')

    # Check if known contact
    contact = get_contact(phone)
    chat_id = SETTINGS['owner']

    if not contact:
        # Unknown sender
        output = 'Message from #unknown\n'
        output += 'Phone number: %s\n' % phone
        output += '---------\n'
        output += message

        logger.info('received message from unknown number: %s' % phone)

    else:
        group = db_get_group(contact)
        if not group:
            # Known sender
            output = 'Message from #%s\n' % contact
            output += '---------\n'
            output += message
        else:
            # Contact is bound to group
            chat_id = group
            output = message

        logger.info('received message from %s' % contact)

    # Deliver message through Telegram
    for chunk in tgutil.split_string(output, 3000):
        tgbot.send_message(chat_id, chunk)


def to_wa_handler(sender, **kwargs):
    """Handle signals sent to Whatsapp.

    This will involve sending messages through the Whatsapp bot.

    Args:
        contact (str): Name of the contact to send the message to.
        message (str): The message to send
    """
    contact = kwargs.get('contact')
    message = kwargs.get('message')

    # Check if known contact
    phone = get_phone(contact)

    if not phone:
        # Abort
        tgbot.send_message(
            SETTINGS['owner'],
            'Unknown contact: "%s"' % contact
        )
        return

    logger.info('sending message to %s (%s)' % (contact, phone))
    wabot.send_msg(phone=phone, message=message)
mit
-2,279,454,052,211,999,200
29.791304
80
0.661395
false
3.996614
false
false
false
Caylo/easybuild-framework
easybuild/toolchains/linalg/libsci.py
1
3408
##
# Copyright 2014-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for Cray's LibSci library, which provides BLAS/LAPACK support.
cfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/

:author: Petar Forai (IMP/IMBA, Austria)
:author: Kenneth Hoste (Ghent University)
"""
import os

from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.linalg import LinAlg

CRAY_LIBSCI_MODULE_NAME = 'cray-libsci'


class LibSci(LinAlg):
    """Support for Cray's LibSci library, which provides BLAS/LAPACK support."""
    # BLAS/LAPACK support
    # via cray-libsci module, which gets loaded via the PrgEnv module
    # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/
    BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]

    # no need to specify libraries, compiler driver takes care of linking the right libraries
    # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.
    BLAS_LIB = []
    BLAS_LIB_MT = []

    LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
    LAPACK_IS_BLAS = True

    BLACS_MODULE_NAME = []
    SCALAPACK_MODULE_NAME = []

    def _get_software_root(self, name):
        """Get install prefix for specified software name; special treatment for Cray modules."""
        if name == 'cray-libsci':
            # Cray-provided LibSci module
            env_var = 'CRAY_LIBSCI_PREFIX_DIR'
            root = os.getenv(env_var, None)
            if root is None:
                raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var)
            else:
                self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root)
        else:
            root = super(LibSci, self)._get_software_root(name)

        return root

    def _set_blacs_variables(self):
        """Skip setting BLACS related variables"""
        pass

    def _set_scalapack_variables(self):
        """Skip setting ScaLAPACK related variables"""
        pass

    def definition(self):
        """
        Filter BLAS module from toolchain definition.

        The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,
        and thus is not a direct toolchain component.
        """
        tc_def = super(LibSci, self).definition()
        tc_def['BLAS'] = []
        tc_def['LAPACK'] = []
        return tc_def
gpl-2.0
3,152,435,905,276,720,000
36.450549
104
0.681631
false
3.491803
false
false
false
dsparrow27/vortex
src/ds/vortex/nodes/comparison/equalTo.py
1
1290
from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs


class EqualToNode(baseNode.BaseNode):
    def __init__(self, name):
        """
        :param name: str, the name of the node
        """
        baseNode.BaseNode.__init__(self, name)

    def initialize(self):
        baseNode.BaseNode.initialize(self)
        self.outputPlug_ = plugs.OutputPlug("output", self)
        self.addPlug(self.outputPlug_, clean=True)
        self.value1Plug_ = plugs.InputPlug("value1", self, value=0)
        self.value2Plug_ = plugs.InputPlug("value2", self, value=0)
        self.addPlug(self.value1Plug_, clean=True)
        self.addPlug(self.value2Plug_, clean=True)
        self.plugAffects(self.value1Plug_, self.outputPlug_)
        self.plugAffects(self.value2Plug_, self.outputPlug_)

    def compute(self, requestPlug):
        baseNode.BaseNode.compute(self, requestPlug=requestPlug)
        if requestPlug != self.outputPlug_:
            return None
        # Compare the plug values, not the plug object against a value.
        result = self.value1Plug_.value == self.value2Plug_.value
        requestPlug.value = result
        requestPlug.dirty = False
        return result


def getNode():
    """General function that returns our node class, used to create our node via UI etc.

    :return: Node class
    """
    return EqualToNode
mit
-2,959,725,175,633,095,000
32.076923
85
0.651938
false
3.623596
false
false
false
dkulikovsky/graphite-ch-web
webapp/graphite/events/views.py
1
2767
import datetime
import time

from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.timezone import localtime, now

from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime

from django.core.urlresolvers import get_script_prefix


def to_timestamp(dt):
    return time.mktime(dt.timetuple())


class EventEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return to_timestamp(obj)
        return json.JSONEncoder.default(self, obj)


def view_events(request):
    if request.method == "GET":
        context = {'events': fetch(request),
                   'slash': get_script_prefix()}
        return render_to_response("events.html", context)
    else:
        return post_event(request)


def detail(request, event_id):
    e = get_object_or_404(models.Event, pk=event_id)
    context = {'event': e,
               'slash': get_script_prefix()}
    return render_to_response("event.html", context)


def post_event(request):
    if request.method == 'POST':
        event = json.loads(request.body)
        assert isinstance(event, dict)

        values = {}
        values["what"] = event["what"]
        values["tags"] = event.get("tags", None)
        values["when"] = datetime.datetime.fromtimestamp(
            event.get("when", time.time()))
        if "data" in event:
            values["data"] = event["data"]

        e = models.Event(**values)
        e.save()

        return HttpResponse(status=200)
    else:
        return HttpResponse(status=405)


def get_data(request):
    if 'jsonp' in request.REQUEST:
        response = HttpResponse(
            "%s(%s)" % (request.REQUEST.get('jsonp'),
                        json.dumps(fetch(request), cls=EventEncoder)),
            content_type='text/javascript')
    else:
        response = HttpResponse(
            json.dumps(fetch(request), cls=EventEncoder),
            content_type="application/json")
    return response


def fetch(request):
    #XXX we need to move to USE_TZ=True to get rid of localtime() conversions
    if request.GET.get("from", None) is not None:
        time_from = localtime(parseATTime(request.GET["from"])).replace(tzinfo=None)
    else:
        time_from = datetime.datetime.fromtimestamp(0)

    if request.GET.get("until", None) is not None:
        time_until = localtime(parseATTime(request.GET["until"])).replace(tzinfo=None)
    else:
        time_until = now()

    tags = request.GET.get("tags", None)
    if tags is not None:
        tags = request.GET.get("tags").split(" ")

    return [x.as_dict() for x in
            models.Event.find_events(time_from, time_until, tags=tags)]
apache-2.0
3,258,372,787,531,299,000
29.406593
86
0.633177
false
3.774898
false
false
false
jat255/hyperspy
hyperspy/tests/misc/test_utils.py
1
2016
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.

from hyperspy.misc.utils import slugify, parse_quantity, is_hyperspy_signal
from hyperspy import signals
import numpy as np


def test_slugify():
    assert slugify('a') == 'a'
    assert slugify('1a') == '1a'
    assert slugify('1') == '1'
    assert slugify('a a') == 'a_a'

    assert slugify('a', valid_variable_name=True) == 'a'
    assert slugify('1a', valid_variable_name=True) == 'Number_1a'
    assert slugify('1', valid_variable_name=True) == 'Number_1'

    assert slugify('a', valid_variable_name=False) == 'a'
    assert slugify('1a', valid_variable_name=False) == '1a'
    assert slugify('1', valid_variable_name=False) == '1'


def test_parse_quantity():
    # From the metadata specification, the quantity is defined as
    # "name (units)" without brackets in the name of the quantity
    assert parse_quantity('a (b)') == ('a', 'b')
    assert parse_quantity('a (b/(c))') == ('a', 'b/(c)')
    assert parse_quantity('a (c) (b/(c))') == ('a (c)', 'b/(c)')
    assert parse_quantity('a [b]') == ('a [b]', '')
    assert parse_quantity('a [b]', opening = '[', closing = ']') == ('a', 'b')


def test_is_hyperspy_signal():
    s = signals.Signal1D(np.zeros((5, 5, 5)))
    p = object()

    assert is_hyperspy_signal(s) is True
    assert is_hyperspy_signal(p) is False
gpl-3.0
5,785,363,726,843,865,000
37.037736
78
0.659722
false
3.371237
false
false
false
blowmage/gcloud-python
gcloud/storage/demo/__init__.py
1
1054
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from gcloud import storage

__all__ = ['create_bucket', 'list_buckets', 'PROJECT_ID']

PROJECT_ID = os.getenv('GCLOUD_TESTS_PROJECT_ID')


def list_buckets(connection):
    return list(storage.list_buckets(project=PROJECT_ID,
                                     connection=connection))


def create_bucket(bucket_name, connection):
    return storage.create_bucket(bucket_name, PROJECT_ID,
                                 connection=connection)
apache-2.0
5,348,173,973,588,529,000
34.133333
74
0.706831
false
4.101167
false
false
false
vaiski/checksum
src/checksum/checksum.py
1
4815
# -*- coding: utf-8 -*-
'''
Checksum
========

Provides an extendable checksum calculation and validation library for
different checksum algorithms.
'''


class ChecksumStrategy(object):
    '''
    An interface class for checksum algorithm classes.
    '''

    def checksum(self, body):
        '''
        Calculates a checksum for the body string provided.
        '''
        raise NotImplementedError('Checksum calculation is not implemented '
                                  'for this checksum strategy.')

    def is_valid(self, value, checksum=None):
        '''
        Validates a string against the checksum.

        This abstract base class provides an elementary checksum validation
        method. Advanced validation methods should be implemented in
        subclasses when possible.
        '''
        body = value
        if checksum is None:
            (body, checksum) = self.split(value)
        return self.checksum(body) == checksum

    def split(self, value):
        '''
        Splits the string including a checksum according to the checksum
        algorithm used.
        '''
        raise NotImplementedError('Splitting is not implemented for this '
                                  'checksum strategy.')

    def _prepare(self, body):
        '''
        Method to prepare the body string for checksum calculation.
        '''
        return [int(d) for d in str(body)]


class Checksum(object):
    '''
    Checksum context class.

    Provides different checksum calculation and verification algorithms by
    acting as a factory class.
    '''

    _strategies = {}

    def __init__(self, strategy=None, body=None):
        '''
        Checksum context class constructor.

        :param strategy : name of the used checksum algorithm
        :param body     : string that the checksum is calculated for
        '''
        self._strategy = None
        self._body = None

        self.strategy = strategy
        self.body = body

    # Setters and getters
    # -------------------
    @property
    def body(self):
        '''
        Getter for the body property.
        '''
        return self._body

    @body.setter
    def body(self, value):
        '''
        Setter for the body property.
        '''
        if value is not None:
            self._body = value
        else:
            self._body = ''

    @property
    def strategy(self):
        '''
        Getter for the strategy property.
        '''
        return self._strategy

    @strategy.setter
    def strategy(self, value):
        '''
        Setter for the strategy property.
        '''
        if value is None:
            return

        if value in self._strategies:
            strategy = self._strategies[value]()
        else:
            raise NotImplementedError('Checksum strategy %s is not '
                                      'implemented.' % value)

        if (isinstance(strategy, ChecksumStrategy) and
                type(strategy) != ChecksumStrategy):
            self._strategy = strategy
        else:
            raise TypeError(
                'Strategy requires a subclass of ChecksumStrategy.'
                ' Got instead %s.' % type(strategy))

    def checksum(self):
        '''
        Calculates the checksum using selected algorithm for the body string.
        '''
        if self.strategy is not None:
            return self.strategy.checksum(self._body)

    def is_valid(self, value, checksum=None):
        '''
        Validates either a string containing a checksum, or a body string
        against a separately provided checksum.
        '''
        if self.strategy is not None:
            return self.strategy.is_valid(value, checksum)

    def split(self, value):
        '''
        Splits a string containing a body and a checksum according to the
        conventions of selected checksum algorithm.
        '''
        if self.strategy is not None:
            return self.strategy.split(value)

    def type(self):
        '''
        Returns the name of used checksum algorithm.
        '''
        if self.strategy is not None:
            return self.strategy.name
        else:
            return None

    @classmethod
    def register_strategy(cls, strategy_cls):
        '''
        Registers a checksum strategy class in the available checksum
        strategies.
        '''
        strategy = strategy_cls()
        if (isinstance(strategy, ChecksumStrategy) and
                type(strategy) != ChecksumStrategy):
            cls._strategies[strategy_cls.name] = strategy_cls
        else:
            raise TypeError(
                'Strategy requires a subclass of ChecksumStrategy.'
                ' Got instead %s.' % type(strategy))

    @classmethod
    def list_strategies(cls):
        '''
        Lists all the available strategies for checksum calculation.
        '''
        return cls._strategies.keys()
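

# A minimal illustrative strategy, assuming only the interface defined above;
# the "mod10" name and digit-sum logic are hypothetical examples, not part of
# the original library.
class Mod10Strategy(ChecksumStrategy):

    name = 'mod10'

    def checksum(self, body):
        # Sum of the digits, modulo 10, rendered as a single character.
        return str(sum(self._prepare(body)) % 10)

    def split(self, value):
        # By this example's convention, the checksum is the last character.
        return (value[:-1], value[-1])


# Registering the class makes it available through the context class:
#
#     c = Checksum(strategy='mod10', body='123')
#     c.checksum()          # -> '6'
#     c.is_valid('1236')    # -> True
Checksum.register_strategy(Mod10Strategy)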
mit
-6,618,260,355,587,514,000
27.660714
79
0.580685
false
5.073762
false
false
false
jovencoda/evoca-v2
evoca_v2/core/migrations/0022_auto_20170820_0036.py
1
1202
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-20 00:36
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0021_channel_image'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChannelTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uniqueID', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField(blank=True, null=True)),
            ],
        ),
        migrations.AlterField(
            model_name='channel',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='static/img/'),
        ),
        migrations.AddField(
            model_name='channeltag',
            name='related_channel',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_channel', to='core.Channel'),
        ),
    ]
gpl-3.0
819,544,230,349,932,200
32.388889
132
0.587354
false
4.173611
false
false
false
skosukhin/spack
var/spack/repos/builtin/packages/r-alsace/package.py
1
2082
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAlsace(RPackage):
    """Alternating Least Squares (or Multivariate Curve Resolution) for
    analytical chemical data, in particular hyphenated data where the first
    direction is a retention time axis, and the second a spectral axis.
    Package builds on the basic als function from the ALS package and adds
    functionality for high-throughput analysis, including definition of time
    windows, clustering of profiles, retention time correction, etcetera."""

    homepage = "https://www.bioconductor.org/packages/alsace/"
    url = "https://git.bioconductor.org/packages/alsace"

    version('1.12.0', git='https://git.bioconductor.org/packages/alsace', commit='1364c65bbff05786d05c02799fd44fd57748fae3')

    depends_on('r-als', type=('build', 'run'))
    depends_on('r-ptw', type=('build', 'run'))
lgpl-2.1
-4,289,728,429,612,582,000
47.418605
124
0.694524
false
4.027079
false
false
false
lancms/lancms2
fabfile.py
1
5721
from fabric.api import *
from fabric.colors import green, red
from fabric.contrib import files
import datetime
import os


def _environment ():
    env.release = datetime.datetime.now().strftime ("%Y-%m-%d-%H%M%S")
    env.project_name = 'lancms2'
    # FIXME: hardcoded path:
    env.path_home = '/opt/lancms2/'
    env.path_root = os.path.join (env.path_home, 'deployment/')
    env.path_current = os.path.join (env.path_root, 'current')
    env.path_releases = os.path.join (env.path_root, 'releases/')
    env.path_full_release = os.path.join (env.path_releases, env.release)
    env.path_full_release_local_settings = os.path.join (env.path_full_release, 'lancms2/local_settings.py')
    env.path_full_release_local_sqlite = os.path.join (env.path_full_release, 'lancms2.sql')
    env.path_apache2_sites_available = '/etc/apache2/sites-available/'
    env.filename_apacheconf = 'apache2-wsgi-virtualhost.conf'
    env.virenv = 'source %s/virtualenv/bin/activate' % env.path_root
    # FIXME: hardcoded user and group:
    env.owner_user = 'www-data'
    env.owner_group = 'lancms2'


def _upload_and_unpack ():
    # local is on local host
    local ('bzr export --format=tgz %s.tar.gz' % env.release)
    # run is on remote host!
    run ('mkdir -p %s' % env.path_full_release)
    # put places local file on remote server
    put ('%s.tar.gz' % env.release, env.path_releases, mode=0750)
    local ('rm -f %s.tar.gz' % env.release)
    with cd ('%s' % env.path_releases):
        run ('tar -xzf %s.tar.gz' % env.release)
        run ('rm %s.tar.gz' % env.release)
    print (green ('Uploaded and unpacked'))


def _create_virtualenv ():
    with cd ('%s' % env.path_root):
        run ('virtualenv virtualenv -p python3')
        run ('source %svirtualenv/bin/activate' % env.path_root)
    print (green ('Created (or recreated) virtual environment'))


def _set_release_permissions ():
    sudo ('chown %s:%s -R %s' % (env.owner_user, env.owner_group, env.path_full_release), shell=False)
    sudo ('chmod g+w -R %s' % (env.path_full_release), shell=False)
    print (green ('Set permissions for www-data on %s' % env.path_full_release))


def _install_requirements ():
    with cd ('%s' % env.path_full_release):
        run ('source %svirtualenv/bin/activate; pip install -r requirements.txt' % env.path_root)
    print (green ('Installed requirements in virtual environment'))


def _symlink_local_settings ():
    path_file = os.path.join (env.path_home, 'LOCAL_SETTINGS.py')
    if files.exists (path_file):
        run ('ln -s %s %s' % (path_file, env.path_full_release_local_settings))
        print (green ('Symlinked local_settings'))


def _symlink_local_sqlite ():
    path_file = os.path.join (env.path_home, 'LANCMS2.sql')
    if files.exists (path_file):
        run ('ln -s %s %s' % (path_file, env.path_full_release_local_sqlite))
        print (green ('Symlinked local sqlite'))


def _symlink_current_release ():
    if files.exists (env.path_current):
        run ('rm -f %s' % env.path_current)
        print (red ('Removed symlink for previous release'))
    run ('ln -s %s %s' % (env.path_full_release, env.path_current))
    print (green ('Symlinked current release %s to %s' % (env.release, env.path_current)))


def _check_hosts ():
    if not env.hosts or env.hosts == "":
        import sys
        print ""
        print red("Missing hosts. Printing helptext.")
        help ()
        sys.exit ()


def _install_local_requirements ():
    path_file = os.path.join (env.path_home, 'REQUIREMENTS.txt')
    if files.exists (path_file):
        with cd ('%s' % env.path_full_release):
            run ('source %svirtualenv/bin/activate; pip install -r %s' % (env.path_root, path_file))
        print (green ('Installed local requirements (%s) in virtual environment' % path_file))
    else:
        print (red ('No local requirements (%s)' % path_file))


def _syncdb ():
    with cd (env.path_current):
        run ('source %svirtualenv/bin/activate; ./manage.py syncdb --noinput' % env.path_root)
    print (green ('Ran syncdb'))


def _migrate ():
    with cd (env.path_current):
        run ('source %svirtualenv/bin/activate; ./manage.py migrate' % env.path_root)
    print (green ('Ran migrate'))


def _restart_webserver ():
    # FIXME: this could be too Debian specific for real reuse. I don't know,
    # haven't used anything but Debian in a long while. :-)
    sudo ('/usr/sbin/service apache2 restart', shell=False)
    print (green ('Restarted apache2'))


def _configure_webserver ():
    path_sfile = os.path.join (env.path_current, env.filename_apacheconf)
    if files.exists (path_sfile):
        path_dfile = os.path.join (env.path_apache2_sites_available, env.project_name)
        sudo ('/bin/cp -f %s %s' % (path_sfile, path_dfile), shell=False)
        sudo ('/usr/sbin/a2ensite %s' % env.project_name, shell=False)
        print (green ('Configured apache2 and activated site'))
    else:
        print (red ("Didn't configure apache2, no config file found."))


def _collectstatic ():
    with cd (env.path_current):
        run ('source %svirtualenv/bin/activate; ./manage.py collectstatic --noinput' % env.path_root)
    print (green ('Ran collectstatic'))


def _put_revision_number ():
    local ('bzr revno > /tmp/%s' % env.release)
    put ('/tmp/%s' % env.release, '%s/.bzr_rev' % env.path_full_release, mode=0750)
    local ('rm /tmp/%s' % env.release)


def deploy ():
    _check_hosts ()
    _environment ()
    _upload_and_unpack ()
    _create_virtualenv ()
    _install_requirements ()
    _install_local_requirements ()
    _symlink_local_settings ()
    _symlink_local_sqlite ()
    _symlink_current_release ()
    _syncdb ()
    _migrate ()
    _collectstatic ()
    _configure_webserver ()
    _restart_webserver ()
    _put_revision_number ()
    _set_release_permissions ()


def help ():
    print ""
    print "deployment script for lancms2"
    print ""
    print "Only available command is 'deploy'."
    print "Remember to define host (-H [email protected])"
    print "Please don't use this if you don't know what it does! No warranties!"
gpl-2.0
-6,103,830,826,268,953,000
31.87931
127
0.68502
false
2.932342
false
false
false
garywu/pypedream
pypedream/plot/_filt.py
1
2685
import numpy

has_matplotlib = True
try:
    from matplotlib import pyplot, figure
except ImportError:
    has_matplotlib = False

from dagpype._core import filters


def _make_relay_call(fn, name):
    def new_fn(*args, **kwargs):
        @filters
        def _dagpype_internal_fn_act(target):
            try:
                while True:
                    target.send((yield))
            except GeneratorExit:
                fn(*args, **kwargs)
                target.close()
        return _dagpype_internal_fn_act
    new_fn.__name__ = name
    new_fn.__doc__ = """
        Convenience filter utility for corresponding function in pyplot.

        Example:

        >>> source([1, 2, 3, 4]) | plot.xlabel('x') | plot.ylabel('y') | plot.title('xy') | (plot.plot() | plot.savefig('foo.png'))
        """
    return new_fn


_try_fns = [
    'annotate', 'arrow', 'autogen_docstring', 'autoscale', 'autumn',
    'axes', 'axhline', 'axhspan', 'axis', 'axvline', 'axvspan',
    'barbs', 'bone', 'box', 'broken_barh',
    'cla', 'clabel', 'clf', 'clim', 'cm', 'cohere', 'colorbar',
    'colormaps', 'colors', 'connect', 'cool', 'copper', 'csd',
    'dedent', 'delaxes', 'docstring', 'draw',
    'figaspect', 'figimage', 'figlegend', 'figtext', 'figure',
    'fill', 'fill_between', 'fill_betweenx', 'flag',
    'gca', 'gcf', 'gci', 'get', 'gray', 'grid',
    'hold', 'hot', 'hsv', 'jet',
    'locator_params', 'margins', 'minorticks_off', 'minorticks_on',
    'normalize', 'over',
    'pcolor', 'pcolormesh', 'pink', 'plotfile', 'plotting', 'polar',
    'prism', 'psd',
    'quiver', 'quiverkey',
    'rc', 'register_cmap', 'rgrids',
    'sca', 'sci', 'set_cmap', 'setp', 'silent_list', 'specgram',
    'spectral', 'spring', 'spy', 'stem', 'step',
    'subplot', 'subplot2grid', 'subplot_tool', 'subplots',
    'subplots_adjust', 'summer', 'suptitle',
    'table', 'text', 'thetagrids', 'tick_params', 'ticklabel_format',
    'tight_layout', 'title', 'tricontour', 'tricontourf', 'tripcolor',
    'triplot', 'twinx', 'twiny',
    'winter',
    'xlabel', 'xlim', 'xscale', 'xticks',
    'ylabel', 'ylim', 'yscale', 'yticks']

_fns = []
if has_matplotlib:
    for fn in _try_fns:
        try:
            exec('%s = _make_relay_call(pyplot.%s, "%s")' % (fn, fn, fn))
            _fns.append(fn)
        except AttributeError:
            pass
bsd-3-clause
-3,559,330,889,597,093,000
16.211538
131
0.480447
false
3.262454
false
false
false
ericleasemorgan/EEBO-TCP-Workset-Browser
bin/make-index.py
1
2042
#!/usr/bin/env python

# make-index.py - read EEBO TEI files and output word frequencies as well as a "book"

# Eric Lease Morgan <[email protected]>
# June 8, 2015 - first investigations; bases on HathiTrust work

# configure
STOPWORDS = './etc/stopwords-en.txt'

# require
import operator
import re
import sys
import libxml2

# sanity check
if ( len( sys.argv ) != 2 ) | ( sys.stdin.isatty() ) :
    print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
    quit()

# get input; sanity check
flag = sys.argv[ 1 ]

# build a book?
if flag == '-b' : build_book = 1
elif flag == '-d' : build_book = 0
else :
    print "Usage: cat <xml> |", sys.argv[ 0 ], '<-b|-d>'
    quit()

# create an xpath parser with an xml file
xml = sys.stdin.read()
tei = libxml2.parseMemory( xml, len( xml ) )
context = tei.xpathNewContext()
context.xpathRegisterNs( 't', 'http://www.tei-c.org/ns/1.0' )

# parse
title = context.xpathEval( '/t:TEI/t:teiHeader/t:fileDesc/t:titleStmt/t:title/text()' )[ 0 ]
text = context.xpathEval( '/t:TEI/t:text' )[ 0 ].content

# normalize the text
text = re.sub( '\s+', ' ', text )
text = text.lower()
text = text.split()

# initialize output
words = {}
book = str( title ) + '\n'

# create a list of (English) stopwords
stopwords = {}
with open ( STOPWORDS ) as DATABASE :
    for record in DATABASE :
        stopwords[ record.rstrip() ] = 1

# process each word in the text
for word in text :

    # normalize some more; probably not 100% accurate
    word = word.rstrip( '?:!.,;)' )
    word = word.lstrip( '?:!.,;(' )

    # filter out unwanted words
    if len( word ) < 2 : continue
    if re.match( '\d|\W', word ) : continue
    if word in stopwords : continue

    # build text file
    if build_book : book = book + word + ' '

    # or update the dictionary
    else : words[ word ] = words.get( word, 0 ) + 1

# output book, or
if build_book : print book

# output the dictionary
else :
    for tuple in sorted( words.items(), key=operator.itemgetter( 1 ), reverse=True ) : print( tuple[ 0 ] + '\t' + str( tuple[ 1 ] ) )

# done
quit()
gpl-2.0
-2,400,946,947,724,106,000
22.744186
92
0.629285
false
2.851955
false
false
false
inexactually/irisbot
utils.py
1
2817
import aiohttp
import inspect
import io
from os.path import split as path_split  # needed by Bot.send_file below

import discord
from discord.ext import commands

import settings


def setting(name, default):
    return getattr(settings, name, default)


def pretty_list(names, bold=True, conjunction='and', empty=''):
    names = list(names)
    if not names:
        return empty
    if bold:
        names = ['**{}**'.format(name) for name in names]
    sep = ' ' + conjunction if conjunction else ''
    if len(names) == 1:
        return names[0]
    elif len(names) == 2:
        return '{}{} {}'.format(names[0], sep, names[1])
    else:
        return '{},{} {}'.format(', '.join(names[:-1]), sep, names[-1])


def is_local_check_failure(error):
    """This horrible hack lets a command error handler figure out if the
    error originates from the command's own checks, rather than a global
    check or some other sort of error.
    """
    if isinstance(error, commands.CheckFailure):
        if error.args:
            return "check functions for command" in error.args[0]


# Copied from discord.ext.commands.bot.py. We need this because
# there's no way to override the formatting of the default Bot.reply.
def bot_get_variable(name):
    stack = inspect.stack()
    try:
        for frames in stack:
            try:
                frame = frames[0]
                current_locals = frame.f_locals
                if name in current_locals:
                    return current_locals[name]
            finally:
                del frame
    finally:
        del stack


class Bot(commands.Bot):
    """A subclass of `discord.ext.commands.Bot` with some improvements.
    """

    async def reply(self, content, *args, separator=' ', **kwargs):
        # Now with custom separator support
        author = bot_get_variable('_internal_author')
        text = '{0.mention}{1}{2}'.format(author, separator, str(content))
        return await self.say(text, *args, **kwargs)

    async def send_file(self, destination, fp, *, filename=None, content=None,
                        embed=None, tts=False):
        # Now with embed support
        channel_id, guild_id = await self._resolve_destination(destination)

        if embed is not None:
            embed = embed.to_dict()

        try:
            with open(fp, 'rb') as f:
                buffer = io.BytesIO(f.read())
                if filename is None:
                    _, filename = path_split(fp)
        except TypeError:
            buffer = fp

        content = str(content) if content is not None else None
        data = await self.http.send_file(channel_id, buffer, guild_id=guild_id,
                                         filename=filename, content=content,
                                         embed=embed, tts=tts)
        channel = self.get_channel(data.get('channel_id'))
        message = self.connection._create_message(channel=channel, **data)
        return message
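

# Doctest-style sketch of pretty_list's behaviour above; the inputs are
# illustrative only:
#
#     pretty_list(['a'], bold=False)            -> 'a'
#     pretty_list(['a', 'b'], bold=False)       -> 'a and b'
#     pretty_list(['a', 'b', 'c'], bold=False)  -> 'a, b, and c'
#     pretty_list([], empty='none')             -> 'none'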
mit
5,766,240,977,364,690,000
33.353659
102
0.599219
false
4.00142
false
false
false
censof/ansible-deployment
django_app_server_db_server/deployment/templates/common.py
1
3942
import os.path

# Configuration modules.
from ._installed_apps import *
from ._middleware import *
from ._context_processors import *
from ._email import *
from ._eclaim import *

_ = lambda s: s

# Debugging mode.
DEBUG = False
TEMPLATE_DEBUG = False

if DEMO_MODE:
    SEND_NOTIF_EMAILS = False
else:
    SEND_NOTIF_EMAILS = True

# Project root directory.
_path = os.path.join(os.path.dirname(__file__), os.pardir)
BASE_DIR = os.path.abspath(os.path.join(_path, os.pardir))

# SQL scripts directory.
_parpath = os.path.join(BASE_DIR, os.pardir)
SQL_SCRIPTS_DIR = os.path.abspath(os.path.join(_parpath, 'sql_scripts'))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wk9&6^ns(71^*i#8&=v#j53-cv#85csvl53zu4dp$w0x(k%zsz'

ALLOWED_HOSTS = ['{{ ansible_eth0.ipv4.address }}']

if DEMO_MODE:
    HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}:{}/'.format(DEMO_PORT)
else:
    HOST_URL = 'http://{{ ansible_eth0.ipv4.address }}/'

LOGIN_URL = '/eclaim/login/'

ROOT_URLCONF = 'eclaim.urls'

WSGI_APPLICATION = 'wsgi.application'

# Absolute path to the directory that holds static files.
STATIC_ROOT = '{{ django_app_home }}/static_files'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)

# Compress static files.
COMPRESS_ENABLED = True

# Absolute path to the directory that holds media files.
MEDIA_ROOT = '{{ django_app_home }}/media_files'

MEDIA_URL = '/media/'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(BASE_DIR, 'templates'),
)

# Django Rest Framework.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 100
}

MINI_PAGE_SIZE = 20

# Sphinx documentation.
DOCS_ROOT = os.path.join(BASE_DIR, 'docs/_build/html')
DOCS_ACCESS = 'login_required'  # public/login_required/staff/superuser

# Internationalization.
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'

LANGUAGES = (
    ('en', _('English')),
    ('ms', _('Bahasa Malaysia')),
)

LOCALE_PATHS = (
    os.path.join(BASE_DIR, 'locale'),
)

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# JavaScript Internationalization (i18n)
JS_I18N_PACKAGES = (
    'eclaim.masterfiles',
    'eclaim.settings'
)

# Caching.
CACHE_TIMEOUT = 7 * 86400  # 7 days

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': os.path.join(BASE_DIR, 'cache'),
        'TIMEOUT': CACHE_TIMEOUT
    },
}

# Logging.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.abspath('eclaim.log'),
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'propagate': True,
            'level': 'DEBUG',
        },
        'ECLAIM': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
    }
}
mit
-4,356,248,494,672,155,600
22.746988
88
0.627093
false
3.12114
false
true
false
quantumlib/Cirq
cirq-core/cirq/experiments/purity_estimation.py
1
2467
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Sequence

import numpy as np


def purity_from_probabilities(
    hilbert_space_dimension: int,
    probabilities: Sequence[float],
) -> float:
    """Purity estimator from speckle purity benchmarking.

    Estimates purity from empirical probabilities of observed bitstrings.
    This estimator assumes that the circuit used in experiment is
    sufficiently scrambling that its output probabilities follow the
    Porter-Thomas distribution. This assumption holds for typical instances
    of random quantum circuits of sufficient depth.

    The state resulting from the experimental implementation of the circuit
    is modeled as

        ρ = p |𝜓⟩⟨𝜓| + (1 - p) I / D

    where |𝜓⟩ is a pure state, I / D is the maximally mixed state, and p is
    between 0 and 1. The purity of this state is given by p**2. If p = 1,
    then the bitstring probabilities are modeled as being drawn from the
    Porter-Thomas distribution, with probability density function given by

        f(x) = (D - 1) (1 - x)**(D - 2).

    The mean of this distribution is 1 / D and its variance is
    (D - 1) / [D**2 (D + 1)]. In general, the variance of the distribution
    is multiplied by p**2. Therefore, the purity can be computed by dividing
    the variance of the empirical probabilities by the Porter-Thomas
    variance (D - 1) / [D**2 (D + 1)].

    Args:
        hilbert_space_dimension: Dimension of the Hilbert space on which
            the quantum circuit acts.
        probabilities: Empirical probabilities of bitstrings observed in
            experiment.

    Returns:
        Estimate of the purity of the state resulting from the experimental
        implementation of a quantum circuit.
    """
    D = hilbert_space_dimension
    porter_thomas_variance = (D - 1) / (D + 1) / D ** 2
    return np.var(probabilities) / porter_thomas_variance
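

# A minimal usage sketch, assuming a two-qubit system (so the Hilbert space
# dimension D is 4); the probability values below are illustrative
# placeholders for empirically observed bitstring frequencies, not real data.
if __name__ == '__main__':
    example_probabilities = [0.50, 0.25, 0.15, 0.10]
    # Returns Var(probabilities) divided by the Porter-Thomas variance
    # (D - 1) / [D**2 (D + 1)], i.e. an estimate of p**2.
    print(purity_from_probabilities(4, example_probabilities))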
apache-2.0
758,926,273,269,246,500
38.532258
79
0.707874
false
3.909091
false
false
false
ActiveState/code
recipes/Python/578414_Takuzu_solver/recipe-578414.py
1
4263
# Copyright 2013 Eviatar Bach, [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Implementation of a Takuzu solver.

A Takuzu board consists of a square grid of binary cells. There must be an
equal number of 0s and 1s in every row and column, no duplicate rows or
columns, and no more than two of the same bit consecutive in every row and
column.
"""

from constraint_solver import pywrapcp

N = None
BOARD1 = [[N, 1, N, 0],
          [N, N, 0, N],
          [N, 0, N, N],
          [1, 1, N, 0]]

BOARD2 = [[N, 1, N, N, N, 0],
          [1, N, N, N, N, 1],
          [N, N, 0, N, N, N],
          [1, N, N, N, N, N],
          [N, N, N, 0, N, 0],
          [N, N, N, N, 1, N]]

BOARD3 = [[N, N, N, 1, N, N, N, N, N, N],
          [N, 0, N, N, N, 0, N, N, N, 1],
          [1, N, 1, 1, N, N, N, 1, N, N],
          [N, N, N, N, N, 0, N, N, N, N],
          [N, 1, N, N, N, N, N, N, 0, N],
          [0, N, N, N, 0, N, N, N, 0, N],
          [N, 1, N, N, N, 0, N, N, N, N],
          [1, N, N, N, 1, N, 1, N, N, N],
          [1, 1, N, 0, N, N, N, N, N, N],
          [N, N, N, N, N, N, N, 1, N, N]]


def valid(board):
    '''
    Checks whether a board has no duplicate rows or columns. This is needed
    to filter out invalid solutions from the constraint solver.
    '''
    return ((len(set(map(tuple, board))) == len(board)) and
            (len(set(zip(*board))) == len(board)))


def solve(board):
    '''
    Solves a Takuzu board, with None for empty (unsolved) spaces
    '''
    assert len(set(map(len, board))) == 1  # all row lengths are the same
    assert len(board) == len(board[0])  # width and height are the same
    assert len(board) % 2 == 0  # board has even dimensions

    line_sum = len(board) / 2  # the number to which all rows and columns sum

    line = range(len(board))  # line and row indices

    solver = pywrapcp.Solver('takuzu')

    grid = {}
    for i in line:
        for j in line:
            grid[(i, j)] = solver.IntVar(0, 1, 'grid %i %i' % (i, j))

    # initial values
    for i in line:
        for j in line:
            if board[i][j] is not None:
                solver.Add(grid[(i, j)] == board[i][j])

    # no three consecutive elements in rows or columns
    for i in line:
        for j in range(len(board) - 2):
            solver.Add(solver.SumGreaterOrEqual(
                [grid[(i, jl)] for jl in line[j:j + 3]], 1))
            solver.Add(solver.SumLessOrEqual(
                [grid[(i, jl)] for jl in line[j:j + 3]], 2))
            solver.Add(solver.SumGreaterOrEqual(
                [grid[(jl, i)] for jl in line[j:j + 3]], 1))
            solver.Add(solver.SumLessOrEqual(
                [grid[(jl, i)] for jl in line[j:j + 3]], 2))

    # rows and columns sum to half the size
    for i in line:
        solver.Add(solver.SumEquality([grid[(i, j)] for j in line], line_sum))
    for j in line:
        solver.Add(solver.SumEquality([grid[(i, j)] for i in line], line_sum))

    # regroup all variables into a list
    all_vars = [grid[(i, j)] for i in line for j in line]

    # create search phases
    vars_phase = solver.Phase(all_vars,
                              solver.INT_VAR_SIMPLE,
                              solver.INT_VALUE_SIMPLE)

    # search for all solutions and remove those with duplicate rows or columns
    solver.NewSearch(vars_phase)
    solutions = []
    while solver.NextSolution():
        solutions.append([[int(grid[(i, j)].Value()) for j in line]
                          for i in line])
    solver.EndSearch()
    solutions = filter(valid, solutions)

    assert len(solutions) == 1  # there should be only one solution

    return solutions[0]

for row in solve(BOARD3):
    print row
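As a quick cross-check of the encoding above, here is a small pure-Python sketch (my own helper, independent of or-tools) that tests one completed line against the same rules the solver enforces; the 0 < sum < 3 window test mirrors the SumGreaterOrEqual/SumLessOrEqual pair:

# Hedged sketch: validate a single solved row or column without a solver.
def line_ok(cells):
    half = len(cells) // 2
    balanced = cells.count(0) == half and cells.count(1) == half
    # every window of three cells must contain both a 0 and a 1
    no_triples = all(0 < sum(cells[i:i + 3]) < 3
                     for i in range(len(cells) - 2))
    return balanced and no_triples

assert line_ok([0, 1, 1, 0])
assert not line_ok([1, 1, 1, 0])  # three consecutive 1s, unbalanced counts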
mit
-8,761,855,008,896,713,000
31.792308
78
0.553366
false
3.249238
false
false
false
vbraun/oxford-strings
app/calendar_view.py
1
6003
# -*- coding: utf-8 -*-
"""
Calendaring Page Views
"""

import sys
import os
import uuid
import logging
from datetime import date, datetime, timedelta

from webapp2 import uri_for
from google.appengine.api import users

import app.config as config
from app.base_view import RequestHandler
from app.decorators import cached_property, requires_login, requires_admin
from app.event_model import Event


class CalendarAdmin(RequestHandler):

    def get_events(self):
        """
        Return all future events
        """
        now = datetime.combine(date.today(), datetime.min.time())
        return Event.query(Event.start_date >= now).order(Event.start_date).fetch(100)

    def get(self):
        self.cache_must_revalidate()
        values = dict()
        values['sync_url'] = uri_for('cron-sync')
        values['full_url'] = uri_for('calendar-admin')
        values['calendar_admin_url'] = self.request.uri
        values['calendar'] = self.get_events()
        self.render_response('calendar_admin.html', **values)

    @requires_admin
    def post(self):
        key_id = self.request.get('key_id')
        active = (self.request.get('active') == u'true')
        ev = Event.get_by_id(int(key_id))
        ev.active = active
        ev.put()


class EventListing(RequestHandler):

    def get_events(self):
        """
        Return all future events
        """
        now = datetime.combine(date.today(), datetime.min.time())
        query = Event.query(Event.start_date >= now, Event.active == True)
        return query.order(Event.start_date).fetch(100)

    def get_template(self):
        raise NotImplementedError

    def get(self):
        self.cache_must_revalidate()
        values = dict()
        # values['edit_url'] = uri_for('calendar-new')
        values['sync_url'] = uri_for('cron-sync')
        values['calendar_admin_url'] = uri_for('calendar-admin')
        values['calendar'] = self.get_events()
        values['abstract_intro'] = config.abstract_intro
        self.render_response(self.get_template(), **values)
        self.response.md5_etag()


class IcalExport(EventListing):

    def _ical_time(self, dt):
        import pytz
        import time
        dt = pytz.utc.localize(dt)
        return time.strftime('%Y%m%dT%H%M%SZ', dt.timetuple())

    def get(self):
        from icalendar import Calendar, Event, vCalAddress, vText
        cal = Calendar()
        cal.add('prodid', '-//Strings Oxford Calendaring//strings.ox.ac.uk//')
        cal.add('version', '2.0')
        cal.add('X-WR-CALNAME', 'Strings Oxford')
        for ev in self.get_events():
            event = Event()
            event['uid'] = vText(ev.uid)
            event['location'] = vText(ev.location)
            event['summary'] = ev.title
            event['dtstart'] = self._ical_time(ev.start_date)
            event['dtend'] = self._ical_time(ev.end_date)
            desc = u'Speaker: {}\n'.format(ev.speaker)
            desc += u'Location: {}\n'.format(ev.location)
            desc += u'Series: {}\n'.format(ev.series)
            desc += ev.description
            event['description'] = vText(desc)
            cal.add_component(event)
        #self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers['Content-Type'] = 'text/calendar'
        self.response.write(cal.to_ical())


class Seminars(EventListing):

    def get_template(self):
        return 'calendar.html'


class JuniorSeminar(EventListing):

    def get_events(self):
        """
        Return all future events in the string theory junior seminar series
        """
        now = datetime.combine(date.today(), datetime.min.time())
        query = Event.query(
            Event.series == 'Strings Junior Seminar',
            Event.start_date >= now,
            Event.active == True)
        return query.order(Event.start_date).fetch(100)

    def get_template(self):
        return 'junior_seminar.html'


class ThisWeek(EventListing):

    def get_template(self):
        return 'this_week.html'

    def get_start_date(self):
        """
        Return the date of the last Saturday
        """
        today = date.today()
        # today.weekday() in {0, ..., 6} switches to "0" on Monday
        key_day = today + timedelta(days=2)  # we want to switch calendar on saturday
        return today - timedelta(days=key_day.weekday())

    def get_events(self):
        last_saturday = self.get_start_date()
        next_saturday = last_saturday + timedelta(weeks=1)
        t0 = datetime.combine(last_saturday, datetime.min.time())
        t1 = datetime.combine(next_saturday, datetime.max.time())
        # allowing for week-spanning events would ideally be:
        #     query = Event.query(Event.start_date <= t1, Event.end_date >= t0)
        # but inequality queries can currently be only on one property
        query = Event.query(
            Event.start_date >= t0,
            Event.start_date < t1,
            Event.active == True)
        return query.order(Event.start_date).fetch(100)


class NextWeek(ThisWeek):

    def get_template(self):
        return 'next_week.html'

    def get_start_date(self):
        """
        Return the date of the next Saturday
        """
        return ThisWeek.get_start_date(self) + timedelta(weeks=1)


class ThisWeekEmail(ThisWeek):

    def get_template(self):
        return 'this_week_email.html'


class CalendarEdit(EventListing):
    """
    TODO: do we really want to edit events ourselves?
    """

    def get_event(self, key_id):
        if key_id is not None:
            return Event.get_by_id(int(key_id))
        uid = str(uuid.uuid4())
        ev = Event(uid=uid, editable=True, active=True)
        ev.start_date = datetime.utcnow()
        ev.end_date = datetime.utcnow()
        ev.put()
        return ev

    def get(self, uid=None):
        values = dict()
        values['calendar'] = [self.get_event(uid)]
        self.render_response('calendar.html', **values)
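A worked example of the get_start_date() arithmetic above (standalone; the concrete date is chosen for illustration): shifting today by two days makes weekday() count from Saturday instead of Monday, so subtracting it lands on the previous Saturday.

from datetime import date, timedelta

today = date(2014, 1, 8)             # a Wednesday, weekday() == 2
key_day = today + timedelta(days=2)  # the following Friday, weekday() == 4
print(today - timedelta(days=key_day.weekday()))  # 2014-01-04, a Saturday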
gpl-2.0
2,358,411,222,917,641,000
28.717822
86
0.596702
false
3.691882
false
false
false
gajim/python-nbxmpp
nbxmpp/modules/rsm.py
1
1846
# Copyright (C) 2020 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.

from nbxmpp.namespaces import Namespace
from nbxmpp.structs import RSMData


def parse_rsm(stanza):
    stanza = stanza.getTag('set', namespace=Namespace.RSM)
    if stanza is None:
        return None

    after = stanza.getTagData('after') or None
    before = stanza.getTagData('before') or None
    last = stanza.getTagData('last') or None

    first_index = None
    # The index lives in an attribute of the <first> element, so fetch the
    # element itself with getTag(); getTagData() would only return its text
    # content and the attribute would never be readable.
    first = stanza.getTag('first')
    if first is not None:
        try:
            first_index = int(first.getAttr('index'))
        except Exception:
            pass
        first = first.getData() or None

    try:
        count = int(stanza.getTagData('count'))
    except Exception:
        count = None

    try:
        max_ = int(stanza.getTagData('max'))
    except Exception:
        max_ = None

    try:
        index = int(stanza.getTagData('index'))
    except Exception:
        index = None

    return RSMData(after=after,
                   before=before,
                   last=last,
                   first=first,
                   first_index=first_index,
                   count=count,
                   max=max_,
                   index=index)
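For illustration, the element shape parse_rsm() consumes, written by hand rather than taken from a real transcript:

# Hypothetical XEP-0059 result-set element inside a stanza:
#
#   <set xmlns='http://jabber.org/protocol/rsm'>
#     <first index='0'>stanza-id-1</first>
#     <last>stanza-id-10</last>
#     <count>800</count>
#   </set>
#
# For this input parse_rsm() yields RSMData(first='stanza-id-1',
# first_index=0, last='stanza-id-10', count=800) with after, before,
# max and index all None.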
gpl-3.0
1,084,813,893,102,950,700
29.245902
70
0.635772
false
4.164786
false
false
false
Zouyiran/ryu
ryu/services/protocols/bgp/utils/internable.py
1
3260
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import weakref
from six.moves import intern

dict_name = intern('_internable_dict')


#
# Internable
#
class Internable(object):
    """Class that allows instances to be 'interned'. That is, given an
    instance of this class, one can obtain a canonical (interned) copy.

    This saves memory when there are likely to be many identical instances
    of the class -- users hold references to a single interned object
    instead of references to different objects that are identical.

    The interned version of a given instance is created on demand if
    necessary, and automatically cleaned up when nobody holds a reference
    to it.

    Instances of sub-classes must be usable as dictionary keys for
    Internable to work.
    """

    class Stats(object):

        def __init__(self):
            self.d = {}

        def incr(self, name):
            self.d[name] = self.d.get(name, 0) + 1

        def __repr__(self):
            return repr(self.d)

        def __str__(self):
            return str(self.d)

    @classmethod
    def _internable_init(kls):
        # Objects to be interned are held as keys in a dictionary that
        # only holds weak references to keys. As a result, when the
        # last reference to an interned object goes away, the object
        # will be removed from the dictionary.
        kls._internable_dict = weakref.WeakKeyDictionary()
        kls._internable_stats = Internable.Stats()

    @classmethod
    def intern_stats(kls):
        return kls._internable_stats

    def intern(self):
        """Returns either itself or a canonical copy of itself."""

        # If this is an interned object, count the hit and return it.
        if hasattr(self, '_interned'):
            self._internable_stats.incr('self')
            return self

        #
        # Got to find or create an interned object identical to this
        # one. Auto-initialize the class if need be.
        #
        kls = self.__class__

        if not hasattr(kls, dict_name):
            kls._internable_init()

        obj = kls._internable_dict.get(self)
        if (obj):
            # Found an interned copy. The dictionary stores weak
            # references as values, so dereference before returning.
            kls._internable_stats.incr('found')
            return obj()

        # Create an interned copy. Take care to only keep a weak
        # reference to the object itself.
        def object_collected(obj):
            kls._internable_stats.incr('collected')
            # print("Object %s garbage collected" % obj)
            pass

        ref = weakref.ref(self, object_collected)
        kls._internable_dict[self] = ref
        self._interned = True
        kls._internable_stats.incr('inserted')
        return self
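A minimal usage sketch (the Prefix class is my own toy example, not from the ryu sources): equal instances collapse onto one canonical object once interned.

class Prefix(Internable):
    # Toy value type; hashable and comparable, as Internable requires.
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return isinstance(other, Prefix) and self.value == other.value

    def __hash__(self):
        return hash(self.value)

a = Prefix('10.0.0.0/8').intern()  # first call creates the canonical copy
b = Prefix('10.0.0.0/8').intern()  # second call finds and returns that copy
assert a is b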
apache-2.0
151,006,402,060,610,500
30.650485
74
0.643865
false
4.163474
false
false
false
ParashRahman/Database-Project
Part1/record_violation.py
1
15513
from application import Application
from error_checker import ErrorChecker
from errors import InvalidDateException
import add_person


class RecordViolation(Application):

    def start_application(self, c):
        self.cursor = c
        self.list_of_inputs = [ None for i in range(8) ]
        self.get_violation_no(0)

        self.fields = [ "Violator no.",          # 1
                        "Vehicle id",            # 2
                        "Office no.",            # 3
                        "Violation type",        # 4
                        "Violation date",        # 5
                        "Place",                 # 6
                        "Description",           # 7
                        "Insert into database",  # 8
                        "Exit: Cancel entering violation" ]  # 9

        self.cursor.execute( "SELECT * FROM ticket" )
        self.metadata = self.cursor.description

        while ( True ):
            self.print_field_options( )
            choice = self.get_input( len(self.fields) )
            if ( choice == 1 ):
                self.get_violator_no(choice)
            elif ( choice == 2 ):
                self.get_vehicle_id(choice)
            elif ( choice == 3 ):
                self.get_office_no(choice)
            elif ( choice == 4 ):
                self.get_violation_type(choice)
            elif ( choice == 5 ):
                self.get_violation_date(choice)
            elif ( choice == 6 ):
                self.get_violation_place(choice)
            elif ( choice == 7 ):
                self.get_violation_description(choice)
            # Enter data into db option
            elif ( choice == 8 ):
                inserted = self.insert_into_database()
                if ( inserted ):
                    return
                else:
                    continue
            # Exit option
            elif ( choice == 9 ):
                return

    # helper function for printing options
    def print_field_options( self, fields = None, showEmpty = True ):
        if ( fields == None ):
            fields = self.fields
        print( "Enter a field option to edit: " )
        for i in range( len( fields ) ):
            print ( "[{:}] ".format( i+1 ) + fields[i] +
                    (" EMPTY" if showEmpty and i < 7 and
                     not self.list_of_inputs[i+1] else "") )

    # returns the integer input choice
    def get_input( self, num_choices,
                   prompt = "Choose a field to edit or an option: ",
                   fields = None, showEmpty = True ):
        if ( fields == None ):
            fields = self.fields
        print( prompt )
        try:
            string_input = input()
            choice = int(string_input)
        except:
            choice = "Invalid"
        while ( type( choice ) is not int
                or choice >= num_choices + 1 or choice <= 0 ):
            self.print_field_options(fields, showEmpty)
            print( "Enter a valid integer choice: " )
            try:
                string_input = input()
                choice = int(string_input)
            except:
                choice = "Invalid"
        return choice

    ###################################
    # GENERATE VIOLATION NO.
    ###################################
    def get_violation_no( self, index ):
        # gets the list of ids and adds 1 to the max
        numbers = self.cursor.execute(
            "SELECT ticket_no FROM ticket" ).fetchall()
        self.list_of_inputs[index] = max([ ID[0] for ID in numbers ]) + 1

    ###################################
    # GET VIOLATOR NO.
    ###################################
    def get_violator_no(self, index):
        # initial get and check
        user_input = input("Enter the violator's SIN "
                           "(Enter nothing to cancel): ")

        # initial check if user wants to cancel
        if ( len( user_input ) == 0 ):
            return

        # initial check for if violator exists
        exists = False
        self.cursor.execute("SELECT SIN FROM people")
        rows = self.cursor.fetchall()
        rows = [ row[0].strip().lower() for row in rows ]
        if ( user_input.strip().lower() in rows ):
            exists = True

        # While the input string is too long or the violator does not exist
        short_enough = ErrorChecker.check_error(self.metadata[index],
                                                user_input)
        while ( not short_enough or not exists):
            if ( not short_enough ):
                user_input = input("Your input was too long. "
                                   "Enter the violator's SIN "
                                   "(Enter nothing to cancel): ")
            elif ( not exists ):
                char_answer = ""
                while ( char_answer.strip().lower() not in [ 'y', 'n' ] ):
                    char_answer = input( "The violator is not in the database. "
                                         "Would you like to add the person? (y/n): " )
                if ( char_answer == 'y' ):
                    a = add_person.AddPerson()
                    a.start_application(self.cursor)
                    self.cursor.execute("SELECT SIN FROM people")
                    rows = self.cursor.fetchall()
                    rows = [ row[0].strip().lower() for row in rows ]
                user_input = input("Enter the violator's SIN (Enter "
                                   "nothing to cancel): ")

            if ( len( user_input ) == 0 ):
                return
            if ( user_input.strip().lower() in rows ):
                exists = True
            else:
                exists = False
            short_enough = ErrorChecker.check_error(self.metadata[index],
                                                    user_input)

        self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())

    ###################################
    # GET VEHICLE ID
    ###################################
    def get_vehicle_id(self, index):
        # initial get and check
        user_input = input("Enter the vehicle serial number "
                           "(Enter nothing to cancel): ")

        # initial check if user wants to cancel
        if ( len( user_input ) == 0 ):
            return

        # initial check for if the vehicle exists
        exists = False
        self.cursor.execute("SELECT serial_no FROM vehicle")
        rows = self.cursor.fetchall()
        rows = [ row[0].strip().lower() for row in rows ]
        if ( user_input.strip().lower() in rows ):
            exists = True

        # While the input string is too long or the vehicle does not exist
        short_enough = ErrorChecker.check_error(self.metadata[index],
                                                user_input)
        while ( not short_enough or not exists):
            if ( not short_enough ):
                user_input = input("Your input was too long. "
                                   "Enter the vehicle serial number "
                                   "(Enter nothing to cancel): ")
            elif ( not exists ):
                user_input = input("The vehicle is not in the database. "
                                   "Enter the vehicle serial number (Enter "
                                   "nothing to cancel): ")

            if ( len( user_input ) == 0 ):
                return
            if ( user_input.strip().lower() in rows ):
                exists = True
            else:
                exists = False
            short_enough = ErrorChecker.check_error(self.metadata[index],
                                                    user_input)

        self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())

    ###################################
    # GET OFFICE NO.
    ###################################
    def get_office_no(self, index):
        # initial get and check
        user_input = input("Enter the office number "
                           "(Enter nothing to cancel): ")

        # initial check if user wants to cancel
        if ( len( user_input ) == 0 ):
            return

        # initial existence check
        # (NOTE: this still queries people.SIN, carried over from
        # get_violator_no; a lookup against an office table is
        # presumably intended)
        exists = False
        self.cursor.execute("SELECT SIN FROM people")
        rows = self.cursor.fetchall()
        rows = [ row[0].strip().lower() for row in rows ]
        if ( user_input.strip().lower() in rows ):
            exists = True

        # While the input string is too long or the office does not exist
        short_enough = ErrorChecker.check_error(self.metadata[index],
                                                user_input)
        while ( not short_enough or not exists):
            if ( not short_enough ):
                user_input = input("Your input was too long. "
                                   "Enter the office number "
                                   "(Enter nothing to cancel): ")
            elif ( not exists ):
                user_input = input("The office is not in the database. "
                                   "Enter the office number (Enter "
                                   "nothing to cancel): ")

            if ( len( user_input ) == 0 ):
                return
            if ( user_input.strip().lower() in rows ):
                exists = True
            else:
                exists = False
            short_enough = ErrorChecker.check_error(self.metadata[index],
                                                    user_input)

        self.list_of_inputs[index] = "'{:}'".format(user_input.strip().lower())

    ###################################
    # GET VIOLATION TYPE
    ###################################
    def get_violation_type(self, index):
        self.cursor.execute( "SELECT * FROM ticket_type" )
        list_of_types = self.cursor.fetchall()
        prompt_types = [ row[0] + " $" + str(row[1]) for row in list_of_types ]
        self.print_field_options( prompt_types, False )
        user_input = self.get_input(len( prompt_types ),
                                    "Pick a violation type",
                                    prompt_types, False )
        self.list_of_inputs[index] = "'{:}'".format(
            list_of_types[user_input-1][0])

    ###################################
    # GET VIOLATION DATE
    ###################################
    def get_violation_date(self, index):
        while ( True ):
            date_input = input ( "Enter the date ( DD/MM/YYYY ) "
                                 "(Enter nothing to cancel): ")
            if ( len( date_input ) == 0 ):
                return
            date_input = date_input.split('/')
            try:
                if len(date_input) != 3:
                    raise InvalidDateException()
                for component in date_input:
                    if ( not ErrorChecker.check_str_int(component) ):
                        raise InvalidDateException()
                date_input = [ int(comp) for comp in date_input ]
                if (not ErrorChecker.check_error(self.metadata[index],
                                                 date_input)):
                    raise InvalidDateException()
                break
            except ( InvalidDateException ):
                print( "Your date was invalid" )

        if ( date_input != None ):
            d = date_input[0]
            m = date_input[1]
            y = date_input[2]
            self.list_of_inputs[index] = [ "'{:}/{:}/{:}'".format(d, m, y),
                                           "'DD/MM/YYYY'" ]

    ###################################
    # GET VIOLATION PLACE
    ###################################
    def get_violation_place(self, index):
        while ( True ):
            user_input = input("Enter the place of the violation "
                               "(Enter nothing to cancel): ")
            if ( len( user_input ) == 0 ):
                return
            if ( ErrorChecker.check_error( self.metadata[index],
                                           user_input ) ):
                break
            else:
                print( "Your input was too long" )
        self.list_of_inputs[index] = "'{:}'".format(user_input)

    ###################################
    # GET VIOLATION DESCRIPTION
    ###################################
    def get_violation_description(self, index):
        while ( True ):
            user_input = input("Enter the description of the violation "
                               "(Enter nothing to cancel): ")
            if ( len( user_input ) == 0 ):
                return
            if ( ErrorChecker.check_error( self.metadata[index],
                                           user_input ) ):
                break
            else:
                print( "Your input was too long" )
        self.list_of_inputs[index] = "'{:}'".format(user_input)

    ###################################
    # INSERT INTO DATABASE
    ###################################
    def insert_into_database(self):
        # check if fields are empty
        unfinished = False
        for inp in self.list_of_inputs:
            if ( inp == None ):
                unfinished = True
        if ( unfinished ):
            print( "You have left some fields blank." )
            char_answer = ""
            while ( char_answer.strip().lower() not in [ 'y', 'n' ] ):
                char_answer = input( "Would you like to continue saving (y/n)? " )
            if ( char_answer == 'n' ):
                return False

        # change all Nones in input to "NULL"
        for i in range( len( self.list_of_inputs ) ):
            if ( self.list_of_inputs[i] == None ):
                self.list_of_inputs[i] = "NULL"

        # prepare date for insertion
        if ( self.list_of_inputs[5] != "NULL" ):
            self.list_of_inputs[5] = "TO_DATE( {:}, {:} )".format(
                self.list_of_inputs[5][0], self.list_of_inputs[5][1] )

        # attempt to charge primary owner if vehicle entered
        # and violator is not
        if ( self.list_of_inputs[2] != "NULL" and
             self.list_of_inputs[1] == "NULL" ):
            statement = "SELECT o.owner_id FROM owner o, " \
                        "vehicle v where v.serial_no = o.vehicle_id " \
                        "and o.is_primary_owner = 'y' and v.serial_no = " + \
                        self.list_of_inputs[2]
            primary_owner = self.cursor.execute( statement ).fetchall()
            if ( len( primary_owner ) == 0 ):
                # Do nothing
                pass
            else:
                primary_owner = "'{:}'".format( primary_owner[0][0] )
                self.list_of_inputs[1] = primary_owner

        statement = "INSERT INTO ticket VALUES( " \
                    "{:}, {:}, {:}, {:}, {:}, {:}, {:}, {:} )".format(
                        self.list_of_inputs[0], self.list_of_inputs[1],
                        self.list_of_inputs[2], self.list_of_inputs[3],
                        self.list_of_inputs[4], self.list_of_inputs[5],
                        self.list_of_inputs[6], self.list_of_inputs[7] )
        self.cursor.execute( statement )
        return True

    def change_owner(self, owner_sin, vehicle_id, is_primary_owner):
        statement = "delete from owner where vehicle_id='{}'".format(
            str(vehicle_id))
        self.cursor.execute(statement)
        value_statement = ('(' + "'" + str(owner_sin) + "'" + ',' +
                           "'" + str(vehicle_id) + "'" + ',' +
                           "'" + str(is_primary_owner) + "'" + ')')
        statement2 = "insert into owner values" + value_statement
        try:
            self.cursor.execute(statement2)
        except Exception as e:
            print("Error! cannot add an owner record")
            return
apache-2.0
8,663,207,515,949,221,000
37.20936
116
0.460517
false
4.513529
false
false
false
fifengine/fifengine-demos
pychan_demo/colortester.py
1
7852
# -*- coding: utf-8 -*-

# ####################################################################
#  Copyright (C) 2005-2013 by the FIFE team
#  http://www.fifengine.net
#  This file is part of FIFE.
#
#  FIFE is free software; you can redistribute it and/or
#  modify it under the terms of the GNU Lesser General Public
#  License as published by the Free Software Foundation; either
#  version 2.1 of the License, or (at your option) any later version.
#
#  This library is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  Lesser General Public License for more details.
#
#  You should have received a copy of the GNU Lesser General Public
#  License along with this library; if not, write to the
#  Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
# ####################################################################

""" pychan demo app for testing rgba colors on widgets """

from builtins import str
from pychan_demo import PyChanExample
from fife.extensions import pychan


class ColorExample(PyChanExample):
    """ a small app (^^) to show how fifechan uses colors on various widgets """

    def __init__(self):
        super(ColorExample, self).__init__('gui/colortester.xml')

    def start(self):
        """ load XML file and setup callbacks """
        self.widget = pychan.loadXML(self.xmlFile)
        self.widget.mapEvents({
            'base_rslider': self.update_basecolor,
            'base_gslider': self.update_basecolor,
            'base_bslider': self.update_basecolor,
            'base_aslider': self.update_basecolor,

            'background_rslider': self.update_background_color,
            'background_gslider': self.update_background_color,
            'background_bslider': self.update_background_color,
            'background_aslider': self.update_background_color,

            'foreground_rslider': self.update_foreground_color,
            'foreground_gslider': self.update_foreground_color,
            'foreground_bslider': self.update_foreground_color,
            'foreground_aslider': self.update_foreground_color,

            'selection_rslider': self.update_selection_color,
            'selection_gslider': self.update_selection_color,
            'selection_bslider': self.update_selection_color,
            'selection_aslider': self.update_selection_color,

            'closeButton': self.stop,
        })
        # alpha value needs to be set, otherwise you don't see colors ;-)
        self.widget.findChild(name="base_aslider").value = float(255)
        self.widget.findChild(name="background_aslider").value = float(255)
        self.widget.findChild(name="foreground_aslider").value = float(255)
        self.widget.findChild(name="selection_aslider").value = float(255)

        # init stuff
        self.update_basecolor()
        self.update_background_color()
        self.update_foreground_color()
        self.update_selection_color()
        self.widget.show()

    def update_basecolor(self):
        """ Update rgba base colors of all examples and show the values """
        r = int(self.widget.findChild(name="base_rslider").value)
        g = int(self.widget.findChild(name="base_gslider").value)
        b = int(self.widget.findChild(name="base_bslider").value)
        a = int(self.widget.findChild(name="base_aslider").value)

        # update slider labels
        self.widget.findChild(name="base_rvalue").text = str(r)
        self.widget.findChild(name="base_gvalue").text = str(g)
        self.widget.findChild(name="base_bvalue").text = str(b)
        self.widget.findChild(name="base_avalue").text = str(a)

        rgba = (r, g, b, a)
        self.widget.findChild(name="example1").base_color = rgba
        self.widget.findChild(name="example2").base_color = rgba
        self.widget.findChild(name="example3").base_color = rgba
        self.widget.findChild(name="example4").base_color = rgba
        self.widget.findChild(name="example5").base_color = rgba
        self.widget.findChild(name="example6").base_color = rgba
        self.widget.findChild(name="example7").base_color = rgba
        self.widget.findChild(name="example8").base_color = rgba
        self.widget.findChild(name="example9").base_color = rgba

    def update_background_color(self):
        """ Update rgba background colors of all examples and show the values """
        r = int(self.widget.findChild(name="background_rslider").value)
        g = int(self.widget.findChild(name="background_gslider").value)
        b = int(self.widget.findChild(name="background_bslider").value)
        a = int(self.widget.findChild(name="background_aslider").value)

        # update slider labels
        self.widget.findChild(name="background_rvalue").text = str(r)
        self.widget.findChild(name="background_gvalue").text = str(g)
        self.widget.findChild(name="background_bvalue").text = str(b)
        self.widget.findChild(name="background_avalue").text = str(a)

        rgba = (r, g, b, a)
        self.widget.findChild(name="example1").background_color = rgba
        self.widget.findChild(name="example2").background_color = rgba
        self.widget.findChild(name="example3").background_color = rgba
        self.widget.findChild(name="example4").background_color = rgba
        self.widget.findChild(name="example5").background_color = rgba
        self.widget.findChild(name="example6").background_color = rgba
        self.widget.findChild(name="example7").background_color = rgba
        self.widget.findChild(name="example8").background_color = rgba
        self.widget.findChild(name="example9").background_color = rgba

    def update_selection_color(self):
        """ Update rgba selection colors of all examples and show the values """
        r = int(self.widget.findChild(name="selection_rslider").value)
        g = int(self.widget.findChild(name="selection_gslider").value)
        b = int(self.widget.findChild(name="selection_bslider").value)
        a = int(self.widget.findChild(name="selection_aslider").value)

        # update slider labels
        self.widget.findChild(name="selection_rvalue").text = str(r)
        self.widget.findChild(name="selection_gvalue").text = str(g)
        self.widget.findChild(name="selection_bvalue").text = str(b)
        self.widget.findChild(name="selection_avalue").text = str(a)

        rgba = (r, g, b, a)
        self.widget.findChild(name="example1").selection_color = rgba
        self.widget.findChild(name="example2").selection_color = rgba
        self.widget.findChild(name="example3").selection_color = rgba
        self.widget.findChild(name="example4").selection_color = rgba
        self.widget.findChild(name="example5").selection_color = rgba
        self.widget.findChild(name="example6").selection_color = rgba
        self.widget.findChild(name="example7").selection_color = rgba
        self.widget.findChild(name="example8").selection_color = rgba
        self.widget.findChild(name="example9").selection_color = rgba

    def update_foreground_color(self):
        """ Update rgba foreground colors of all examples and show the values """
        r = int(self.widget.findChild(name="foreground_rslider").value)
        g = int(self.widget.findChild(name="foreground_gslider").value)
        b = int(self.widget.findChild(name="foreground_bslider").value)
        a = int(self.widget.findChild(name="foreground_aslider").value)

        # update slider labels
        self.widget.findChild(name="foreground_rvalue").text = str(r)
        self.widget.findChild(name="foreground_gvalue").text = str(g)
        self.widget.findChild(name="foreground_bvalue").text = str(b)
        self.widget.findChild(name="foreground_avalue").text = str(a)

        rgba = (r, g, b, a)
        self.widget.findChild(name="example1").foreground_color = rgba
        self.widget.findChild(name="example2").foreground_color = rgba
        self.widget.findChild(name="example3").foreground_color = rgba
        self.widget.findChild(name="example4").foreground_color = rgba
        self.widget.findChild(name="example5").foreground_color = rgba
        self.widget.findChild(name="example6").foreground_color = rgba
        self.widget.findChild(name="example7").foreground_color = rgba
        self.widget.findChild(name="example8").foreground_color = rgba
        self.widget.findChild(name="example9").foreground_color = rgba
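The four update methods repeat the same nine findChild assignments per color attribute; as a sketch (assuming the example1..example9 widget names from the XML layout), the repetition can be folded into one helper:

# Hedged sketch: apply one color attribute to all nine example widgets.
def apply_to_examples(widget, attribute, rgba, count=9):
    for n in range(1, count + 1):
        setattr(widget.findChild(name="example%d" % n), attribute, rgba)

# e.g. inside update_basecolor():
#     apply_to_examples(self.widget, "base_color", (r, g, b, a))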
lgpl-2.1
3,314,907,281,187,378,700
41.673913
74
0.717142
false
3.220673
false
false
false